repo_id
stringclasses 563
values | file_path
stringlengths 40
166
| content
stringlengths 1
2.94M
| __index_level_0__
int64 0
0
|
|---|---|---|---|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/tests/tests.rs
|
use std::cell::{Ref, RefCell, RefMut};
use light_bounded_vec::BoundedVec;
use light_concurrent_merkle_tree::{
errors::ConcurrentMerkleTreeError,
event::IndexedMerkleTreeUpdate,
light_hasher::{Hasher, Poseidon},
};
use light_hash_set::{HashSet, HashSetError};
use light_indexed_merkle_tree::{
array::{IndexedArray, IndexedElement},
errors::IndexedMerkleTreeError,
reference, IndexedMerkleTree, HIGHEST_ADDRESS_PLUS_ONE,
};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::{BigUint, RandBigInt, ToBigUint};
use num_traits::{FromBytes, Num};
use rand::thread_rng;
use thiserror::Error;
/// Height of the on-chain indexed Merkle tree used throughout these tests.
const MERKLE_TREE_HEIGHT: usize = 4;
/// Capacity of the tree's changelog buffer (for concurrent updates).
const MERKLE_TREE_CHANGELOG: usize = 256;
/// Number of historical roots retained by the tree.
const MERKLE_TREE_ROOTS: usize = 1024;
/// Canopy depth stored on-chain; 0 means full-length proofs are required.
const MERKLE_TREE_CANOPY: usize = 0;
/// Capacity of the indexed changelog used for low-element patching.
const MERKLE_TREE_INDEXED_CHANGELOG: usize = 64;
/// Effective proof length: tree height minus the canopy depth.
const NET_HEIGHT: usize = MERKLE_TREE_HEIGHT - MERKLE_TREE_CANOPY;
/// Capacity of the on-chain nullifier queue (hash set).
const QUEUE_ELEMENTS: usize = 1024;
// Extra slack added to the queue's second `HashSet::new` parameter
// (presumably a sequence-number expiry threshold — see usage below).
const SAFETY_MARGIN: usize = 10;
/// Number of nullifiers inserted per mock instruction.
const NR_NULLIFIERS: usize = 2;
/// A mock function which imitates a Merkle tree program instruction for
/// inserting nullifiers into the queue.
///
/// Each 32-byte nullifier is interpreted as a big-endian big integer and
/// inserted into the queue together with the tree's current sequence number.
///
/// # Errors
///
/// Returns a [`HashSetError`] if any insertion fails (e.g. the element
/// already exists in the queue).
fn program_insert<H>(
    // PDA
    mut queue: RefMut<'_, HashSet>,
    merkle_tree: Ref<'_, IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>>,
    // Instruction data
    nullifiers: [[u8; 32]; NR_NULLIFIERS],
) -> Result<(), HashSetError>
where
    H: Hasher,
{
    // Iterate over the nullifier bytes directly instead of indexing with a
    // counter (avoids clippy::needless_range_loop).
    for nullifier_bytes in &nullifiers {
        let nullifier = BigUint::from_be_bytes(nullifier_bytes.as_slice());
        queue.insert(&nullifier, merkle_tree.sequence_number())?;
    }
    Ok(())
}
/// Errors produced by the mock relayer while draining the nullifier queue.
#[derive(Error, Debug)]
enum RelayerUpdateError {
    /// One or more Merkle tree updates failed. All collected errors are
    /// carried so callers can assert on the exact number and kind of
    /// failures.
    #[error("Updating Merkle tree failed, {0:?}")]
    MerkleTreeUpdate(Vec<IndexedMerkleTreeError>),
}
/// A mock function which imitates a Merkle tree program instruction for
/// inserting nullifiers from the queue to the tree.
///
/// Reads the unmarked queue element at `queue_index`, performs the indexed
/// Merkle tree update with the caller-supplied low element and proof, and
/// finally marks the queue element with the tree's new sequence number.
fn program_update<H>(
    // PDAs
    queue: &mut RefMut<'_, HashSet>,
    merkle_tree: &mut RefMut<'_, IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>>,
    // Instruction data
    changelog_index: u16,
    indexed_changelog_index: u16,
    queue_index: u16,
    low_nullifier: IndexedElement<usize>,
    low_nullifier_next_value: &BigUint,
    low_nullifier_proof: &mut BoundedVec<[u8; 32]>,
) -> Result<IndexedMerkleTreeUpdate<usize>, IndexedMerkleTreeError>
where
    H: Hasher,
{
    // Fetch the still-unmarked element which is about to be nullified.
    let queued_element = queue
        .get_unmarked_bucket(queue_index as usize)
        .unwrap()
        .unwrap();
    // Perform the indexed Merkle tree update.
    let event = merkle_tree.update(
        changelog_index as usize,
        indexed_changelog_index as usize,
        queued_element.value_biguint(),
        low_nullifier.clone(),
        low_nullifier_next_value.clone(),
        low_nullifier_proof,
    )?;
    // Mark the queue element as processed, recording the new sequence number.
    queue
        .mark_with_sequence_number(queue_index as usize, merkle_tree.sequence_number())
        .unwrap();
    Ok(event)
}
// TODO: unify these helpers with MockIndexer
/// A mock function which imitates a relayer endpoint for updating the
/// nullifier Merkle tree.
///
/// Replays every queued element against the on-chain tree while rebuilding a
/// local reference tree and indexed array from scratch. For each successful
/// update, the emitted event is cross-checked against the locally computed
/// bundle and both new leaf proofs are validated against the on-chain tree.
/// Failed updates are collected and returned together in
/// [`RelayerUpdateError::MerkleTreeUpdate`].
fn relayer_update<H>(
    // PDAs
    queue: &mut RefMut<'_, HashSet>,
    merkle_tree: &mut RefMut<'_, IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>>,
) -> Result<(), RelayerUpdateError>
where
    H: Hasher,
{
    // Off-chain mirrors of the on-chain state, rebuilt from an empty state.
    let mut relayer_indexing_array = IndexedArray::<H, usize>::default();
    let mut relayer_merkle_tree =
        reference::IndexedMerkleTree::<H, usize>::new(MERKLE_TREE_HEIGHT, MERKLE_TREE_CANOPY)
            .unwrap();
    let mut update_errors: Vec<IndexedMerkleTreeError> = Vec::new();
    // Snapshot the indices up front; the queue is mutated during the loop.
    let queue_indices = queue.iter().map(|(index, _)| index).collect::<Vec<_>>();
    for queue_index in queue_indices {
        let changelog_index = merkle_tree.changelog_index();
        let indexed_changelog_index = merkle_tree.indexed_changelog_index();
        let queue_element = queue.get_unmarked_bucket(queue_index).unwrap().unwrap();
        // Create new element from the dequeued value.
        let (old_low_nullifier, old_low_nullifier_next_value) = relayer_indexing_array
            .find_low_element_for_nonexistent(&queue_element.value_biguint())
            .unwrap();
        let nullifier_bundle = relayer_indexing_array
            .new_element_with_low_element_index(
                old_low_nullifier.index,
                &queue_element.value_biguint(),
            )
            .unwrap();
        let mut low_nullifier_proof = relayer_merkle_tree
            .get_proof_of_leaf(usize::from(old_low_nullifier.index), false)
            .unwrap();
        // Update on-chain tree.
        let update_successful = match program_update(
            queue,
            merkle_tree,
            changelog_index as u16,
            indexed_changelog_index as u16,
            queue_index as u16,
            old_low_nullifier,
            &old_low_nullifier_next_value,
            &mut low_nullifier_proof,
        ) {
            Ok(event) => {
                // The emitted event must exactly match the bundle computed
                // locally from the same inputs.
                assert_eq!(
                    event.new_low_element.index,
                    nullifier_bundle.new_low_element.index
                );
                assert_eq!(
                    event.new_low_element.next_index,
                    nullifier_bundle.new_low_element.next_index
                );
                assert_eq!(
                    event.new_low_element.value,
                    bigint_to_be_bytes_array::<32>(&nullifier_bundle.new_low_element.value)
                        .unwrap()
                );
                // The low element's next value now points at the newly
                // inserted element.
                assert_eq!(
                    event.new_low_element.next_value,
                    bigint_to_be_bytes_array::<32>(&nullifier_bundle.new_element.value).unwrap()
                );
                let leaf_hash = nullifier_bundle
                    .new_low_element
                    .hash::<H>(&nullifier_bundle.new_element.value)
                    .unwrap();
                assert_eq!(event.new_low_element_hash, leaf_hash);
                let leaf_hash = nullifier_bundle
                    .new_element
                    .hash::<H>(&nullifier_bundle.new_element_next_value)
                    .unwrap();
                assert_eq!(event.new_high_element_hash, leaf_hash);
                assert_eq!(
                    event.new_high_element.index,
                    nullifier_bundle.new_element.index
                );
                assert_eq!(
                    event.new_high_element.next_index,
                    nullifier_bundle.new_element.next_index
                );
                assert_eq!(
                    event.new_high_element.value,
                    bigint_to_be_bytes_array::<32>(&nullifier_bundle.new_element.value).unwrap()
                );
                assert_eq!(
                    event.new_high_element.next_value,
                    bigint_to_be_bytes_array::<32>(&nullifier_bundle.new_element_next_value)
                        .unwrap()
                );
                true
            }
            Err(e) => {
                // Collect the failure; remaining queue elements are still
                // processed.
                update_errors.push(e);
                false
            }
        };
        // Check if the on-chain Merkle tree was really updated.
        if update_successful {
            // Update off-chain tree.
            relayer_merkle_tree
                .update(
                    &nullifier_bundle.new_low_element,
                    &nullifier_bundle.new_element,
                    &nullifier_bundle.new_element_next_value,
                )
                .unwrap();
            // Verify the updated low-element leaf against the on-chain tree
            // using a proof from the reference tree.
            let low_nullifier_leaf = nullifier_bundle
                .new_low_element
                .hash::<H>(&nullifier_bundle.new_element.value)
                .unwrap();
            let low_nullifier_proof = relayer_merkle_tree
                .get_proof_of_leaf(nullifier_bundle.new_low_element.index(), false)
                .unwrap();
            merkle_tree
                .validate_proof(
                    &low_nullifier_leaf,
                    nullifier_bundle.new_low_element.index(),
                    &low_nullifier_proof,
                )
                .unwrap();
            // Same check for the newly appended leaf.
            let new_nullifier_leaf = nullifier_bundle
                .new_element
                .hash::<H>(&nullifier_bundle.new_element_next_value)
                .unwrap();
            let new_nullifier_proof = relayer_merkle_tree
                .get_proof_of_leaf(nullifier_bundle.new_element.index(), false)
                .unwrap();
            merkle_tree
                .validate_proof(
                    &new_nullifier_leaf,
                    nullifier_bundle.new_element.index(),
                    &new_nullifier_proof,
                )
                .unwrap();
            // Insert the element to the indexing array.
            relayer_indexing_array
                .append_with_low_element_index(
                    nullifier_bundle.new_low_element.index,
                    &nullifier_bundle.new_element.value,
                )
                .unwrap();
        }
    }
    if update_errors.is_empty() {
        Ok(())
    } else {
        Err(RelayerUpdateError::MerkleTreeUpdate(update_errors))
    }
}
/// Tests the valid case of:
///
/// * Inserting nullifiers to the queue.
/// * Calling the relayer to update the on-chain nullifier Merkle tree.
fn insert_and_update<H>()
where
H: Hasher,
{
// On-chain PDAs.
let onchain_queue: RefCell<HashSet> =
RefCell::new(HashSet::new(QUEUE_ELEMENTS, MERKLE_TREE_ROOTS + SAFETY_MARGIN).unwrap());
let onchain_tree: RefCell<IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>> =
RefCell::new(
IndexedMerkleTree::new(
MERKLE_TREE_HEIGHT,
MERKLE_TREE_CHANGELOG,
MERKLE_TREE_ROOTS,
MERKLE_TREE_CANOPY,
MERKLE_TREE_INDEXED_CHANGELOG,
)
.unwrap(),
);
onchain_tree.borrow_mut().init().unwrap();
// Insert a pair of nullifiers.
let nullifier1 = 30_u32.to_biguint().unwrap();
let nullifier2 = 10_u32.to_biguint().unwrap();
program_insert::<H>(
onchain_queue.borrow_mut(),
onchain_tree.borrow(),
[
bigint_to_be_bytes_array(&nullifier1).unwrap(),
bigint_to_be_bytes_array(&nullifier2).unwrap(),
],
)
.unwrap();
// Insert an another pair of nullifiers.
let nullifier3 = 20_u32.to_biguint().unwrap();
let nullifier4 = 50_u32.to_biguint().unwrap();
program_insert::<H>(
onchain_queue.borrow_mut(),
onchain_tree.borrow(),
[
bigint_to_be_bytes_array(&nullifier3).unwrap(),
bigint_to_be_bytes_array(&nullifier4).unwrap(),
],
)
.unwrap();
// Call relayer to update the tree.
relayer_update::<H>(
&mut onchain_queue.borrow_mut(),
&mut onchain_tree.borrow_mut(),
)
.unwrap();
}
/// Runs [`insert_and_update`] with the Poseidon hasher.
#[test]
pub fn test_insert_and_update_poseidon() {
    insert_and_update::<Poseidon>()
}
/// Tests the invalid case of inserting the same nullifiers multiple times into
/// the queue and Merkle tree - an attempt of double spending.
fn double_spend<H>()
where
    H: Hasher,
{
    // On-chain PDAs. Note the queue is tiny (20 buckets) and uses a
    // sequence threshold of 0 here.
    let onchain_queue: RefCell<HashSet> = RefCell::new(HashSet::new(20, 0).unwrap());
    let onchain_tree: RefCell<IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>> =
        RefCell::new(
            IndexedMerkleTree::new(
                MERKLE_TREE_HEIGHT,
                MERKLE_TREE_CHANGELOG,
                MERKLE_TREE_ROOTS,
                MERKLE_TREE_CANOPY,
                MERKLE_TREE_INDEXED_CHANGELOG,
            )
            .unwrap(),
        );
    onchain_tree.borrow_mut().init().unwrap();
    // Insert a pair of nullifiers.
    let nullifier1 = 30_u32.to_biguint().unwrap();
    let nullifier1: [u8; 32] = bigint_to_be_bytes_array(&nullifier1).unwrap();
    let nullifier2 = 10_u32.to_biguint().unwrap();
    let nullifier2: [u8; 32] = bigint_to_be_bytes_array(&nullifier2).unwrap();
    program_insert::<H>(
        onchain_queue.borrow_mut(),
        onchain_tree.borrow(),
        [nullifier1, nullifier2],
    )
    .unwrap();
    // Try inserting the same pair into the queue. It should fail with an error.
    let res = program_insert::<H>(
        onchain_queue.borrow_mut(),
        onchain_tree.borrow(),
        [nullifier1, nullifier2],
    );
    assert!(matches!(res, Err(HashSetError::ElementAlreadyExists)));
    // Update the on-chain tree (so it contains the nullifiers we inserted).
    relayer_update::<H>(
        &mut onchain_queue.borrow_mut(),
        &mut onchain_tree.borrow_mut(),
    )
    .unwrap();
    // The nullifiers are in the tree and not in the queue anymore. We can try
    // our luck with double-spending again.
    // NOTE(review): this re-insertion succeeds — presumably marked queue
    // entries become reusable once processed (threshold 0 above); confirm
    // against the `HashSet` implementation.
    program_insert::<H>(
        onchain_queue.borrow_mut(),
        onchain_tree.borrow(),
        [nullifier1, nullifier2],
    )
    .unwrap();
    // At the same time, insert also some new nullifiers which aren't spent
    // yet. We want to make sure that they will be processed successfully and
    // only the invalid nullifiers will produce errors.
    let nullifier3 = 25_u32.to_biguint().unwrap();
    let nullifier4 = 5_u32.to_biguint().unwrap();
    program_insert::<H>(
        onchain_queue.borrow_mut(),
        onchain_tree.borrow(),
        [
            bigint_to_be_bytes_array(&nullifier3).unwrap(),
            bigint_to_be_bytes_array(&nullifier4).unwrap(),
        ],
    )
    .unwrap();
    // We expect exactly two errors (for the invalid nullifiers). No more, no
    // less.
    let res = relayer_update::<H>(
        &mut onchain_queue.borrow_mut(),
        &mut onchain_tree.borrow_mut(),
    );
    assert!(matches!(res, Err(RelayerUpdateError::MerkleTreeUpdate(_))));
}
/// Runs [`double_spend`] with the Poseidon hasher.
#[test]
pub fn test_double_spend_queue_poseidon() {
    double_spend::<Poseidon>()
}
/// Try to insert a nullifier to the tree while pointing to an invalid low
/// nullifier.
///
/// Such invalid insertion needs to be performed manually, without relayer's
/// help (which would always insert that nullifier correctly).
fn insert_invalid_low_element<H>()
where
    H: Hasher,
{
    // On-chain PDAs.
    let onchain_queue: RefCell<HashSet> =
        RefCell::new(HashSet::new(QUEUE_ELEMENTS, MERKLE_TREE_ROOTS + SAFETY_MARGIN).unwrap());
    let onchain_tree: RefCell<IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>> =
        RefCell::new(
            IndexedMerkleTree::new(
                MERKLE_TREE_HEIGHT,
                MERKLE_TREE_CHANGELOG,
                MERKLE_TREE_ROOTS,
                MERKLE_TREE_CANOPY,
                MERKLE_TREE_INDEXED_CHANGELOG,
            )
            .unwrap(),
        );
    onchain_tree.borrow_mut().init().unwrap();
    // Local artifacts, kept in sync with the on-chain state manually.
    let mut local_indexed_array = IndexedArray::<H, usize>::default();
    let mut local_merkle_tree =
        reference::IndexedMerkleTree::<H, usize>::new(MERKLE_TREE_HEIGHT, MERKLE_TREE_CANOPY)
            .unwrap();
    // Insert a pair of nullifiers, correctly. Just do it with relayer.
    let nullifier1 = 30_u32.to_biguint().unwrap();
    let nullifier2 = 10_u32.to_biguint().unwrap();
    onchain_queue
        .borrow_mut()
        .insert(&nullifier1, onchain_tree.borrow().sequence_number())
        .unwrap();
    onchain_queue
        .borrow_mut()
        .insert(&nullifier2, onchain_tree.borrow().sequence_number())
        .unwrap();
    // Mirror both appends in the local array and tree.
    let nullifier_bundle = local_indexed_array.append(&nullifier1).unwrap();
    local_merkle_tree
        .update(
            &nullifier_bundle.new_low_element,
            &nullifier_bundle.new_element,
            &nullifier_bundle.new_element_next_value,
        )
        .unwrap();
    let nullifier_bundle = local_indexed_array.append(&nullifier2).unwrap();
    local_merkle_tree
        .update(
            &nullifier_bundle.new_low_element,
            &nullifier_bundle.new_element,
            &nullifier_bundle.new_element_next_value,
        )
        .unwrap();
    relayer_update(
        &mut onchain_queue.borrow_mut(),
        &mut onchain_tree.borrow_mut(),
    )
    .unwrap();
    // Try inserting nullifier 20, while pointing to index 1 (value 30) as low
    // nullifier. Point to index 2 (value 10) as next value.
    // Therefore, the new element is lower than the supposed low element.
    let nullifier3 = 20_u32.to_biguint().unwrap();
    onchain_queue
        .borrow_mut()
        .insert(&nullifier3, onchain_tree.borrow().sequence_number())
        .unwrap();
    let changelog_index = onchain_tree.borrow().changelog_index();
    let indexed_changelog_index = onchain_tree.borrow().indexed_changelog_index();
    // Index of our new nullifier in the queue.
    let queue_index = onchain_queue
        .borrow()
        .find_element_index(&nullifier3, None)
        .unwrap()
        .unwrap();
    // (Invalid) low nullifier.
    let low_nullifier = local_indexed_array.get(1).cloned().unwrap();
    let low_nullifier_next_value = local_indexed_array
        .get(usize::from(low_nullifier.next_index))
        .cloned()
        .unwrap()
        .value;
    let mut low_nullifier_proof = local_merkle_tree.get_proof_of_leaf(1, false).unwrap();
    assert!(matches!(
        program_update(
            &mut onchain_queue.borrow_mut(),
            &mut onchain_tree.borrow_mut(),
            changelog_index as u16,
            indexed_changelog_index as u16,
            queue_index as u16,
            low_nullifier,
            &low_nullifier_next_value,
            &mut low_nullifier_proof,
        ),
        Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement)
    ));
    // Try inserting nullifier 50, while pointing to index 0 as low nullifier.
    // Therefore, the new element is greater than next element.
    let nullifier3 = 50_u32.to_biguint().unwrap();
    onchain_queue
        .borrow_mut()
        .insert(&nullifier3, onchain_tree.borrow().sequence_number())
        .unwrap();
    let changelog_index = onchain_tree.borrow().changelog_index();
    let indexed_changelog_index = onchain_tree.borrow().indexed_changelog_index();
    // Index of our new nullifier in the queue.
    let queue_index = onchain_queue
        .borrow()
        .find_element_index(&nullifier3, None)
        .unwrap()
        .unwrap();
    // (Invalid) low nullifier.
    let low_nullifier = local_indexed_array.get(0).cloned().unwrap();
    let low_nullifier_next_value = local_indexed_array
        .get(usize::from(low_nullifier.next_index))
        .cloned()
        .unwrap()
        .value;
    let mut low_nullifier_proof = local_merkle_tree.get_proof_of_leaf(0, false).unwrap();
    assert!(matches!(
        program_update(
            &mut onchain_queue.borrow_mut(),
            &mut onchain_tree.borrow_mut(),
            changelog_index as u16,
            indexed_changelog_index as u16,
            queue_index as u16,
            low_nullifier,
            &low_nullifier_next_value,
            &mut low_nullifier_proof,
        ),
        Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement)
    ));
    // Queue nullifier 45 and build a correct low element for it, but expect
    // the update to fail with an invalid-proof error.
    // NOTE(review): `queue_index` below is NOT recomputed — it still refers
    // to nullifier 50 from the previous lookup. Confirm whether this is
    // intentional (the 50 entry is still unmarked) or an oversight.
    let nullifier4 = 45_u32.to_biguint().unwrap();
    onchain_queue
        .borrow_mut()
        .insert(&nullifier4, onchain_tree.borrow().sequence_number())
        .unwrap();
    let changelog_index = onchain_tree.borrow().changelog_index();
    let indexed_changelog_index = onchain_tree.borrow().indexed_changelog_index();
    let (low_nullifier, low_nullifier_next_value) = local_indexed_array
        .find_low_element_for_nonexistent(&nullifier4)
        .unwrap();
    let mut low_nullifier_proof = local_merkle_tree
        .get_proof_of_leaf(low_nullifier.index(), false)
        .unwrap();
    let result = program_update(
        &mut onchain_queue.borrow_mut(),
        &mut onchain_tree.borrow_mut(),
        changelog_index as u16,
        indexed_changelog_index as u16,
        queue_index as u16,
        low_nullifier,
        &low_nullifier_next_value,
        &mut low_nullifier_proof,
    );
    println!("result {:?}", result);
    assert!(matches!(
        result,
        Err(IndexedMerkleTreeError::ConcurrentMerkleTree(
            ConcurrentMerkleTreeError::InvalidProof(_, _)
        ))
    ));
}
/// Runs [`insert_invalid_low_element`] with the Poseidon hasher.
#[test]
pub fn test_insert_invalid_low_element_poseidon() {
    insert_invalid_low_element::<Poseidon>()
}
/// Checks the Poseidon hash of the zero indexed element (value 0, pointing
/// at index 1) against a hard-coded reference digest.
#[test]
pub fn hash_reference_indexed_element() {
    // Expected Poseidon digest for H(0, 1, HIGHEST_ADDRESS_PLUS_ONE).
    let expected_hash = [
        40, 8, 192, 134, 75, 198, 77, 187, 129, 249, 133, 121, 54, 189, 242, 28, 117, 71, 255,
        32, 155, 52, 136, 196, 99, 146, 204, 174, 160, 238, 0, 110,
    ];
    let next_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10).unwrap();
    let zero_element = IndexedElement::<usize> {
        value: 0.to_biguint().unwrap(),
        index: 0,
        next_index: 1,
    };
    assert_eq!(
        zero_element.hash::<Poseidon>(&next_value).unwrap(),
        expected_hash
    );
}
/// Appends a single element (30) to a fresh reference indexed Merkle tree,
/// asserts the resulting indexed-array entries and leaf hashes, and then
/// generates and verifies a non-inclusion proof for 10.
#[test]
pub fn functional_non_inclusion_test() {
    let mut relayer_indexing_array = IndexedArray::<Poseidon, usize>::default();
    // appends the first element
    let mut relayer_merkle_tree = reference::IndexedMerkleTree::<Poseidon, usize>::new(
        MERKLE_TREE_HEIGHT,
        MERKLE_TREE_CANOPY,
    )
    .unwrap();
    let nullifier1 = 30_u32.to_biguint().unwrap();
    relayer_merkle_tree
        .append(&nullifier1, &mut relayer_indexing_array)
        .unwrap();
    // indexed array:
    // element: 0
    // value: 0
    // next_value: 30
    // index: 0
    // element: 1
    // value: 30
    // next_value: 0
    // index: 1
    // merkle tree:
    // leaf index: 0 = H(0, 1, 30) //Hash(value, next_index, next_value)
    // leaf index: 1 = H(30, 0, 0)
    let indexed_array_element_0 = relayer_indexing_array.get(0).unwrap();
    assert_eq!(indexed_array_element_0.value, 0_u32.to_biguint().unwrap());
    assert_eq!(indexed_array_element_0.next_index, 1);
    assert_eq!(indexed_array_element_0.index, 0);
    let indexed_array_element_1 = relayer_indexing_array.get(1).unwrap();
    assert_eq!(indexed_array_element_1.value, 30_u32.to_biguint().unwrap());
    assert_eq!(indexed_array_element_1.next_index, 0);
    assert_eq!(indexed_array_element_1.index, 1);
    // Leaves must hash (value, next_index, next_value) as documented above.
    let leaf_0 = relayer_merkle_tree.merkle_tree.get_leaf(0);
    let leaf_1 = relayer_merkle_tree.merkle_tree.get_leaf(1);
    assert_eq!(
        leaf_0,
        Poseidon::hashv(&[
            &0_u32.to_biguint().unwrap().to_bytes_be(),
            &1_u32.to_biguint().unwrap().to_bytes_be(),
            &30_u32.to_biguint().unwrap().to_bytes_be()
        ])
        .unwrap()
    );
    assert_eq!(
        leaf_1,
        Poseidon::hashv(&[
            &30_u32.to_biguint().unwrap().to_bytes_be(),
            &0_u32.to_biguint().unwrap().to_bytes_be(),
            &0_u32.to_biguint().unwrap().to_bytes_be()
        ])
        .unwrap()
    );
    // 10 is not in the tree; its low element is the zero element (leaf 0),
    // whose range [0, 30) covers it.
    let non_inclusion_proof = relayer_merkle_tree
        .get_non_inclusion_proof(&10_u32.to_biguint().unwrap(), &relayer_indexing_array)
        .unwrap();
    assert_eq!(non_inclusion_proof.root, relayer_merkle_tree.root());
    assert_eq!(
        non_inclusion_proof.value,
        bigint_to_be_bytes_array::<32>(&10_u32.to_biguint().unwrap()).unwrap()
    );
    assert_eq!(non_inclusion_proof.leaf_lower_range_value, [0; 32]);
    assert_eq!(
        non_inclusion_proof.leaf_higher_range_value,
        bigint_to_be_bytes_array::<32>(&30_u32.to_biguint().unwrap()).unwrap()
    );
    assert_eq!(non_inclusion_proof.leaf_index, 0);
    relayer_merkle_tree
        .verify_non_inclusion_proof(&non_inclusion_proof)
        .unwrap();
}
// /**
// *
// * Range Hash (value, next_index, next_value) -> need next value not next value index
// * Update of a range:
// *  1. Find the low element; the low element points to the next higher element
// * 2. update low element with H (low_value, new_inserted_value_index, new_inserted_value)
// * 3. append the tree with H(new_inserted_value,index_of_next_value, next_value)
// *
// */
// /// This test is generating a situation where the low element has to be patched.
// /// Scenario:
// /// 1. two parties start with the initialized indexing array
// /// 2. both parties compute their values with the empty indexed Merkle tree state
// /// 3. party one inserts first
// /// 4. party two needs to patch the low element because the low element has changed
// /// 5. party two inserts
// Commented because the test is not working
// TODO: figure out address Merkle tree changelog
// #[test]
// pub fn functional_changelog_test() {
// let address_1 = 30_u32.to_biguint().unwrap();
// let address_2 = 10_u32.to_biguint().unwrap();
// cargo test -- --nocapture print_test_data
/// Generates and prints fixture data (roots, leaf hashes, subtrees, proofs
/// and indexed-array states) for a height-4 indexed Merkle tree after a
/// sequence of appends. Run manually via
/// `cargo test -- --nocapture print_test_data`; ignored in normal runs.
#[test]
#[ignore = "only used to generate test data"]
pub fn print_test_data() {
    let mut relayer_indexing_array = IndexedArray::<Poseidon, usize>::default();
    relayer_indexing_array.init().unwrap();
    let mut relayer_merkle_tree =
        reference::IndexedMerkleTree::<Poseidon, usize>::new(4, 0).unwrap();
    relayer_merkle_tree.init().unwrap();
    // Initial (empty) tree root.
    let root = relayer_merkle_tree.root();
    let root_bn = BigUint::from_bytes_be(&root);
    println!("root {:?}", root_bn);
    println!("indexed mt inited root {:?}", relayer_merkle_tree.root());
    let address1 = 30_u32.to_biguint().unwrap();
    let test_address: BigUint = BigUint::from_bytes_be(&[
        171, 159, 63, 33, 62, 94, 156, 27, 61, 216, 203, 164, 91, 229, 110, 16, 230, 124, 129, 133,
        222, 159, 99, 235, 50, 181, 94, 203, 105, 23, 82,
    ]);
    // Non-inclusion proof against the freshly initialized tree.
    let non_inclusion_proof_0 = relayer_merkle_tree
        .get_non_inclusion_proof(&test_address, &relayer_indexing_array)
        .unwrap();
    println!("non inclusion proof init {:?}", non_inclusion_proof_0);
    // First append: address 30.
    relayer_merkle_tree
        .append(&address1, &mut relayer_indexing_array)
        .unwrap();
    println!(
        "indexed mt with one append {:?}",
        relayer_merkle_tree.root()
    );
    let root_bn = BigUint::from_bytes_be(&relayer_merkle_tree.root());
    println!("indexed mt with one append {:?}", root_bn);
    let proof = relayer_merkle_tree.get_proof_of_leaf(2, true).unwrap();
    let leaf = relayer_merkle_tree.merkle_tree.get_leaf(2);
    let leaf_bn = BigUint::from_bytes_be(&leaf);
    println!("(30) leaf_hash[2] = {:?}", leaf_bn);
    let subtrees = relayer_merkle_tree.merkle_tree.get_subtrees();
    for subtree in subtrees {
        let subtree_bn = BigUint::from_bytes_be(&subtree);
        println!("subtree = {:?}", subtree_bn);
    }
    let res = relayer_merkle_tree.merkle_tree.verify(&leaf, &proof, 2);
    println!("verify leaf 2 {:?}", res);
    println!(
        "indexed array state element 0 {:?}",
        relayer_indexing_array.get(0).unwrap()
    );
    println!(
        "indexed array state element 1 {:?}",
        relayer_indexing_array.get(1).unwrap()
    );
    println!(
        "indexed array state element 2 {:?}",
        relayer_indexing_array.get(2).unwrap()
    );
    // Second append: address 42.
    let address2 = 42_u32.to_biguint().unwrap();
    let non_inclusion_proof = relayer_merkle_tree
        .get_non_inclusion_proof(&address2, &relayer_indexing_array)
        .unwrap();
    println!("non inclusion proof address 2 {:?}", non_inclusion_proof);
    relayer_merkle_tree
        .append(&address2, &mut relayer_indexing_array)
        .unwrap();
    println!(
        "indexed mt with two appends {:?}",
        relayer_merkle_tree.root()
    );
    let root_bn = BigUint::from_bytes_be(&relayer_merkle_tree.root());
    println!("indexed mt with two appends {:?}", root_bn);
    println!(
        "indexed array state element 0 {:?}",
        relayer_indexing_array.get(0).unwrap()
    );
    println!(
        "indexed array state element 1 {:?}",
        relayer_indexing_array.get(1).unwrap()
    );
    println!(
        "indexed array state element 2 {:?}",
        relayer_indexing_array.get(2).unwrap()
    );
    println!(
        "indexed array state element 3 {:?}",
        relayer_indexing_array.get(3).unwrap()
    );
    // Third append: address 12.
    let address3 = 12_u32.to_biguint().unwrap();
    let non_inclusion_proof = relayer_merkle_tree
        .get_non_inclusion_proof(&address3, &relayer_indexing_array)
        .unwrap();
    relayer_merkle_tree
        .append(&address3, &mut relayer_indexing_array)
        .unwrap();
    println!(
        "indexed mt with three appends {:?}",
        relayer_merkle_tree.root()
    );
    let root_bn = BigUint::from_bytes_be(&relayer_merkle_tree.root());
    println!("indexed mt with three appends {:?}", root_bn);
    println!("non inclusion proof address 3 {:?}", non_inclusion_proof);
    println!(
        "indexed array state element 0 {:?}",
        relayer_indexing_array.get(0).unwrap()
    );
    println!(
        "indexed array state element 1 {:?}",
        relayer_indexing_array.get(1).unwrap()
    );
    println!(
        "indexed array state element 2 {:?}",
        relayer_indexing_array.get(2).unwrap()
    );
    println!(
        "indexed array state element 3 {:?}",
        relayer_indexing_array.get(3).unwrap()
    );
    println!(
        "indexed array state element 4 {:?}",
        relayer_indexing_array.get(4).unwrap()
    );
    // // indexed array:
    // // element: 0
    // // value: 0
    // // next_value: 30
    // // index: 0
    // // element: 1
    // // value: 30
    // // next_value: 0
    // // index: 1
    // // merkle tree:
    // // leaf index: 0 = H(0, 1, 30) //Hash(value, next_index, next_value)
    // // leaf index: 1 = H(30, 0, 0)
    // let indexed_array_element_0 = relayer_indexing_array.get(0).unwrap();
    // assert_eq!(indexed_array_element_0.value, 0_u32.to_biguint().unwrap());
    // assert_eq!(indexed_array_element_0.next_index, 1);
    // assert_eq!(indexed_array_element_0.index, 0);
    // let indexed_array_element_1 = relayer_indexing_array.get(1).unwrap();
    // assert_eq!(indexed_array_element_1.value, 30_u32.to_biguint().unwrap());
    // assert_eq!(indexed_array_element_1.next_index, 0);
    // assert_eq!(indexed_array_element_1.index, 1);
    // let leaf_0 = relayer_merkle_tree.merkle_tree.leaf(0);
    // let leaf_1 = relayer_merkle_tree.merkle_tree.leaf(1);
    // assert_eq!(
    //     leaf_0,
    //     Poseidon::hashv(&[
    //         &0_u32.to_biguint().unwrap().to_bytes_be(),
    //         &1_u32.to_biguint().unwrap().to_bytes_be(),
    //         &30_u32.to_biguint().unwrap().to_bytes_be()
    //     ])
    //     .unwrap()
    // );
    // assert_eq!(
    //     leaf_1,
    //     Poseidon::hashv(&[
    //         &30_u32.to_biguint().unwrap().to_bytes_be(),
    //         &0_u32.to_biguint().unwrap().to_bytes_be(),
    //         &0_u32.to_biguint().unwrap().to_bytes_be()
    //     ])
    //     .unwrap()
    // );
    // let non_inclusion_proof = relayer_merkle_tree
    //     .get_non_inclusion_proof(&10_u32.to_biguint().unwrap(), &relayer_indexing_array)
    //     .unwrap();
    // assert_eq!(non_inclusion_proof.root, relayer_merkle_tree.root());
    // assert_eq!(
    //     non_inclusion_proof.value,
    //     bigint_to_be_bytes_array::<32>(&10_u32.to_biguint().unwrap()).unwrap()
    // );
    // assert_eq!(non_inclusion_proof.leaf_lower_range_value, [0; 32]);
    // assert_eq!(
    //     non_inclusion_proof.leaf_higher_range_value,
    //     bigint_to_be_bytes_array::<32>(&30_u32.to_biguint().unwrap()).unwrap()
    // );
    // assert_eq!(non_inclusion_proof.leaf_index, 0);
    // relayer_merkle_tree
    //     .verify_non_inclusion_proof(&non_inclusion_proof)
    //     .unwrap();
}
/// Performs conflicting Merkle tree updates where:
///
/// 1. Party one inserts 30.
/// 2. Party two inserts 10.
/// 3. Party three inserts 11.
///
/// In this case, party two needs to update:
///
/// * The inserted element (10) to point to 30 as the next one.
#[test]
fn functional_changelog_test_1() {
    let address_1 = 30_u32.to_biguint().unwrap();
    let address_2 = 10_u32.to_biguint().unwrap();
    let address_3 = 11_u32.to_biguint().unwrap();
    const HEIGHT: usize = 10;
    perform_change_log_test::<false, false, HEIGHT, 16, 16, 0, 16, HEIGHT>(&[
        address_1, address_2, address_3,
    ]);
}
/// Performs conflicting Merkle tree updates where:
///
/// 1. Party one inserts 10.
/// 2. Party two inserts 30.
///
/// In this case, party two needs to update:
///
/// * The low element from 0 to 10.
#[test]
fn functional_changelog_test_2() {
    let address_1 = 10_u32.to_biguint().unwrap();
    let address_2 = 30_u32.to_biguint().unwrap();
    const HEIGHT: usize = 10;
    // Both parties use the same (stale) changelog indices captured before
    // either insertion, forcing the second update to be patched.
    perform_change_log_test::<false, false, HEIGHT, 16, 16, 0, 16, HEIGHT>(&[address_1, address_2]);
}
/// Performs conflicting Merkle tree updates where:
///
/// 1. Party one inserts 30.
/// 2. Party two inserts 10.
/// 3. Party three inserts 20.
///
/// In this case:
///
/// * Party one:
///   * Updates the inserted element (10) to point to 30 as the next one.
/// * Party two:
///   * Updates the low element from 0 to 10.
#[test]
fn functional_changelog_test_3() {
    let address_1 = 30_u32.to_biguint().unwrap();
    let address_2 = 10_u32.to_biguint().unwrap();
    let address_3 = 20_u32.to_biguint().unwrap();
    const HEIGHT: usize = 10;
    perform_change_log_test::<false, false, HEIGHT, 16, 16, 0, 16, HEIGHT>(&[
        address_1, address_2, address_3,
    ]);
}
/// Performs conflicting Merkle tree updates where two parties try to insert
/// the same element. With `DOUBLE_SPEND = true`, the second insertion is
/// expected to fail inside `perform_change_log_test`.
#[test]
fn functional_changelog_test_double_spend() {
    let address = 10_u32.to_biguint().unwrap();
    const HEIGHT: usize = 10;
    // Only the first slot needs a clone; `address` can be moved into the
    // last slot (avoids clippy::redundant_clone).
    perform_change_log_test::<true, false, HEIGHT, 16, 16, 0, 16, HEIGHT>(&[
        address.clone(),
        address,
    ]);
}
/// Random concurrent updates with an indexed changelog large enough
/// (512 entries for 128 operations) that no wrap-around occurs; all updates
/// are expected to succeed via changelog patching.
#[test]
fn functional_changelog_test_random_8_512_512_0_512() {
    const HEIGHT: usize = 8;
    const CHANGELOG: usize = 512;
    const ROOTS: usize = 512;
    const CANOPY: usize = 0;
    const INDEXED_CHANGELOG: usize = 512;
    // Half the tree's leaf capacity.
    const N_OPERATIONS: usize = (1 << HEIGHT) / 2;
    const NET_HEIGHT: usize = HEIGHT - CANOPY;
    functional_changelog_test_random::<
        false,
        HEIGHT,
        CHANGELOG,
        ROOTS,
        CANOPY,
        INDEXED_CHANGELOG,
        N_OPERATIONS,
        NET_HEIGHT,
    >()
}
/// Performs concurrent updates, where the indexed changelog eventually wraps
/// around. Updates with an old proof and old changelog index are expected to
/// fail.
#[test]
fn functional_changelog_test_random_wrap_around_8_128_512_0_512() {
    const HEIGHT: usize = 8;
    const CHANGELOG: usize = 512;
    const ROOTS: usize = 512;
    const CANOPY: usize = 0;
    // Indexed changelog (128) is smaller than 2 * N_OPERATIONS (256), so it
    // wraps around mid-run.
    const INDEXED_CHANGELOG: usize = 128;
    const N_OPERATIONS: usize = (1 << HEIGHT) / 2;
    const NET_HEIGHT: usize = HEIGHT - CANOPY;
    // Repeat to exercise many random address orderings.
    for _ in 0..100 {
        functional_changelog_test_random::<
            true,
            HEIGHT,
            CHANGELOG,
            ROOTS,
            CANOPY,
            INDEXED_CHANGELOG,
            N_OPERATIONS,
            NET_HEIGHT,
        >()
    }
}
/// Performs `N_OPERATIONS` concurrent updates with random elements. All of them without
/// updating the changelog indices. All of them should result in using indexed changelog
/// for patching the proof.
fn functional_changelog_test_random<
const WRAP_AROUND: bool,
const HEIGHT: usize,
const CHANGELOG: usize,
const ROOTS: usize,
const CANOPY: usize,
const INDEXED_CHANGELOG: usize,
const N_OPERATIONS: usize,
const NET_HEIGHT: usize,
>() {
let mut rng = thread_rng();
let leaves: Vec<BigUint> = (0..N_OPERATIONS).map(|_| rng.gen_biguint(248)).collect();
perform_change_log_test::<
false,
WRAP_AROUND,
HEIGHT,
CHANGELOG,
ROOTS,
CANOPY,
INDEXED_CHANGELOG,
NET_HEIGHT,
>(&leaves);
}
/// Performs conflicting Merkle tree updates where multiple actors try to
/// add new ranges while using the same (for most of the actors — outdated)
/// Merkle proofs and changelog indices.
///
/// Scenario:
///
/// 1. Two parties start with the same indexed array state.
/// 2. Both parties compute their values with the same indexed Merkle tree
/// state.
/// 3. Party one inserts first.
/// 4. Party two needs to patch the low element, because the low element has
/// changed.
/// 5. Party two inserts.
/// 6. Party N needs to patch the low element, because the low element has
/// changed.
/// 7. Party N inserts.
///
/// `DOUBLE_SPEND` indicates whether the provided addresses are an attempt to
/// double-spend by the subsequent parties. When set to `true`, we expect
/// subsequent updates to fail.
fn perform_change_log_test<
    const DOUBLE_SPEND: bool,
    const WRAP_AROUND: bool,
    const HEIGHT: usize,
    const CHANGELOG: usize,
    const ROOTS: usize,
    const CANOPY: usize,
    const INDEXED_CHANGELOG: usize,
    const NET_HEIGHT: usize,
>(
    addresses: &[BigUint],
) {
    // Initialize the trees and indexed array. The "relayer" structures model
    // an off-chain actor's view; the "onchain" tree models the shared program
    // state which all parties update concurrently.
    let mut relayer_indexed_array = IndexedArray::<Poseidon, usize>::default();
    relayer_indexed_array.init().unwrap();
    let mut relayer_merkle_tree =
        reference::IndexedMerkleTree::<Poseidon, usize>::new(HEIGHT, CANOPY).unwrap();
    let mut onchain_indexed_merkle_tree =
        IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::new(
            HEIGHT,
            CHANGELOG,
            ROOTS,
            CANOPY,
            INDEXED_CHANGELOG,
        )
        .unwrap();
    onchain_indexed_merkle_tree.init().unwrap();
    onchain_indexed_merkle_tree.add_highest_element().unwrap();
    relayer_merkle_tree.init().unwrap();
    assert_eq!(
        relayer_merkle_tree.root(),
        onchain_indexed_merkle_tree.root(),
        "environment setup failed relayer and onchain indexed Merkle tree roots are inconsistent"
    );
    // Perform updates for each actor, where each of them is using the same
    // changelog indices, generating a conflict which needs to be solved by
    // patching from changelog.
    // Every party gets its own (soon to be stale) clone of the indexed array,
    // while the changelog indices are captured once, before any update.
    let mut indexed_arrays = vec![relayer_indexed_array.clone(); addresses.len()];
    let changelog_index = onchain_indexed_merkle_tree.changelog_index();
    let indexed_changelog_index = onchain_indexed_merkle_tree.indexed_changelog_index();
    for (i, (address, indexed_array)) in addresses.iter().zip(indexed_arrays.iter_mut()).enumerate()
    {
        // Derive the low element, the new element bundle and the Merkle
        // proof from the party's stale local view.
        let (old_low_address, old_low_address_next_value) = indexed_array
            .find_low_element_for_nonexistent(&address)
            .unwrap();
        let address_bundle = indexed_array
            .new_element_with_low_element_index(old_low_address.index, address)
            .unwrap();
        let mut low_element_proof = relayer_merkle_tree
            .get_proof_of_leaf(old_low_address.index, false)
            .unwrap();
        if DOUBLE_SPEND && i > 0 {
            // Double-spend case: parties after the first one try to insert
            // an address which (after patching) already exists; the update
            // must be rejected.
            let res = onchain_indexed_merkle_tree.update(
                changelog_index,
                indexed_changelog_index,
                address_bundle.new_element.value,
                old_low_address,
                old_low_address_next_value,
                &mut low_element_proof,
            );
            assert!(matches!(
                res,
                Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement)
            ));
        } else if WRAP_AROUND && (i + 1) * 2 > INDEXED_CHANGELOG {
            // After a wrap-around of the indexed changelog, we expect leaf
            // updates to break immediately.
            // (Each insertion pushes 2 indexed changelog entries, hence the
            // `(i + 1) * 2` threshold.)
            let res = onchain_indexed_merkle_tree.update(
                changelog_index,
                indexed_changelog_index,
                address_bundle.new_element.value.clone(),
                old_low_address.clone(),
                old_low_address_next_value,
                &mut low_element_proof,
            );
            println!("changelog_index {:?}", changelog_index);
            println!("indexed_changelog_index {:?}", indexed_changelog_index);
            println!(
                "address_bundle new_element_next_value{:?}",
                address_bundle.new_element_next_value
            );
            println!(
                "address_bundle new_element {:?}",
                address_bundle.new_element
            );
            println!("old_low_address {:?}", old_low_address);
            println!("res {:?}", res);
            assert!(matches!(
                res,
                Err(IndexedMerkleTreeError::ConcurrentMerkleTree(
                    ConcurrentMerkleTreeError::CannotUpdateLeaf
                ))
            ));
        } else {
            // Happy path: the update succeeds because the stale inputs are
            // patched from the indexed changelog.
            onchain_indexed_merkle_tree
                .update(
                    changelog_index,
                    indexed_changelog_index,
                    address_bundle.new_element.value,
                    old_low_address,
                    old_low_address_next_value,
                    &mut low_element_proof,
                )
                .unwrap();
            for i in onchain_indexed_merkle_tree.changelog.iter() {
                println!("indexed array state element {:?} ", i);
            }
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/copy.rs
|
use std::{fmt, marker::PhantomData, ops::Deref};
use crate::{errors::IndexedMerkleTreeError, IndexedMerkleTree};
use light_bounded_vec::CyclicBoundedVecMetadata;
use light_concurrent_merkle_tree::{
copy::ConcurrentMerkleTreeCopy, errors::ConcurrentMerkleTreeError,
};
use light_hasher::Hasher;
use light_utils::offset::copy::{read_cyclic_bounded_vec_at, read_value_at};
use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned};
/// Owned copy of an [`IndexedMerkleTree`] deserialized from account bytes.
///
/// Unlike the zero-copy wrappers, the wrapped tree owns its data and is
/// independent of the source buffer. Dereferences to the inner tree.
#[derive(Debug)]
pub struct IndexedMerkleTreeCopy<H, I, const HEIGHT: usize, const NET_HEIGHT: usize>(
    IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>,
)
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>;
impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize>
    IndexedMerkleTreeCopy<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    /// Copies an `IndexedMerkleTree`, including its dynamic fields, out of
    /// the given byte slice into an owned structure.
    ///
    /// # Purpose
    ///
    /// Unlike the zero-copy variants, this constructor copies the data out
    /// of `bytes`, so the result does not borrow the buffer. It is meant for
    /// off-chain consumers (indexers, tests) that want an owned snapshot of
    /// the on-chain account.
    pub fn from_bytes_copy(bytes: &[u8]) -> Result<Self, IndexedMerkleTreeError> {
        // Copy the concurrent Merkle tree part first; `offset` is left
        // pointing at the indexed changelog metadata that follows it.
        let (merkle_tree, mut offset) =
            ConcurrentMerkleTreeCopy::<H, HEIGHT>::struct_from_bytes_copy(bytes)?;
        // SAFETY-relevant: `read_value_at` assumes `offset` points at a valid
        // `CyclicBoundedVecMetadata`; the buffer length is validated below
        // before the changelog contents are read.
        let indexed_changelog_metadata: CyclicBoundedVecMetadata =
            unsafe { read_value_at(bytes, &mut offset) };
        let expected_size = IndexedMerkleTree::<H, I, HEIGHT, NET_HEIGHT>::size_in_account(
            merkle_tree.height,
            merkle_tree.changelog.capacity(),
            merkle_tree.roots.capacity(),
            merkle_tree.canopy_depth,
            indexed_changelog_metadata.capacity(),
        );
        if bytes.len() < expected_size {
            return Err(IndexedMerkleTreeError::ConcurrentMerkleTree(
                ConcurrentMerkleTreeError::BufferSize(expected_size, bytes.len()),
            ));
        }
        let indexed_changelog =
            unsafe { read_cyclic_bounded_vec_at(bytes, &mut offset, &indexed_changelog_metadata) };
        Ok(Self(IndexedMerkleTree {
            merkle_tree,
            indexed_changelog,
            _index: PhantomData,
        }))
    }
}
// Read-only access to the wrapped tree; the inner field stays private.
impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> Deref
    for IndexedMerkleTreeCopy<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    type Target = IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[cfg(test)]
mod test {
    use light_hasher::Poseidon;
    use light_utils::bigint::bigint_to_be_bytes_array;
    use num_bigint::RandBigInt;
    use rand::thread_rng;
    use crate::zero_copy::IndexedMerkleTreeZeroCopyMut;
    use super::*;
    // Builds a tree in an owned struct and, in parallel, in a raw byte
    // buffer (via the zero-copy wrapper), performs `OPERATIONS` random
    // appends on both, and finally checks that `from_bytes_copy` on the
    // buffer reproduces the owned tree exactly.
    fn from_bytes_copy<
        const HEIGHT: usize,
        const CHANGELOG_SIZE: usize,
        const ROOTS: usize,
        const CANOPY_DEPTH: usize,
        const INDEXED_CHANGELOG_SIZE: usize,
        const OPERATIONS: usize,
        const NET_HEIGHT: usize,
    >() {
        let mut mt_1 = IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::new(
            HEIGHT,
            CHANGELOG_SIZE,
            ROOTS,
            CANOPY_DEPTH,
            INDEXED_CHANGELOG_SIZE,
        )
        .unwrap();
        mt_1.init().unwrap();
        // Buffer sized exactly like the on-chain account.
        let mut bytes = vec![
            0u8;
            IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::size_in_account(
                HEIGHT,
                CHANGELOG_SIZE,
                ROOTS,
                CANOPY_DEPTH,
                INDEXED_CHANGELOG_SIZE
            )
        ];
        {
            let mut mt_2 =
                IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, HEIGHT, NET_HEIGHT>::from_bytes_zero_copy_init(
                    &mut bytes,
                    HEIGHT,
                    CANOPY_DEPTH,
                    CHANGELOG_SIZE,
                    ROOTS,
                    INDEXED_CHANGELOG_SIZE,
                )
                .unwrap();
            mt_2.init().unwrap();
            assert_eq!(mt_1, *mt_2);
        }
        let mut rng = thread_rng();
        for _ in 0..OPERATIONS {
            // Reload the tree from bytes on each iteration.
            let mut mt_2 =
                IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, HEIGHT,NET_HEIGHT>::from_bytes_zero_copy_mut(
                    &mut bytes,
                )
                .unwrap();
            // 248-bit values fit in the field used by Poseidon.
            let leaf: [u8; 32] = bigint_to_be_bytes_array::<32>(&rng.gen_biguint(248)).unwrap();
            mt_1.append(&leaf).unwrap();
            mt_2.append(&leaf).unwrap();
            assert_eq!(mt_1, *mt_2);
        }
        // Read a copy of that Merkle tree.
        let mt_2 =
            IndexedMerkleTreeCopy::<Poseidon, usize, HEIGHT, NET_HEIGHT>::from_bytes_copy(&bytes)
                .unwrap();
        assert_eq!(mt_1, *mt_2);
    }
    #[test]
    fn test_from_bytes_copy_26_1400_2400_10_256_1024() {
        const HEIGHT: usize = 26;
        const CHANGELOG_SIZE: usize = 1400;
        const ROOTS: usize = 2400;
        const CANOPY_DEPTH: usize = 10;
        const INDEXED_CHANGELOG_SIZE: usize = 256;
        // NET_HEIGHT = HEIGHT - CANOPY_DEPTH.
        const NET_HEIGHT: usize = 16;
        const OPERATIONS: usize = 1024;
        from_bytes_copy::<
            HEIGHT,
            CHANGELOG_SIZE,
            ROOTS,
            CANOPY_DEPTH,
            INDEXED_CHANGELOG_SIZE,
            OPERATIONS,
            NET_HEIGHT,
        >()
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/zero_copy.rs
|
use std::{
fmt,
marker::PhantomData,
mem,
ops::{Deref, DerefMut},
};
use light_bounded_vec::{CyclicBoundedVec, CyclicBoundedVecMetadata};
use light_concurrent_merkle_tree::{
errors::ConcurrentMerkleTreeError,
zero_copy::{ConcurrentMerkleTreeZeroCopy, ConcurrentMerkleTreeZeroCopyMut},
ConcurrentMerkleTree,
};
use light_hasher::Hasher;
use light_utils::offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at};
use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned};
use crate::{errors::IndexedMerkleTreeError, IndexedMerkleTree};
/// Immutable zero-copy view of an [`IndexedMerkleTree`] living inside a byte
/// buffer (e.g. a Solana account). No tree data is copied; the wrapped
/// struct's dynamic vectors point directly into the buffer.
#[derive(Debug)]
pub struct IndexedMerkleTreeZeroCopy<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    // ManuallyDrop: the vectors point into `_bytes`, so the tree must never
    // run its destructor and try to free buffer-owned memory.
    pub merkle_tree: mem::ManuallyDrop<IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>>,
    // The purpose of this field is ensuring that the wrapper does not outlive
    // the buffer.
    _bytes: &'a [u8],
}
impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize>
    IndexedMerkleTreeZeroCopy<'a, H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    /// Returns a zero-copy wrapper of `IndexedMerkleTree` created from the
    /// data in the provided `bytes` buffer.
    ///
    /// Returns `BufferSize` if `bytes` is smaller than the size implied by
    /// the metadata stored in the buffer itself.
    pub fn from_bytes_zero_copy(bytes: &'a [u8]) -> Result<Self, IndexedMerkleTreeError> {
        let (merkle_tree, mut offset) =
            ConcurrentMerkleTreeZeroCopy::struct_from_bytes_zero_copy(bytes)?;
        // SAFETY-relevant: assumes the buffer was laid out by
        // `from_bytes_zero_copy_init` (or an equivalent writer), so `offset`
        // points at a valid `CyclicBoundedVecMetadata`. The size check below
        // guards the overall buffer length before the changelog is mapped.
        let indexed_changelog_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let expected_size = IndexedMerkleTree::<H, I, HEIGHT, NET_HEIGHT>::size_in_account(
            merkle_tree.height,
            merkle_tree.changelog.capacity(),
            merkle_tree.roots.capacity(),
            merkle_tree.canopy_depth,
            unsafe { (*indexed_changelog_metadata).capacity() },
        );
        if bytes.len() < expected_size {
            return Err(IndexedMerkleTreeError::ConcurrentMerkleTree(
                ConcurrentMerkleTreeError::BufferSize(expected_size, bytes.len()),
            ));
        }
        // Map the changelog storage in place; both the metadata and the
        // element array continue to live inside `bytes`.
        let indexed_changelog = unsafe {
            CyclicBoundedVec::from_raw_parts(
                indexed_changelog_metadata,
                read_array_like_ptr_at(
                    bytes,
                    &mut offset,
                    (*indexed_changelog_metadata).capacity(),
                ),
            )
        };
        Ok(Self {
            merkle_tree: mem::ManuallyDrop::new(IndexedMerkleTree {
                merkle_tree,
                indexed_changelog,
                _index: PhantomData,
            }),
            _bytes: bytes,
        })
    }
}
// Read-only access to the mapped tree.
impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> Deref
    for IndexedMerkleTreeZeroCopy<'a, H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    type Target = IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>;
    fn deref(&self) -> &Self::Target {
        &self.merkle_tree
    }
}
/// Mutable zero-copy view of an [`IndexedMerkleTree`] living inside a byte
/// buffer. Thin newtype over [`IndexedMerkleTreeZeroCopy`] that additionally
/// implements `DerefMut`.
#[derive(Debug)]
pub struct IndexedMerkleTreeZeroCopyMut<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize>(
    IndexedMerkleTreeZeroCopy<'a, H, I, HEIGHT, NET_HEIGHT>,
)
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>;
impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize>
    IndexedMerkleTreeZeroCopyMut<'a, H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    /// Returns a mutable zero-copy wrapper over a buffer that already
    /// contains an initialized tree (see [`Self::from_bytes_zero_copy_init`]).
    pub fn from_bytes_zero_copy_mut(bytes: &'a mut [u8]) -> Result<Self, IndexedMerkleTreeError> {
        Ok(Self(IndexedMerkleTreeZeroCopy::from_bytes_zero_copy(
            bytes,
        )?))
    }
    /// Writes the structural metadata of a new tree with the given
    /// parameters into `bytes` and returns a mutable zero-copy wrapper over
    /// it. The caller still has to call `init()` on the result to create the
    /// initial leaves/roots.
    pub fn from_bytes_zero_copy_init(
        bytes: &'a mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
        indexed_changelog_capacity: usize,
    ) -> Result<Self, IndexedMerkleTreeError> {
        // Fill the concurrent Merkle tree header first.
        let _ = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::fill_non_dyn_fields_in_buffer(
            bytes,
            height,
            canopy_depth,
            changelog_capacity,
            roots_capacity,
        )?;
        let expected_size = IndexedMerkleTree::<H, I, HEIGHT, NET_HEIGHT>::size_in_account(
            height,
            changelog_capacity,
            roots_capacity,
            canopy_depth,
            indexed_changelog_capacity,
        );
        if bytes.len() < expected_size {
            return Err(IndexedMerkleTreeError::ConcurrentMerkleTree(
                ConcurrentMerkleTreeError::BufferSize(expected_size, bytes.len()),
            ));
        }
        // The indexed changelog metadata sits directly after the concurrent
        // Merkle tree's account region.
        let mut offset = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            changelog_capacity,
            roots_capacity,
            canopy_depth,
        );
        let indexed_changelog_metadata = CyclicBoundedVecMetadata::new(indexed_changelog_capacity);
        write_at::<CyclicBoundedVecMetadata>(
            bytes,
            &indexed_changelog_metadata.to_le_bytes(),
            &mut offset,
        );
        // Re-parse the buffer we just wrote to obtain the wrapper.
        Self::from_bytes_zero_copy_mut(bytes)
    }
}
// Read-only access to the mapped tree.
impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> Deref
    for IndexedMerkleTreeZeroCopyMut<'a, H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    type Target = IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>;
    fn deref(&self) -> &Self::Target {
        &self.0.merkle_tree
    }
}
// Mutable access; writes go straight into the underlying buffer.
impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> DerefMut
    for IndexedMerkleTreeZeroCopyMut<'a, H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0.merkle_tree
    }
}
#[cfg(test)]
mod test {
    use light_hasher::Poseidon;
    use light_utils::bigint::bigint_to_be_bytes_array;
    use num_bigint::RandBigInt;
    use rand::thread_rng;
    use super::*;
    // Builds the same tree twice - once as an owned struct, once inside a
    // raw byte buffer via the zero-copy wrappers - performs `OPERATIONS`
    // random appends on both and asserts equality after every step.
    fn from_bytes_zero_copy<
        const HEIGHT: usize,
        const NET_HEIGHT: usize,
        const CHANGELOG_SIZE: usize,
        const ROOTS: usize,
        const CANOPY_DEPTH: usize,
        const INDEXED_CHANGELOG_SIZE: usize,
        const OPERATIONS: usize,
    >() {
        let mut mt_1 = IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::new(
            HEIGHT,
            CHANGELOG_SIZE,
            ROOTS,
            CANOPY_DEPTH,
            INDEXED_CHANGELOG_SIZE,
        )
        .unwrap();
        mt_1.init().unwrap();
        // Buffer sized exactly like the on-chain account.
        let mut bytes = vec![
            0u8;
            IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::size_in_account(
                HEIGHT,
                CHANGELOG_SIZE,
                ROOTS,
                CANOPY_DEPTH,
                INDEXED_CHANGELOG_SIZE
            )
        ];
        {
            let mut mt_2 =
                IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, HEIGHT, NET_HEIGHT>::from_bytes_zero_copy_init(
                    &mut bytes,
                    HEIGHT,
                    CANOPY_DEPTH,
                    CHANGELOG_SIZE,
                    ROOTS,
                    INDEXED_CHANGELOG_SIZE,
                )
                .unwrap();
            mt_2.init().unwrap();
            assert_eq!(mt_1, *mt_2);
        }
        let mut rng = thread_rng();
        for _ in 0..OPERATIONS {
            // Reload the tree from bytes on each iteration.
            let mut mt_2 =
                IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, HEIGHT,NET_HEIGHT>::from_bytes_zero_copy_mut(
                    &mut bytes,
                )
                .unwrap();
            // 248-bit values fit in the field used by Poseidon.
            let leaf: [u8; 32] = bigint_to_be_bytes_array::<32>(&rng.gen_biguint(248)).unwrap();
            mt_1.append(&leaf).unwrap();
            mt_2.append(&leaf).unwrap();
            assert_eq!(mt_1, *mt_2);
        }
    }
    #[test]
    fn test_from_bytes_zero_copy_26_1400_2400_10_256_1024() {
        const HEIGHT: usize = 26;
        // NET_HEIGHT = HEIGHT - CANOPY_DEPTH.
        const NET_HEIGHT: usize = 16;
        const CHANGELOG_SIZE: usize = 1400;
        const ROOTS: usize = 2400;
        const CANOPY_DEPTH: usize = 10;
        const INDEXED_CHANGELOG_SIZE: usize = 256;
        const OPERATIONS: usize = 1024;
        from_bytes_zero_copy::<
            HEIGHT,
            NET_HEIGHT,
            CHANGELOG_SIZE,
            ROOTS,
            CANOPY_DEPTH,
            INDEXED_CHANGELOG_SIZE,
            OPERATIONS,
        >()
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/lib.rs
|
use std::{
fmt,
marker::PhantomData,
mem,
ops::{Deref, DerefMut},
};
use array::{IndexedArray, IndexedElement};
use changelog::IndexedChangelogEntry;
use light_bounded_vec::{BoundedVec, CyclicBoundedVec, CyclicBoundedVecMetadata};
use light_concurrent_merkle_tree::{
errors::ConcurrentMerkleTreeError,
event::{IndexedMerkleTreeUpdate, RawIndexedElement},
light_hasher::Hasher,
ConcurrentMerkleTree,
};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::BigUint;
use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned};
pub mod array;
pub mod changelog;
pub mod copy;
pub mod errors;
pub mod reference;
pub mod zero_copy;
use crate::errors::IndexedMerkleTreeError;
/// Decimal string equal to 2^248 - 1 — the value of the sentinel "highest"
/// element inserted by `add_highest_element`, bounding the 248-bit address
/// space. (Name suggests "highest address + 1"; presumably the highest usable
/// address is 2^248 - 2 — confirm against `IndexedArray::init`.)
pub const HIGHEST_ADDRESS_PLUS_ONE: &str =
    "452312848583266388373324160190187140051835877600158453279131187530910662655";
/// A concurrent Merkle tree over "indexed" elements - leaves forming a
/// linked list sorted by value, enabling non-inclusion proofs.
///
/// On top of the regular changelog of the wrapped `ConcurrentMerkleTree`, it
/// keeps an `indexed_changelog` recording element mutations, so that updates
/// built from stale state can be patched (see `patch_elements_and_proof`).
///
/// `NET_HEIGHT` is the tree height minus the canopy depth - the length of
/// the Merkle proofs stored in the indexed changelog.
#[derive(Debug)]
#[repr(C)]
pub struct IndexedMerkleTree<H, I, const HEIGHT: usize, const NET_HEIGHT: usize>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    pub merkle_tree: ConcurrentMerkleTree<H, HEIGHT>,
    // Cyclic log of indexed-element updates (element + proof + regular
    // changelog index); consumed by `patch_elements_and_proof`.
    pub indexed_changelog: CyclicBoundedVec<IndexedChangelogEntry<I, NET_HEIGHT>>,
    // Zero-sized marker tying the index type `I` to the struct.
    _index: PhantomData<I>,
}
/// Indexed Merkle tree of height 26 with a net (proof) height of 16, i.e. a canopy depth of 10.
pub type IndexedMerkleTree26<H, I> = IndexedMerkleTree<H, I, 26, 16>;
impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>
where
H: Hasher,
I: CheckedAdd
+ CheckedSub
+ Copy
+ Clone
+ fmt::Debug
+ PartialOrd
+ ToBytes
+ TryFrom<usize>
+ Unsigned,
usize: From<I>,
{
    /// Size of the struct **without** dynamically sized fields (`BoundedVec`,
    /// `CyclicBoundedVec`).
    ///
    /// This is the wrapped tree's fixed-size header plus the metadata of the
    /// indexed changelog.
    pub fn non_dyn_fields_size() -> usize {
        ConcurrentMerkleTree::<H, HEIGHT>::non_dyn_fields_size()
        // indexed_changelog (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
    }
    // TODO(vadorovsky): Make a macro for that.
    /// Total number of bytes the tree occupies in an account buffer for the
    /// given parameters: the wrapped concurrent tree, the indexed changelog
    /// metadata and `indexed_changelog_size` changelog entries.
    pub fn size_in_account(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
        indexed_changelog_size: usize,
    ) -> usize {
        ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            changelog_size,
            roots_size,
            canopy_depth,
        )
        // indexed_changelog (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
        // indexed_changelog
        + mem::size_of::<IndexedChangelogEntry<I, NET_HEIGHT>>() * indexed_changelog_size
    }
    /// Creates a new, heap-allocated indexed Merkle tree with the given
    /// parameters. The tree is empty until `init()` is called.
    ///
    /// # Errors
    ///
    /// Propagates errors from the underlying `ConcurrentMerkleTree`
    /// constructor (e.g. invalid height/capacity parameters).
    pub fn new(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
        indexed_changelog_size: usize,
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        let merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(
            height,
            changelog_size,
            roots_size,
            canopy_depth,
        )?;
        Ok(Self {
            merkle_tree,
            indexed_changelog: CyclicBoundedVec::with_capacity(indexed_changelog_size),
            _index: PhantomData,
        })
    }
    /// Initializes the tree: sets up the wrapped concurrent Merkle tree,
    /// appends the zero-valued low leaf and seeds the indexed changelog.
    pub fn init(&mut self) -> Result<(), IndexedMerkleTreeError> {
        self.merkle_tree.init()?;
        // Append the first low leaf, which has value 0 and does not point
        // to any other leaf yet.
        // This low leaf is going to be updated during the first `update`
        // operation.
        self.merkle_tree.append(&H::zero_indexed_leaf())?;
        // Emit first changelog entries.
        let element = RawIndexedElement {
            value: [0_u8; 32],
            next_index: I::zero(),
            next_value: [0_u8; 32],
            index: I::zero(),
        };
        let changelog_entry = IndexedChangelogEntry {
            element,
            proof: H::zero_bytes()[..NET_HEIGHT].try_into().unwrap(),
            changelog_index: 0,
        };
        // Pushed twice so that the changelog has entries for both the low
        // and the (future) next element slots from the start.
        self.indexed_changelog.push(changelog_entry.clone());
        self.indexed_changelog.push(changelog_entry);
        Ok(())
    }
    /// Add the highest element with a maximum value allowed by the prime
    /// field.
    ///
    /// Initializing an indexed Merkle tree not only with the lowest element
    /// (mandatory for the IMT algorithm to work), but also the highest element,
    /// makes non-inclusion proofs easier - there is no special case needed for
    /// the first insertion.
    ///
    /// However, it comes with a tradeoff - the space available in the tree
    /// becomes lower by 1.
    pub fn add_highest_element(&mut self) -> Result<(), IndexedMerkleTreeError> {
        // A scratch indexed array provides the (low, highest) element pair.
        let mut indexed_array = IndexedArray::<H, I>::default();
        let element_bundle = indexed_array.init()?;
        let new_low_leaf = element_bundle
            .new_low_element
            .hash::<H>(&element_bundle.new_element.value)?;
        // The low leaf is still all-zero, so its proof is the zero-bytes path.
        let mut proof = BoundedVec::with_capacity(self.merkle_tree.height);
        for i in 0..self.merkle_tree.height - self.merkle_tree.canopy_depth {
            // PANICS: Calling `unwrap()` pushing into this bounded vec
            // cannot panic since it has enough capacity.
            proof.push(H::zero_bytes()[i]).unwrap();
        }
        let (changelog_index, _) = self.merkle_tree.update(
            self.changelog_index(),
            &H::zero_indexed_leaf(),
            &new_low_leaf,
            0,
            &mut proof,
        )?;
        // Emit changelog for low element.
        let low_element = RawIndexedElement {
            value: bigint_to_be_bytes_array::<32>(&element_bundle.new_low_element.value)?,
            next_index: element_bundle.new_low_element.next_index,
            next_value: bigint_to_be_bytes_array::<32>(&element_bundle.new_element.value)?,
            index: element_bundle.new_low_element.index,
        };
        let low_element_changelog_entry = IndexedChangelogEntry {
            element: low_element,
            proof: H::zero_bytes()[..NET_HEIGHT].try_into().unwrap(),
            changelog_index,
        };
        self.indexed_changelog.push(low_element_changelog_entry);
        // Append the highest element itself as a new leaf.
        let new_leaf = element_bundle
            .new_element
            .hash::<H>(&element_bundle.new_element_next_value)?;
        let mut proof = BoundedVec::with_capacity(self.height);
        let (changelog_index, _) = self.merkle_tree.append_with_proof(&new_leaf, &mut proof)?;
        // Emit changelog for new element.
        let new_element = RawIndexedElement {
            value: bigint_to_be_bytes_array::<32>(&element_bundle.new_element.value)?,
            next_index: element_bundle.new_element.next_index,
            next_value: [0_u8; 32],
            index: element_bundle.new_element.index,
        };
        let new_element_changelog_entry = IndexedChangelogEntry {
            element: new_element,
            proof: proof.as_slice()[..NET_HEIGHT].try_into().unwrap(),
            changelog_index,
        };
        self.indexed_changelog.push(new_element_changelog_entry);
        Ok(())
    }
    /// Returns the index of the most recent indexed changelog entry.
    pub fn indexed_changelog_index(&self) -> usize {
        self.indexed_changelog.last_index()
    }
    /// Checks whether the given Merkle `proof` for the given `leaf` (with
    /// index `leaf_index`) is valid. The proof is valid when computing parent
    /// node hashes using the whole path of the proof gives the same result as
    /// the current root.
    ///
    /// Delegates to the wrapped concurrent Merkle tree; returns its error on
    /// an invalid proof.
    pub fn validate_proof(
        &self,
        leaf: &[u8; 32],
        leaf_index: usize,
        proof: &BoundedVec<[u8; 32]>,
    ) -> Result<(), IndexedMerkleTreeError> {
        self.merkle_tree.validate_proof(leaf, leaf_index, proof)?;
        Ok(())
    }
    /// Iterates over indexed changelog and every time an entry corresponding
    /// to the provided `low_element` is found, it patches:
    ///
    /// * Changelog index - indexed changelog entries contain corresponding
    ///   changelog indices.
    /// * New element - changes might impact the `next_index` field, which in
    ///   such case is updated.
    /// * Low element - it might completely change if a change introduced an
    ///   element in our range.
    /// * Merkle proof.
    ///
    /// Recurses when a concurrent insertion placed a new element inside the
    /// caller's range, making that element the new low element.
    #[allow(clippy::type_complexity)]
    pub fn patch_elements_and_proof(
        &mut self,
        indexed_changelog_index: usize,
        changelog_index: &mut usize,
        new_element: &mut IndexedElement<I>,
        low_element: &mut IndexedElement<I>,
        low_element_next_value: &mut BigUint,
        low_leaf_proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), IndexedMerkleTreeError> {
        // Collect the (cyclic) positions of all changelog entries newer than
        // `indexed_changelog_index` which touched our low element.
        let next_indexed_changelog_indices: Vec<usize> = self
            .indexed_changelog
            .iter_from(indexed_changelog_index)?
            .skip(1)
            .enumerate()
            .filter_map(|(index, changelog_entry)| {
                if changelog_entry.element.index == low_element.index {
                    Some((indexed_changelog_index + 1 + index) % self.indexed_changelog.len())
                } else {
                    None
                }
            })
            .collect();
        let mut new_low_element = None;
        // Replay each relevant changelog entry over the caller's inputs.
        for next_indexed_changelog_index in next_indexed_changelog_indices {
            let changelog_entry = &mut self.indexed_changelog[next_indexed_changelog_index];
            let next_element_value = BigUint::from_bytes_be(&changelog_entry.element.next_value);
            if next_element_value < new_element.value {
                // If the next element is lower than the current element, it means
                // that it should become the low element.
                //
                // Save it and break the loop.
                new_low_element = Some((
                    (next_indexed_changelog_index + 1) % self.indexed_changelog.len(),
                    next_element_value,
                ));
                break;
            }
            // Patch the changelog index.
            *changelog_index = changelog_entry.changelog_index;
            // Patch the `next_index` of `new_element`.
            new_element.next_index = changelog_entry.element.next_index;
            // Patch the element.
            low_element.update_from_raw_element(&changelog_entry.element);
            // Patch the next value.
            *low_element_next_value = BigUint::from_bytes_be(&changelog_entry.element.next_value);
            // Patch the proof.
            for i in 0..low_leaf_proof.len() {
                low_leaf_proof[i] = changelog_entry.proof[i];
            }
        }
        // If we found a new low element.
        if let Some((new_low_element_changelog_index, new_low_element)) = new_low_element {
            let new_low_element_changelog_entry =
                &self.indexed_changelog[new_low_element_changelog_index];
            *changelog_index = new_low_element_changelog_entry.changelog_index;
            *low_element = IndexedElement {
                index: new_low_element_changelog_entry.element.index,
                value: new_low_element.clone(),
                next_index: new_low_element_changelog_entry.element.next_index,
            };
            for i in 0..low_leaf_proof.len() {
                low_leaf_proof[i] = new_low_element_changelog_entry.proof[i];
            }
            new_element.next_index = low_element.next_index;
            // Start the patching process from scratch for the new low element.
            return self.patch_elements_and_proof(
                new_low_element_changelog_index,
                changelog_index,
                new_element,
                low_element,
                low_element_next_value,
                low_leaf_proof,
            );
        }
        Ok(())
    }
pub fn update(
&mut self,
mut changelog_index: usize,
indexed_changelog_index: usize,
new_element_value: BigUint,
mut low_element: IndexedElement<I>,
mut low_element_next_value: BigUint,
low_leaf_proof: &mut BoundedVec<[u8; 32]>,
) -> Result<IndexedMerkleTreeUpdate<I>, IndexedMerkleTreeError> {
let mut new_element = IndexedElement {
index: I::try_from(self.merkle_tree.next_index())
.map_err(|_| IndexedMerkleTreeError::IntegerOverflow)?,
value: new_element_value,
next_index: low_element.next_index,
};
self.patch_elements_and_proof(
indexed_changelog_index,
&mut changelog_index,
&mut new_element,
&mut low_element,
&mut low_element_next_value,
low_leaf_proof,
)?;
// Check that the value of `new_element` belongs to the range
// of `old_low_element`.
if low_element.next_index == I::zero() {
// In this case, the `old_low_element` is the greatest element.
// The value of `new_element` needs to be greater than the value of
// `old_low_element` (and therefore, be the greatest).
if new_element.value <= low_element.value {
return Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement);
}
} else {
// The value of `new_element` needs to be greater than the value of
// `old_low_element` (and therefore, be the greatest).
if new_element.value <= low_element.value {
return Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement);
}
// The value of `new_element` needs to be lower than the value of
// next element pointed by `old_low_element`.
if new_element.value >= low_element_next_value {
return Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement);
}
}
// Instantiate `new_low_element` - the low element with updated values.
let new_low_element = IndexedElement {
index: low_element.index,
value: low_element.value.clone(),
next_index: new_element.index,
};
// Update low element. If the `old_low_element` does not belong to the
// tree, validating the proof is going to fail.
let old_low_leaf = low_element.hash::<H>(&low_element_next_value)?;
let new_low_leaf = new_low_element.hash::<H>(&new_element.value)?;
let (new_changelog_index, _) = self.merkle_tree.update(
changelog_index,
&old_low_leaf,
&new_low_leaf,
low_element.index.into(),
low_leaf_proof,
)?;
// Emit changelog entry for low element.
let new_low_element = RawIndexedElement {
value: bigint_to_be_bytes_array::<32>(&new_low_element.value).unwrap(),
next_index: new_low_element.next_index,
next_value: bigint_to_be_bytes_array::<32>(&new_element.value)?,
index: new_low_element.index,
};
let low_element_changelog_entry = IndexedChangelogEntry {
element: new_low_element,
proof: low_leaf_proof.as_slice()[..NET_HEIGHT].try_into().unwrap(),
changelog_index: new_changelog_index,
};
self.indexed_changelog.push(low_element_changelog_entry);
// New element is always the newest one in the tree. Since we
// support concurrent updates, the index provided by the caller
// might be outdated. Let's just use the latest index indicated
// by the tree.
new_element.index =
I::try_from(self.next_index()).map_err(|_| IndexedMerkleTreeError::IntegerOverflow)?;
// Append new element.
let mut proof = BoundedVec::with_capacity(self.height);
let new_leaf = new_element.hash::<H>(&low_element_next_value)?;
let (new_changelog_index, _) = self.merkle_tree.append_with_proof(&new_leaf, &mut proof)?;
// Prepare raw new element to save in changelog.
let raw_new_element = RawIndexedElement {
value: bigint_to_be_bytes_array::<32>(&new_element.value).unwrap(),
next_index: new_element.next_index,
next_value: bigint_to_be_bytes_array::<32>(&low_element_next_value)?,
index: new_element.index,
};
// Emit changelog entry for new element.
let new_element_changelog_entry = IndexedChangelogEntry {
element: raw_new_element,
proof: proof.as_slice()[..NET_HEIGHT].try_into().unwrap(),
changelog_index: new_changelog_index,
};
self.indexed_changelog.push(new_element_changelog_entry);
let output = IndexedMerkleTreeUpdate {
new_low_element,
new_low_element_hash: new_low_leaf,
new_high_element: raw_new_element,
new_high_element_hash: new_leaf,
};
Ok(output)
}
}
// Expose the wrapped concurrent Merkle tree's read-only API (root(),
// next_index(), changelog_index(), ...) directly on the indexed tree.
impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> Deref
    for IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;
    fn deref(&self) -> &Self::Target {
        &self.merkle_tree
    }
}
// Mutable counterpart of the Deref impl above (append(), update(), ...).
impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> DerefMut
    for IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.merkle_tree
    }
}
impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> PartialEq
    for IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    /// Two indexed trees are equal when the wrapped concurrent Merkle trees
    /// match and the indexed changelogs agree on layout (capacity, length,
    /// cursor positions) as well as on their contents.
    fn eq(&self, other: &Self) -> bool {
        if self.merkle_tree != other.merkle_tree {
            return false;
        }
        let lhs = &self.indexed_changelog;
        let rhs = &other.indexed_changelog;
        lhs.capacity() == rhs.capacity()
            && lhs.len() == rhs.len()
            && lhs.first_index() == rhs.first_index()
            && lhs.last_index() == rhs.last_index()
            && lhs == rhs
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/errors.rs
|
use light_bounded_vec::BoundedVecError;
use light_concurrent_merkle_tree::{
errors::ConcurrentMerkleTreeError, light_hasher::errors::HasherError,
};
use light_utils::UtilsError;
use thiserror::Error;
/// Errors returned by indexed Merkle tree and indexed array operations.
/// Human-readable messages are provided via `thiserror`'s `#[error]`
/// attributes; stable numeric codes for Solana live in the `From<_> for u32`
/// impl below.
#[derive(Debug, Error)]
pub enum IndexedMerkleTreeError {
    #[error("Integer overflow")]
    IntegerOverflow,
    #[error("Invalid index, it exceeds the number of elements.")]
    IndexHigherThanMax,
    #[error("Could not find the low element.")]
    LowElementNotFound,
    #[error("Low element is greater or equal to the provided new element.")]
    LowElementGreaterOrEqualToNewElement,
    #[error("The provided new element is greater or equal to the next element.")]
    NewElementGreaterOrEqualToNextElement,
    #[error("The element already exists, but was expected to be absent.")]
    ElementAlreadyExists,
    #[error("The element does not exist, but was expected to be present.")]
    ElementDoesNotExist,
    #[error("Invalid changelog buffer size, expected {0}, got {1}")]
    ChangelogBufferSize(usize, usize),
    #[error("Hasher error: {0}")]
    Hasher(#[from] HasherError),
    #[error("Concurrent Merkle tree error: {0}")]
    ConcurrentMerkleTree(#[from] ConcurrentMerkleTreeError),
    #[error("Utils error {0}")]
    Utils(#[from] UtilsError),
    #[error("Bounded vector error: {0}")]
    BoundedVec(#[from] BoundedVecError),
    #[error("Indexed array is full, cannot append more elements")]
    ArrayFull,
}
// NOTE(vadorovsky): Unfortunately, we need to do it by hand. `num_derive::ToPrimitive`
// doesn't support data-carrying enums.
// Maps each error to a stable numeric code (11001..) for on-chain use;
// wrapped errors delegate to their own `From<_> for u32` conversions.
#[cfg(feature = "solana")]
impl From<IndexedMerkleTreeError> for u32 {
    fn from(e: IndexedMerkleTreeError) -> u32 {
        match e {
            IndexedMerkleTreeError::IntegerOverflow => 11001,
            IndexedMerkleTreeError::IndexHigherThanMax => 11002,
            IndexedMerkleTreeError::LowElementNotFound => 11003,
            IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement => 11004,
            IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement => 11005,
            IndexedMerkleTreeError::ElementAlreadyExists => 11006,
            IndexedMerkleTreeError::ElementDoesNotExist => 11007,
            IndexedMerkleTreeError::ChangelogBufferSize(_, _) => 11008,
            IndexedMerkleTreeError::ArrayFull => 11009,
            IndexedMerkleTreeError::Hasher(e) => e.into(),
            IndexedMerkleTreeError::ConcurrentMerkleTree(e) => e.into(),
            IndexedMerkleTreeError::Utils(e) => e.into(),
            IndexedMerkleTreeError::BoundedVec(e) => e.into(),
        }
    }
}
// Allows returning `IndexedMerkleTreeError` directly from Solana program
// entrypoints by wrapping the numeric code in `ProgramError::Custom`.
#[cfg(feature = "solana")]
impl From<IndexedMerkleTreeError> for solana_program::program_error::ProgramError {
    fn from(e: IndexedMerkleTreeError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/array.rs
|
use std::{cmp::Ordering, fmt::Debug, marker::PhantomData};
use crate::{errors::IndexedMerkleTreeError, HIGHEST_ADDRESS_PLUS_ONE};
use light_concurrent_merkle_tree::{event::RawIndexedElement, light_hasher::Hasher};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::BigUint;
use num_traits::Zero;
use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned};
/// A single node of an indexed array / indexed Merkle tree.
///
/// Elements form a linked list sorted by value: each element stores its own
/// position (`index`), its `value`, and the position of the element holding
/// the next higher value (`next_index`).
#[derive(Clone, Debug, Default)]
pub struct IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Position of this element in the backing array.
    pub index: I,
    /// The value stored in this element.
    pub value: BigUint,
    /// Position of the element holding the next higher value; 0 means this
    /// element currently holds the greatest value.
    pub next_index: I,
}
impl<I> From<RawIndexedElement<I>> for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Converts a raw (byte-oriented) element into an `IndexedElement`,
    /// decoding the value from its big-endian byte representation.
    fn from(raw: RawIndexedElement<I>) -> Self {
        Self {
            index: raw.index,
            value: BigUint::from_bytes_be(&raw.value),
            next_index: raw.next_index,
        }
    }
}
impl<I> PartialEq for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Equality is based on `value` alone; `index` and `next_index` are
    /// deliberately ignored, so two elements at different positions compare
    /// equal as long as they hold the same value.
    fn eq(&self, other: &Self) -> bool {
        self.value == other.value
    }
}
/// Marker impl: value-based equality is a full equivalence relation
/// (reflexive, symmetric, transitive).
impl<I> Eq for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
}
impl<I> PartialOrd for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Delegates to [`Ord::cmp`]; elements are totally ordered by `value`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<I> Ord for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Orders elements by `value` only, consistent with `PartialEq`.
    fn cmp(&self, other: &Self) -> Ordering {
        self.value.cmp(&other.value)
    }
}
impl<I> IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Returns the element's position as `usize`.
    pub fn index(&self) -> usize {
        self.index.into()
    }
    /// Returns the position of the next (higher-value) element as `usize`.
    pub fn next_index(&self) -> usize {
        self.next_index.into()
    }
    /// Computes the leaf hash `H(value, next_index, next_value)`.
    ///
    /// `value` and `next_value` are encoded as 32 big-endian bytes and
    /// `next_index` uses the big-endian encoding of `I`. `next_value` must
    /// be the value of the element this one points to via `next_index`.
    pub fn hash<H>(&self, next_value: &BigUint) -> Result<[u8; 32], IndexedMerkleTreeError>
    where
        H: Hasher,
    {
        let hash = H::hashv(&[
            bigint_to_be_bytes_array::<32>(&self.value)?.as_ref(),
            self.next_index.to_be_bytes().as_ref(),
            bigint_to_be_bytes_array::<32>(next_value)?.as_ref(),
        ])?;
        Ok(hash)
    }
    /// Overwrites all fields with the contents of the given raw element,
    /// decoding the value from its big-endian byte representation.
    pub fn update_from_raw_element(&mut self, raw_element: &RawIndexedElement<I>) {
        self.index = raw_element.index;
        self.value = BigUint::from_bytes_be(&raw_element.value);
        self.next_index = raw_element.next_index;
    }
}
/// The result of preparing or performing an insertion into an indexed
/// array: the patched low element, the freshly created element and the
/// value of the element the new element points to.
#[derive(Clone, Debug)]
pub struct IndexedElementBundle<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// The low element with its `next_index` redirected to the new element.
    pub new_low_element: IndexedElement<I>,
    /// The newly created element.
    pub new_element: IndexedElement<I>,
    /// Value of the element that `new_element.next_index` points to.
    pub new_element_next_value: BigUint,
}
/// An in-memory indexed array: a vector of elements forming a linked list
/// sorted by value, as used by indexed Merkle trees.
#[derive(Clone, Debug)]
pub struct IndexedArray<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Backing storage; element 0 is the initial zero element.
    pub elements: Vec<IndexedElement<I>>,
    /// Index of the most recently appended element.
    pub current_node_index: I,
    /// Index of the element currently holding the greatest value.
    pub highest_element_index: I,
    /// Ties the array to the hasher used for element hashing.
    _hasher: PhantomData<H>,
}
impl<H, I> Default for IndexedArray<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Creates an array containing only the initial zero element
    /// (`value = 0`, `next_index = 0`), which serves as the head of the
    /// linked list.
    fn default() -> Self {
        Self {
            elements: vec![IndexedElement {
                index: I::zero(),
                value: BigUint::zero(),
                next_index: I::zero(),
            }],
            current_node_index: I::zero(),
            highest_element_index: I::zero(),
            _hasher: PhantomData,
        }
    }
}
impl<H, I> IndexedArray<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Returns the element at `index`, or `None` when out of bounds.
    pub fn get(&self, index: usize) -> Option<&IndexedElement<I>> {
        self.elements.get(index)
    }
    /// Returns the index of the most recently appended element.
    ///
    /// NOTE(review): this is the highest used index, not the number of
    /// stored elements — the backing vector additionally holds the initial
    /// zero element at index 0, so `elements.len() == len() + 1`.
    pub fn len(&self) -> usize {
        self.current_node_index.into()
    }
    /// Returns `true` when nothing was appended yet (only the initial zero
    /// element is present).
    pub fn is_empty(&self) -> bool {
        self.current_node_index == I::zero()
    }
    /// Returns a double-ended iterator over all elements, including the
    /// initial zero element.
    pub fn iter(&self) -> IndexingArrayIter<H, I> {
        IndexingArrayIter {
            indexing_array: self,
            front: 0,
            // Inclusive bound: index of the last appended element.
            back: self.current_node_index.into(),
        }
    }
    /// Returns the element holding exactly `value`, if present.
    pub fn find_element(&self, value: &BigUint) -> Option<&IndexedElement<I>> {
        // `len()` is the highest used index, so `+ 1` makes the slice
        // cover all stored elements.
        self.elements[..self.len() + 1]
            .iter()
            .find(|&node| node.value == *value)
    }
    /// Initializes the array by appending the highest possible address, so
    /// that every subsequently inserted address has a next element.
    pub fn init(&mut self) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> {
        use num_traits::Num;
        let init_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10)
            .map_err(|_| IndexedMerkleTreeError::IntegerOverflow)?;
        self.append(&init_value)
    }
    /// Returns the index of the low element for the given `value`, which is
    /// not yet the part of the array.
    ///
    /// Low element is the greatest element which still has lower value than
    /// the provided one.
    ///
    /// Low elements are used in non-membership proofs.
    ///
    /// Returns `ElementAlreadyExists` if `value` is already stored.
    pub fn find_low_element_index_for_nonexistent(
        &self,
        value: &BigUint,
    ) -> Result<I, IndexedMerkleTreeError> {
        // Try to find element whose next element is higher than the provided
        // value.
        for (i, node) in self.elements.iter().enumerate() {
            if node.value == *value {
                return Err(IndexedMerkleTreeError::ElementAlreadyExists);
            }
            // The low element is the one whose value is below `value` while
            // its successor's value is above it.
            if self.elements[node.next_index()].value > *value && node.value < *value {
                return i
                    .try_into()
                    .map_err(|_| IndexedMerkleTreeError::IntegerOverflow);
            }
        }
        // If no such element was found, it means that our value is going to be
        // the greatest in the array. This means that the currently greatest
        // element is going to be the low element of our value.
        Ok(self.highest_element_index)
    }
    /// Returns the:
    ///
    /// * Low element for the given value.
    /// * Next value for that low element.
    ///
    /// For the given `value`, which is not yet the part of the array.
    ///
    /// Low element is the greatest element which still has lower value than
    /// the provided one.
    ///
    /// Low elements are used in non-membership proofs.
    pub fn find_low_element_for_nonexistent(
        &self,
        value: &BigUint,
    ) -> Result<(IndexedElement<I>, BigUint), IndexedMerkleTreeError> {
        let low_element_index = self.find_low_element_index_for_nonexistent(value)?;
        let low_element = self.elements[usize::from(low_element_index)].clone();
        Ok((
            low_element.clone(),
            self.elements[low_element.next_index()].value.clone(),
        ))
    }
    /// Returns the index of the low element for the given `value`, which is
    /// already the part of the array.
    ///
    /// Low element is the greatest element which still has lower value than
    /// the provided one.
    ///
    /// Low elements are used in non-membership proofs.
    ///
    /// Returns `ElementDoesNotExist` if no element points at `value`.
    pub fn find_low_element_index_for_existent(
        &self,
        value: &BigUint,
    ) -> Result<I, IndexedMerkleTreeError> {
        // The low element of an existing value is the element whose
        // successor holds exactly that value.
        for (i, node) in self.elements[..self.len() + 1].iter().enumerate() {
            if self.elements[usize::from(node.next_index)].value == *value {
                let i = i
                    .try_into()
                    .map_err(|_| IndexedMerkleTreeError::IntegerOverflow)?;
                return Ok(i);
            }
        }
        Err(IndexedMerkleTreeError::ElementDoesNotExist)
    }
    /// Returns the low element for the given `value`, which is already the
    /// part of the array.
    ///
    /// Low element is the greatest element which still has lower value than
    /// the provided one.
    ///
    /// Low elements are used in non-membership proofs.
    pub fn find_low_element_for_existent(
        &self,
        value: &BigUint,
    ) -> Result<IndexedElement<I>, IndexedMerkleTreeError> {
        let low_element_index = self.find_low_element_index_for_existent(value)?;
        let low_element = self.elements[usize::from(low_element_index)].clone();
        Ok(low_element)
    }
    /// Returns the hash of the given element. That hash consists of:
    ///
    /// * The value of the given element.
    /// * The `next_index` of the given element.
    /// * The value of the element pointed by `next_index`.
    pub fn hash_element(&self, index: I) -> Result<[u8; 32], IndexedMerkleTreeError> {
        let element = self
            .elements
            .get(usize::from(index))
            .ok_or(IndexedMerkleTreeError::IndexHigherThanMax)?;
        let next_element = self
            .elements
            .get(usize::from(element.next_index))
            .ok_or(IndexedMerkleTreeError::IndexHigherThanMax)?;
        element.hash::<H>(&next_element.value)
    }
    /// Returns an updated low element and a new element, created based on the
    /// provided `low_element_index` and `value`.
    ///
    /// Does not modify the array; use `append_with_low_element_index` to
    /// actually insert.
    pub fn new_element_with_low_element_index(
        &self,
        low_element_index: I,
        value: &BigUint,
    ) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> {
        let mut new_low_element = self.elements[usize::from(low_element_index)].clone();
        // The new element always occupies the next free slot.
        let new_element_index = self
            .current_node_index
            .checked_add(&I::one())
            .ok_or(IndexedMerkleTreeError::IntegerOverflow)?;
        // The new element inherits the low element's successor...
        let new_element = IndexedElement {
            index: new_element_index,
            value: value.clone(),
            next_index: new_low_element.next_index,
        };
        // ...and the low element now points at the new element.
        new_low_element.next_index = new_element_index;
        let new_element_next_value = self.elements[usize::from(new_element.next_index)]
            .value
            .clone();
        Ok(IndexedElementBundle {
            new_low_element,
            new_element,
            new_element_next_value,
        })
    }
    /// Prepares an insertion bundle for `value`, locating the low element
    /// automatically. Does not modify the array.
    pub fn new_element(
        &self,
        value: &BigUint,
    ) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> {
        let low_element_index = self.find_low_element_index_for_nonexistent(value)?;
        let element = self.new_element_with_low_element_index(low_element_index, value)?;
        Ok(element)
    }
    /// Appends the given `value` to the indexing array.
    ///
    /// The caller-provided `low_element_index` is range-checked against
    /// `value` before anything is mutated.
    pub fn append_with_low_element_index(
        &mut self,
        low_element_index: I,
        value: &BigUint,
    ) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> {
        // TODO: add a length check; requires storing the tree height/capacity here.
        let old_low_element = &self.elements[usize::from(low_element_index)];
        // Check that the `value` belongs to the range of `old_low_element`.
        if old_low_element.next_index == I::zero() {
            // In this case, the `old_low_element` is the greatest element.
            // The value of `new_element` needs to be greater than the value of
            // `old_low_element` (and therefore, be the greatest).
            if value <= &old_low_element.value {
                return Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement);
            }
        } else {
            // The value of `new_element` needs to be greater than the value of
            // `old_low_element` (and therefore, be the greatest).
            if value <= &old_low_element.value {
                return Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement);
            }
            // The value of `new_element` needs to be lower than the value of
            // next element pointed by `old_low_element`.
            if value >= &self.elements[usize::from(old_low_element.next_index)].value {
                return Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement);
            }
        }
        // Create new node.
        let new_element_bundle =
            self.new_element_with_low_element_index(low_element_index, value)?;
        // If the old low element wasn't pointing to any element, it means that:
        //
        // * It used to be the highest element.
        // * Our new element, which we are appending, is going the be the
        //   highest element.
        //
        // Therefore, we need to save the new element index as the highest
        // index.
        if old_low_element.next_index == I::zero() {
            self.highest_element_index = new_element_bundle.new_element.index;
        }
        // Insert new node.
        self.current_node_index = new_element_bundle.new_element.index;
        self.elements.push(new_element_bundle.new_element.clone());
        // Update low element.
        self.elements[usize::from(low_element_index)] = new_element_bundle.new_low_element.clone();
        Ok(new_element_bundle)
    }
    /// Appends `value`, locating the low element automatically.
    pub fn append(
        &mut self,
        value: &BigUint,
    ) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> {
        let low_element_index = self.find_low_element_index_for_nonexistent(value)?;
        self.append_with_low_element_index(low_element_index, value)
    }
    /// Returns the element at position 1 (the first ever appended), or
    /// `None` if nothing was appended yet.
    ///
    /// NOTE(review): despite the name, this is not necessarily the element
    /// with the smallest non-zero value — that one is reachable via
    /// `elements[0].next_index`. Confirm the intended semantics with the
    /// callers.
    pub fn lowest(&self) -> Option<IndexedElement<I>> {
        if self.current_node_index < I::one() {
            None
        } else {
            self.elements.get(1).cloned()
        }
    }
}
/// Double-ended iterator over the elements of an [`IndexedArray`].
pub struct IndexingArrayIter<'a, H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    // The array being iterated over.
    indexing_array: &'a IndexedArray<H, I>,
    // Next position yielded from the front (inclusive).
    front: usize,
    // Next position yielded from the back (inclusive).
    back: usize,
}
impl<'a, H, I> Iterator for IndexingArrayIter<'a, H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    type Item = &'a IndexedElement<I>;
    /// Yields elements from the front while `front` has not passed the
    /// inclusive `back` bound.
    fn next(&mut self) -> Option<Self::Item> {
        if self.front <= self.back {
            let result = self.indexing_array.elements.get(self.front);
            self.front += 1;
            result
        } else {
            None
        }
    }
}
impl<'a, H, I> DoubleEndedIterator for IndexingArrayIter<'a, H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Yields elements from the back, from `back` down to `front` (both
    /// inclusive bounds).
    ///
    /// Because `back` is inclusive, it must not be decremented below 0:
    /// the previous `self.back -= 1` underflowed `usize` once the element
    /// at index 0 was yielded (e.g. `array.iter().rev()` on a freshly
    /// created array), panicking in debug builds and wrapping around in
    /// release builds. Exhaustion is now signaled by moving `front` past
    /// `back` instead.
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.back < self.front {
            return None;
        }
        let result = self.indexing_array.elements.get(self.back);
        match self.back.checked_sub(1) {
            Some(prev) => self.back = prev,
            // `back` reached 0: mark the iterator as exhausted without
            // underflowing the unsigned bound.
            None => self.front = self.back + 1,
        }
        result
    }
}
#[cfg(test)]
mod test {
use light_concurrent_merkle_tree::light_hasher::Poseidon;
use num_bigint::{RandBigInt, ToBigUint};
use rand::thread_rng;
use super::*;
#[test]
fn test_indexed_element_cmp() {
let mut rng = thread_rng();
for _ in 0..1000 {
let value = rng.gen_biguint(128);
let element_1 = IndexedElement::<u16> {
index: 0,
value: value.clone(),
next_index: 1,
};
let element_2 = IndexedElement::<u16> {
index: 1,
value,
next_index: 2,
};
assert_eq!(element_1, element_2);
assert_eq!(element_2, element_1);
assert!(matches!(element_1.cmp(&element_2), Ordering::Equal));
assert!(matches!(element_2.cmp(&element_1), Ordering::Equal));
let value_higher = rng.gen_biguint(128);
if value_higher == 0.to_biguint().unwrap() {
continue;
}
let value_lower = rng.gen_biguint_below(&value_higher);
let element_lower = IndexedElement::<u16> {
index: 0,
value: value_lower,
next_index: 1,
};
let element_higher = IndexedElement::<u16> {
index: 1,
value: value_higher,
next_index: 2,
};
assert_ne!(element_lower, element_higher);
assert_ne!(element_higher, element_lower);
assert!(matches!(element_lower.cmp(&element_higher), Ordering::Less));
assert!(matches!(
element_higher.cmp(&element_lower),
Ordering::Greater
));
assert!(matches!(
element_lower.partial_cmp(&element_higher),
Some(Ordering::Less)
));
assert!(matches!(
element_higher.partial_cmp(&element_lower),
Some(Ordering::Greater)
));
}
}
/// Tests the insertion of elements to the indexing array.
#[test]
fn test_append() {
// The initial state of the array looks like:
//
// ```
// value = [0] [0] [0] [0] [0] [0] [0] [0]
// next_index = [0] [0] [0] [0] [0] [0] [0] [0]
// ```
let mut indexed_array: IndexedArray<Poseidon, usize> = IndexedArray::default();
let nullifier1 = 30_u32.to_biguint().unwrap();
let bundle1 = indexed_array.new_element(&nullifier1).unwrap();
assert!(indexed_array.find_element(&nullifier1).is_none());
indexed_array.append(&nullifier1).unwrap();
// After adding a new value 30, it should look like:
//
// ```
// value = [ 0] [30] [0] [0] [0] [0] [0] [0]
// next_index = [ 1] [ 0] [0] [0] [0] [0] [0] [0]
// ```
//
// Because:
//
// * Low element is the first node, with index 0 and value 0. There is
// no node with value greater as 30, so we found it as a one pointing to
// node 0 (which will always have value 0).
// * The new nullifier is inserted in index 1.
// * `next_*` fields of the low nullifier are updated to point to the new
// nullifier.
assert_eq!(
indexed_array.find_element(&nullifier1),
Some(&bundle1.new_element),
);
let expected_hash = Poseidon::hashv(&[
bigint_to_be_bytes_array::<32>(&nullifier1)
.unwrap()
.as_ref(),
0_usize.to_be_bytes().as_ref(),
bigint_to_be_bytes_array::<32>(&(0.to_biguint().unwrap()))
.unwrap()
.as_ref(),
])
.unwrap();
assert_eq!(indexed_array.hash_element(1).unwrap(), expected_hash);
assert_eq!(
indexed_array.elements[0],
IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 1,
},
);
assert_eq!(
indexed_array.elements[1],
IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0,
}
);
assert_eq!(
indexed_array.iter().collect::<Vec<_>>().as_slice(),
&[
&IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 1,
},
&IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0
}
]
);
let nullifier2 = 10_u32.to_biguint().unwrap();
let bundle2 = indexed_array.new_element(&nullifier2).unwrap();
assert!(indexed_array.find_element(&nullifier2).is_none());
indexed_array.append(&nullifier2).unwrap();
// After adding an another value 10, it should look like:
//
// ```
// value = [ 0] [30] [10] [0] [0] [0] [0] [0]
// next_index = [ 2] [ 0] [ 1] [0] [0] [0] [0] [0]
// ```
//
// Because:
//
// * Low nullifier is still the node 0, but this time for differen reason -
// its `next_index` 2 contains value 30, whish is greater than 10.
// * The new nullifier is inserted as node 2.
// * Low nullifier is pointing to the index 1. We assign the 1st nullifier
// as the next nullifier of our new nullifier. Therefore, our new nullifier
// looks like: `[value = 10, next_index = 1]`.
// * Low nullifier is updated to point to the new nullifier. Therefore,
// after update it looks like: `[value = 0, next_index = 2]`.
// * The previously inserted nullifier, the node 1, remains unchanged.
assert_eq!(
indexed_array.find_element(&nullifier2),
Some(&bundle2.new_element),
);
let expected_hash = Poseidon::hashv(&[
bigint_to_be_bytes_array::<32>(&nullifier2)
.unwrap()
.as_ref(),
1_usize.to_be_bytes().as_ref(),
bigint_to_be_bytes_array::<32>(&(30.to_biguint().unwrap()))
.unwrap()
.as_ref(),
])
.unwrap();
assert_eq!(indexed_array.hash_element(2).unwrap(), expected_hash);
assert_eq!(
indexed_array.elements[0],
IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
}
);
assert_eq!(
indexed_array.elements[1],
IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0,
}
);
assert_eq!(
indexed_array.elements[2],
IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 1,
}
);
assert_eq!(
indexed_array.iter().collect::<Vec<_>>().as_slice(),
&[
&IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
},
&IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0,
},
&IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 1,
}
]
);
let nullifier3 = 20_u32.to_biguint().unwrap();
let bundle3 = indexed_array.new_element(&nullifier3).unwrap();
assert!(indexed_array.find_element(&nullifier3).is_none());
indexed_array.append(&nullifier3).unwrap();
// After adding an another value 20, it should look like:
//
// ```
// value = [ 0] [30] [10] [20] [0] [0] [0] [0]
// next_index = [ 2] [ 0] [ 3] [ 1] [0] [0] [0] [0]
// ```
//
// Because:
// * Low nullifier is the node 2.
// * The new nullifier is inserted as node 3.
// * Low nullifier is pointing to the node 2. We assign the 1st nullifier
// as the next nullifier of our new nullifier. Therefore, our new
// nullifier looks like:
// * Low nullifier is updated to point to the new nullifier. Therefore,
// after update it looks like: `[value = 10, next_index = 3]`.
assert_eq!(
indexed_array.find_element(&nullifier3),
Some(&bundle3.new_element),
);
let expected_hash = Poseidon::hashv(&[
bigint_to_be_bytes_array::<32>(&nullifier3)
.unwrap()
.as_ref(),
1_usize.to_be_bytes().as_ref(),
bigint_to_be_bytes_array::<32>(&(30.to_biguint().unwrap()))
.unwrap()
.as_ref(),
])
.unwrap();
assert_eq!(indexed_array.hash_element(3).unwrap(), expected_hash);
assert_eq!(
indexed_array.elements[0],
IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
}
);
assert_eq!(
indexed_array.elements[1],
IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0,
}
);
assert_eq!(
indexed_array.elements[2],
IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 3,
}
);
assert_eq!(
indexed_array.elements[3],
IndexedElement {
index: 3,
value: 20_u32.to_biguint().unwrap(),
next_index: 1,
}
);
assert_eq!(
indexed_array.iter().collect::<Vec<_>>().as_slice(),
&[
&IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
},
&IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0,
},
&IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 3,
},
&IndexedElement {
index: 2,
value: 20_u32.to_biguint().unwrap(),
next_index: 1
}
]
);
let nullifier4 = 50_u32.to_biguint().unwrap();
let bundle4 = indexed_array.new_element(&nullifier4).unwrap();
assert!(indexed_array.find_element(&nullifier4).is_none());
indexed_array.append(&nullifier4).unwrap();
// After adding an another value 50, it should look like:
//
// ```
// value = [ 0] [30] [10] [20] [50] [0] [0] [0]
// next_index = [ 2] [ 4] [ 3] [ 1] [0 ] [0] [0] [0]
// ```
//
// Because:
//
// * Low nullifier is the node 1 - there is no node with value greater
// than 50, so we found it as a one having 0 as the `next_value`.
// * The new nullifier is inserted as node 4.
// * Low nullifier is not pointing to any node. So our new nullifier
// is not going to point to any other node either. Therefore, the new
// nullifier looks like: `[value = 50, next_index = 0]`.
// * Low nullifier is updated to point to the new nullifier. Therefore,
// after update it looks like: `[value = 30, next_index = 4]`.
assert_eq!(
indexed_array.find_element(&nullifier4),
Some(&bundle4.new_element),
);
let expected_hash = Poseidon::hashv(&[
bigint_to_be_bytes_array::<32>(&nullifier4)
.unwrap()
.as_ref(),
0_usize.to_be_bytes().as_ref(),
bigint_to_be_bytes_array::<32>(&(0.to_biguint().unwrap()))
.unwrap()
.as_ref(),
])
.unwrap();
assert_eq!(indexed_array.hash_element(4).unwrap(), expected_hash);
assert_eq!(
indexed_array.elements[0],
IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
}
);
assert_eq!(
indexed_array.elements[1],
IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 4,
}
);
assert_eq!(
indexed_array.elements[2],
IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 3,
}
);
assert_eq!(
indexed_array.elements[3],
IndexedElement {
index: 3,
value: 20_u32.to_biguint().unwrap(),
next_index: 1,
}
);
assert_eq!(
indexed_array.elements[4],
IndexedElement {
index: 4,
value: 50_u32.to_biguint().unwrap(),
next_index: 0,
}
);
assert_eq!(
indexed_array.iter().collect::<Vec<_>>().as_slice(),
&[
&IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
},
&IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 4,
},
&IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 3,
},
&IndexedElement {
index: 3,
value: 20_u32.to_biguint().unwrap(),
next_index: 1,
},
&IndexedElement {
index: 4,
value: 50_u32.to_biguint().unwrap(),
next_index: 0,
}
]
);
}
#[test]
fn test_append_with_low_element_index() {
// The initial state of the array looks like:
//
// ```
// value = [0] [0] [0] [0] [0] [0] [0] [0]
// next_index = [0] [0] [0] [0] [0] [0] [0] [0]
// ```
let mut indexing_array: IndexedArray<Poseidon, usize> = IndexedArray::default();
let low_element_index = 0;
let nullifier1 = 30_u32.to_biguint().unwrap();
indexing_array
.append_with_low_element_index(low_element_index, &nullifier1)
.unwrap();
// After adding a new value 30, it should look like:
//
// ```
// value = [ 0] [30] [0] [0] [0] [0] [0] [0]
// next_index = [ 1] [ 0] [0] [0] [0] [0] [0] [0]
// ```
//
// Because:
//
// * Low element is the first node, with index 0 and value 0. There is
// no node with value greater as 30, so we found it as a one pointing to
// node 0 (which will always have value 0).
// * The new nullifier is inserted in index 1.
// * `next_*` fields of the low nullifier are updated to point to the new
// nullifier.
assert_eq!(
indexing_array.elements[0],
IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 1,
},
);
assert_eq!(
indexing_array.elements[1],
IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0,
}
);
let low_element_index = 0;
let nullifier2 = 10_u32.to_biguint().unwrap();
indexing_array
.append_with_low_element_index(low_element_index, &nullifier2)
.unwrap();
// After adding an another value 10, it should look like:
//
// ```
// value = [ 0] [30] [10] [0] [0] [0] [0] [0]
// next_index = [ 2] [ 0] [ 1] [0] [0] [0] [0] [0]
// ```
//
// Because:
//
// * Low nullifier is still the node 0, but this time for differen reason -
// its `next_index` 2 contains value 30, whish is greater than 10.
// * The new nullifier is inserted as node 2.
// * Low nullifier is pointing to the index 1. We assign the 1st nullifier
// as the next nullifier of our new nullifier. Therefore, our new nullifier
// looks like: `[value = 10, next_index = 1]`.
// * Low nullifier is updated to point to the new nullifier. Therefore,
// after update it looks like: `[value = 0, next_index = 2]`.
// * The previously inserted nullifier, the node 1, remains unchanged.
assert_eq!(
indexing_array.elements[0],
IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
}
);
assert_eq!(
indexing_array.elements[1],
IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0,
}
);
assert_eq!(
indexing_array.elements[2],
IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 1,
}
);
let low_element_index = 2;
let nullifier3 = 20_u32.to_biguint().unwrap();
indexing_array
.append_with_low_element_index(low_element_index, &nullifier3)
.unwrap();
// After adding an another value 20, it should look like:
//
// ```
// value = [ 0] [30] [10] [20] [0] [0] [0] [0]
// next_index = [ 2] [ 0] [ 3] [ 1] [0] [0] [0] [0]
// ```
//
// Because:
// * Low nullifier is the node 2.
// * The new nullifier is inserted as node 3.
// * Low nullifier is pointing to the node 2. We assign the 1st nullifier
// as the next nullifier of our new nullifier. Therefore, our new
// nullifier looks like:
// * Low nullifier is updated to point to the new nullifier. Therefore,
// after update it looks like: `[value = 10, next_index = 3]`.
assert_eq!(
indexing_array.elements[0],
IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
}
);
assert_eq!(
indexing_array.elements[1],
IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 0,
}
);
assert_eq!(
indexing_array.elements[2],
IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 3,
}
);
assert_eq!(
indexing_array.elements[3],
IndexedElement {
index: 3,
value: 20_u32.to_biguint().unwrap(),
next_index: 1,
}
);
let low_element_index = 1;
let nullifier4 = 50_u32.to_biguint().unwrap();
indexing_array
.append_with_low_element_index(low_element_index, &nullifier4)
.unwrap();
// After adding an another value 50, it should look like:
//
// ```
// value = [ 0] [30] [10] [20] [50] [0] [0] [0]
// next_index = [ 2] [ 4] [ 3] [ 1] [0 ] [0] [0] [0]
// ```
//
// Because:
//
// * Low nullifier is the node 1 - there is no node with value greater
// than 50, so we found it as a one having 0 as the `next_value`.
// * The new nullifier is inserted as node 4.
// * Low nullifier is not pointing to any node. So our new nullifier
// is not going to point to any other node either. Therefore, the new
// nullifier looks like: `[value = 50, next_index = 0]`.
// * Low nullifier is updated to point to the new nullifier. Therefore,
// after update it looks like: `[value = 30, next_index = 4]`.
assert_eq!(
indexing_array.elements[0],
IndexedElement {
index: 0,
value: 0_u32.to_biguint().unwrap(),
next_index: 2,
}
);
assert_eq!(
indexing_array.elements[1],
IndexedElement {
index: 1,
value: 30_u32.to_biguint().unwrap(),
next_index: 4,
}
);
assert_eq!(
indexing_array.elements[2],
IndexedElement {
index: 2,
value: 10_u32.to_biguint().unwrap(),
next_index: 3,
}
);
assert_eq!(
indexing_array.elements[3],
IndexedElement {
index: 3,
value: 20_u32.to_biguint().unwrap(),
next_index: 1,
}
);
assert_eq!(
indexing_array.elements[4],
IndexedElement {
index: 4,
value: 50_u32.to_biguint().unwrap(),
next_index: 0,
}
);
}
/// Tries to violate the integrity of the array by pointing to invalid low
/// nullifiers. Tests whether the range check works correctly and disallows
/// the invalid appends from happening.
#[test]
fn test_append_with_low_element_index_invalid() {
// The initial state of the array looks like:
//
// ```
// value = [0] [0] [0] [0] [0] [0] [0] [0]
// next_index = [0] [0] [0] [0] [0] [0] [0] [0]
// ```
let mut indexing_array: IndexedArray<Poseidon, usize> = IndexedArray::default();
// Append nullifier 30. The low nullifier is at index 0. The array
// should look like:
//
// ```
// value = [ 0] [30] [0] [0] [0] [0] [0] [0]
// next_index = [ 1] [ 0] [0] [0] [0] [0] [0] [0]
// ```
let low_element_index = 0;
let nullifier1 = 30_u32.to_biguint().unwrap();
indexing_array
.append_with_low_element_index(low_element_index, &nullifier1)
.unwrap();
// Try appending nullifier 20, while pointing to index 1 as low
// nullifier.
// Therefore, the new element is lower than the supposed low element.
let low_element_index = 1;
let nullifier2 = 20_u32.to_biguint().unwrap();
assert!(matches!(
indexing_array.append_with_low_element_index(low_element_index, &nullifier2),
Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement)
));
// Try appending nullifier 50, while pointing to index 0 as low
// nullifier.
// Therefore, the new element is greater than next element.
let low_element_index = 0;
let nullifier2 = 50_u32.to_biguint().unwrap();
assert!(matches!(
indexing_array.append_with_low_element_index(low_element_index, &nullifier2),
Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement),
));
// Append nullifier 50 correctly, with 0 as low nullifier. The array
// should look like:
//
// ```
// value = [ 0] [30] [50] [0] [0] [0] [0] [0]
// next_index = [ 1] [ 2] [ 0] [0] [0] [0] [0] [0]
// ```
let low_element_index = 1;
let nullifier2 = 50_u32.to_biguint().unwrap();
indexing_array
.append_with_low_element_index(low_element_index, &nullifier2)
.unwrap();
// Try appending nullifier 40, while pointint to index 2 (value 50) as
// low nullifier.
// Therefore, the pointed low element is greater than the new element.
let low_element_index = 2;
let nullifier3 = 40_u32.to_biguint().unwrap();
assert!(matches!(
indexing_array.append_with_low_element_index(low_element_index, &nullifier3),
Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement)
));
}
/// Tests whether `find_*_for_existent` elements return `None` when a
/// nonexistent is provided.
#[test]
/// Checks that the `find_low_element_*_for_existent` /
/// `*_for_nonexistent` lookups agree on which variant succeeds:
/// `*_existent` must fail for absent values and `*_nonexistent` must fail
/// for present ones, while the successful variant returns the low element
/// (the greatest element strictly smaller than the queried value).
fn test_find_low_element_for_existent_element() {
    let mut indexed_array: IndexedArray<Poseidon, usize> = IndexedArray::default();
    // Append nullifiers 40 and 20.
    // After both appends the array layout is:
    //   index 0: the initial zero element,
    //   index 1: value 40,
    //   index 2: value 20 (its `next_index` points at 40).
    let low_element_index = 0;
    let nullifier_1 = 40_u32.to_biguint().unwrap();
    indexed_array
        .append_with_low_element_index(low_element_index, &nullifier_1)
        .unwrap();
    let low_element_index = 0;
    let nullifier_2 = 20_u32.to_biguint().unwrap();
    indexed_array
        .append_with_low_element_index(low_element_index, &nullifier_2)
        .unwrap();

    // Try finding a low element for nonexistent nullifier 30.
    let nonexistent_nullifier = 30_u32.to_biguint().unwrap();
    // `*_existent` methods should fail.
    let res = indexed_array.find_low_element_index_for_existent(&nonexistent_nullifier);
    assert!(matches!(
        res,
        Err(IndexedMerkleTreeError::ElementDoesNotExist)
    ));
    let res = indexed_array.find_low_element_for_existent(&nonexistent_nullifier);
    assert!(matches!(
        res,
        Err(IndexedMerkleTreeError::ElementDoesNotExist)
    ));
    // `*_nonexistent` methods should succeed.
    // 20 (index 2) is the greatest element below 30, and 40 is the next value.
    let low_element_index = indexed_array
        .find_low_element_index_for_nonexistent(&nonexistent_nullifier)
        .unwrap();
    assert_eq!(low_element_index, 2);
    let low_element = indexed_array
        .find_low_element_for_nonexistent(&nonexistent_nullifier)
        .unwrap();
    assert_eq!(
        low_element,
        (
            IndexedElement::<usize> {
                index: 2,
                value: 20_u32.to_biguint().unwrap(),
                next_index: 1,
            },
            40_u32.to_biguint().unwrap(),
        )
    );

    // Try finding a low element of existent nullifier 40.
    // `_existent` methods should succeed.
    let low_element_index = indexed_array
        .find_low_element_index_for_existent(&nullifier_1)
        .unwrap();
    assert_eq!(low_element_index, 2);
    let low_element = indexed_array
        .find_low_element_for_existent(&nullifier_1)
        .unwrap();
    assert_eq!(
        low_element,
        IndexedElement::<usize> {
            index: 2,
            value: 20_u32.to_biguint().unwrap(),
            next_index: 1,
        },
    );
    // `*_nonexistent` methods should fail.
    let res = indexed_array.find_low_element_index_for_nonexistent(&nullifier_1);
    assert!(matches!(
        res,
        Err(IndexedMerkleTreeError::ElementAlreadyExists)
    ));
    let res = indexed_array.find_low_element_for_nonexistent(&nullifier_1);
    assert!(matches!(
        res,
        Err(IndexedMerkleTreeError::ElementAlreadyExists)
    ));
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/reference.rs
|
use std::marker::PhantomData;
use light_bounded_vec::{BoundedVec, BoundedVecError};
use light_concurrent_merkle_tree::light_hasher::{errors::HasherError, Hasher};
use light_merkle_tree_reference::{MerkleTree, ReferenceMerkleTreeError};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::BigUint;
use num_traits::{CheckedAdd, CheckedSub, Num, ToBytes, Unsigned};
use thiserror::Error;
use crate::{
array::{IndexedArray, IndexedElement},
errors::IndexedMerkleTreeError,
HIGHEST_ADDRESS_PLUS_ONE,
};
/// Errors returned by the reference indexed Merkle tree.
#[derive(Debug, Error)]
pub enum IndexedReferenceMerkleTreeError {
    /// The proven value is not strictly greater than the low leaf's value.
    #[error("NonInclusionProofFailedLowerBoundViolated")]
    NonInclusionProofFailedLowerBoundViolated,
    /// The proven value is not strictly less than the next leaf's value.
    #[error("NonInclusionProofFailedHigherBoundViolated")]
    NonInclusionProofFailedHigherBoundViolated,
    /// Error bubbled up from the indexed array / tree logic.
    #[error(transparent)]
    Indexed(#[from] IndexedMerkleTreeError),
    /// Error bubbled up from the underlying reference Merkle tree.
    #[error(transparent)]
    Reference(#[from] ReferenceMerkleTreeError),
    /// Error bubbled up from the hasher.
    #[error(transparent)]
    Hasher(#[from] HasherError),
}
/// Reference implementation of an indexed Merkle tree, backed by the plain
/// reference `MerkleTree`. Used to mirror the on-chain indexed concurrent
/// Merkle tree off-chain (see `init`).
#[derive(Debug, Clone)]
#[repr(C)]
pub struct IndexedMerkleTree<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
{
    pub merkle_tree: MerkleTree<H>,
    // Marker for the index type used by `IndexedElement<I>`; stores no data.
    _index: PhantomData<I>,
}
impl<H, I> IndexedMerkleTree<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Creates a reference indexed Merkle tree with the given `height` and
    /// `canopy_depth`, pre-seeded with the zero indexed leaf.
    pub fn new(
        height: usize,
        canopy_depth: usize,
    ) -> Result<Self, IndexedReferenceMerkleTreeError> {
        let mut merkle_tree = MerkleTree::new(height, canopy_depth);

        // Append the first low leaf, which has value 0 and does not point
        // to any other leaf yet.
        // This low leaf is going to be updated during the first `update`
        // operation.
        merkle_tree.append(&H::zero_indexed_leaf())?;

        Ok(Self {
            merkle_tree,
            _index: PhantomData,
        })
    }

    /// Initializes the reference indexed merkle tree on par with the
    /// on-chain indexed concurrent merkle tree.
    /// Inserts the ranges 0 - BN254 Field Size - 1 into the tree.
    pub fn init(&mut self) -> Result<(), IndexedReferenceMerkleTreeError> {
        let mut indexed_array = IndexedArray::<H, I>::default();
        // PANICS: `HIGHEST_ADDRESS_PLUS_ONE` is a compile-time base-10
        // constant; parsing it cannot fail.
        let init_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10)
            .expect("HIGHEST_ADDRESS_PLUS_ONE is a valid base-10 constant");
        let nullifier_bundle = indexed_array.append(&init_value)?;
        let new_low_leaf = nullifier_bundle
            .new_low_element
            .hash::<H>(&nullifier_bundle.new_element.value)?;
        self.merkle_tree.update(&new_low_leaf, 0)?;
        let new_leaf = nullifier_bundle
            .new_element
            .hash::<H>(&nullifier_bundle.new_element_next_value)?;
        self.merkle_tree.append(&new_leaf)?;

        Ok(())
    }

    /// Returns the Merkle path (node values) of the leaf at `index`.
    pub fn get_path_of_leaf(
        &self,
        index: usize,
        full: bool,
    ) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        self.merkle_tree.get_path_of_leaf(index, full)
    }

    /// Returns the Merkle proof (sibling hashes) of the leaf at `index`.
    pub fn get_proof_of_leaf(
        &self,
        index: usize,
        full: bool,
    ) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        self.merkle_tree.get_proof_of_leaf(index, full)
    }

    /// Returns the current root of the tree.
    pub fn root(&self) -> [u8; 32] {
        self.merkle_tree.root()
    }

    // TODO: rename input values
    /// Applies one indexed insertion: re-hashes and updates the low leaf so
    /// it points at the new element, then appends the new element's leaf.
    pub fn update(
        &mut self,
        new_low_element: &IndexedElement<I>,
        new_element: &IndexedElement<I>,
        new_element_next_value: &BigUint,
    ) -> Result<(), IndexedReferenceMerkleTreeError> {
        // Update the low element.
        let new_low_leaf = new_low_element.hash::<H>(&new_element.value)?;
        self.merkle_tree
            .update(&new_low_leaf, usize::from(new_low_element.index))?;

        // Append the new element.
        let new_leaf = new_element.hash::<H>(new_element_next_value)?;
        self.merkle_tree.append(&new_leaf)?;

        Ok(())
    }

    // TODO: add append with new value, so that we don't need to compute the lowlevel values manually
    /// Appends `value` to both the indexed array and the tree.
    pub fn append(
        &mut self,
        value: &BigUint,
        indexed_array: &mut IndexedArray<H, I>,
    ) -> Result<(), IndexedReferenceMerkleTreeError> {
        // Propagate the error instead of panicking; `IndexedMerkleTreeError`
        // converts into our error type via `#[from]`.
        let nullifier_bundle = indexed_array.append(value)?;
        self.update(
            &nullifier_bundle.new_low_element,
            &nullifier_bundle.new_element,
            &nullifier_bundle.new_element_next_value,
        )?;

        Ok(())
    }

    /// Builds a non-inclusion proof for `value`: the low element's range
    /// `(lower, higher)` which must bracket `value`, plus the low leaf's
    /// Merkle proof.
    pub fn get_non_inclusion_proof(
        &self,
        value: &BigUint,
        indexed_array: &IndexedArray<H, I>,
    ) -> Result<NonInclusionProof, IndexedReferenceMerkleTreeError> {
        let (low_element, _next_value) = indexed_array.find_low_element_for_nonexistent(value)?;
        // PANICS: `BoundedVecError` has no conversion into our error enum;
        // a failure here indicates an internally inconsistent tree.
        let merkle_proof = self
            .get_proof_of_leaf(usize::from(low_element.index), true)
            .unwrap();
        let higher_range_value = indexed_array
            .get(low_element.next_index())
            .unwrap()
            .value
            .clone();

        Ok(NonInclusionProof {
            root: self.root(),
            value: bigint_to_be_bytes_array::<32>(value).unwrap(),
            leaf_lower_range_value: bigint_to_be_bytes_array::<32>(&low_element.value).unwrap(),
            leaf_higher_range_value: bigint_to_be_bytes_array::<32>(&higher_range_value).unwrap(),
            leaf_index: low_element.index.into(),
            next_index: low_element.next_index(),
            merkle_proof,
        })
    }

    /// Verifies a non-inclusion proof: checks the strict range bounds and
    /// the low leaf's inclusion in the root.
    pub fn verify_non_inclusion_proof(
        &self,
        proof: &NonInclusionProof,
    ) -> Result<(), IndexedReferenceMerkleTreeError> {
        let value_big_int = BigUint::from_bytes_be(&proof.value);
        // Both bounds are strict: the value must lie inside the open range.
        let lower_end_value = BigUint::from_bytes_be(&proof.leaf_lower_range_value);
        if lower_end_value >= value_big_int {
            return Err(IndexedReferenceMerkleTreeError::NonInclusionProofFailedLowerBoundViolated);
        }
        let higher_end_value = BigUint::from_bytes_be(&proof.leaf_higher_range_value);
        if higher_end_value <= value_big_int {
            return Err(
                IndexedReferenceMerkleTreeError::NonInclusionProofFailedHigherBoundViolated,
            );
        }

        // Reconstruct the low leaf and check its inclusion. Propagate the
        // verification error instead of panicking (`#[from]` conversion).
        let array_element = IndexedElement::<usize> {
            value: lower_end_value,
            index: proof.leaf_index,
            next_index: proof.next_index,
        };
        let leaf_hash = array_element.hash::<H>(&higher_end_value)?;
        self.merkle_tree
            .verify(&leaf_hash, &proof.merkle_proof, proof.leaf_index)?;
        Ok(())
    }
}
// TODO: check why next_index is usize while index is I
/// We prove non-inclusion by:
/// 1. Showing that value is greater than leaf_lower_range_value and less than leaf_higher_range_value
/// 2. Showing that the leaf_hash H(leaf_lower_range_value, leaf_next_index, leaf_higher_value) is included in the root (Merkle tree)
#[derive(Debug)]
pub struct NonInclusionProof {
    /// Root the proof was generated against.
    pub root: [u8; 32],
    /// Big-endian bytes of the value whose absence is proven.
    pub value: [u8; 32],
    /// Value of the low leaf (strict lower bound of the range).
    pub leaf_lower_range_value: [u8; 32],
    /// Value of the leaf the low leaf points to (strict upper bound).
    pub leaf_higher_range_value: [u8; 32],
    /// Index of the low leaf in the tree.
    pub leaf_index: usize,
    /// `next_index` of the low leaf.
    pub next_index: usize,
    /// Merkle inclusion proof for the low leaf.
    pub merkle_proof: BoundedVec<[u8; 32]>,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/changelog.rs
|
use light_concurrent_merkle_tree::event::RawIndexedElement;
/// One changelog record of an indexed Merkle tree operation.
///
/// NET_HEIGHT = HEIGHT - CANOPY_DEPTH
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct IndexedChangelogEntry<I, const NET_HEIGHT: usize>
where
    I: Clone,
{
    /// Element that was a subject to the change.
    pub element: RawIndexedElement<I>,
    /// Merkle proof of that operation.
    pub proof: [[u8; 32]; NET_HEIGHT],
    /// Index of a changelog entry in `ConcurrentMerkleTree` corresponding to
    /// the same operation.
    pub changelog_index: usize,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/bounded-vec/Cargo.toml
|
[package]
name = "light-bounded-vec"
version = "1.1.0"
description = "Bounded and cyclic vector implementations"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[features]
solana = ["solana-program"]
[dependencies]
bytemuck = { version = "1.17", features = ["min_const_generics"] }
memoffset = "0.9"
solana-program = { workspace = true, optional = true }
thiserror = "1.0"
[dev-dependencies]
rand = "0.8"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/bounded-vec
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/bounded-vec/src/lib.rs
|
use std::{
alloc::{self, handle_alloc_error, Layout},
fmt, mem,
ops::{Index, IndexMut},
ptr::{self, NonNull},
slice::{self, Iter, IterMut, SliceIndex},
};
use memoffset::span_of;
use thiserror::Error;
/// Errors returned by `BoundedVec` / `CyclicBoundedVec` operations.
#[derive(Debug, Error, PartialEq)]
pub enum BoundedVecError {
    /// `push` was called on a vector that already holds `capacity` elements.
    #[error("The vector is full, cannot push any new elements")]
    Full,
    /// `to_array::<N>` was called with `N` != current length.
    #[error("Requested array of size {0}, but the vector has {1} elements")]
    ArraySize(usize, usize),
    /// `iter_from` received a start index >= length.
    #[error("The requested start index is out of bounds.")]
    IterFromOutOfBounds,
}
// Stable numeric error codes for on-chain (Solana) error reporting.
#[cfg(feature = "solana")]
impl From<BoundedVecError> for u32 {
    fn from(e: BoundedVecError) -> u32 {
        match e {
            BoundedVecError::Full => 8001,
            BoundedVecError::ArraySize(_, _) => 8002,
            BoundedVecError::IterFromOutOfBounds => 8003,
        }
    }
}

// Allows `?` to convert `BoundedVecError` into a Solana `ProgramError`.
#[cfg(feature = "solana")]
impl From<BoundedVecError> for solana_program::program_error::ProgramError {
    fn from(e: BoundedVecError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}
/// Heap-allocated header of a `BoundedVec`: fixed capacity plus current
/// length. (De)serialized as little-endian bytes, with field offsets kept
/// in sync via `memoffset::span_of!`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct BoundedVecMetadata {
    capacity: usize,
    length: usize,
}

impl BoundedVecMetadata {
    /// Creates metadata for an empty vector with the given `capacity`.
    pub fn new(capacity: usize) -> Self {
        Self {
            capacity,
            length: 0,
        }
    }

    /// Creates metadata with an explicit `length`.
    // NOTE(review): `length > capacity` is not validated here — callers are
    // trusted; confirm all call sites uphold `length <= capacity`.
    pub fn new_with_length(capacity: usize, length: usize) -> Self {
        Self { capacity, length }
    }

    /// Deserializes metadata from the byte layout produced by
    /// `to_le_bytes`.
    pub fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
        Self {
            capacity: usize::from_le_bytes(bytes[span_of!(Self, capacity)].try_into().unwrap()),
            length: usize::from_le_bytes(bytes[span_of!(Self, length)].try_into().unwrap()),
        }
    }

    /// Serializes metadata as little-endian bytes at the struct's native
    /// field offsets.
    pub fn to_le_bytes(&self) -> [u8; mem::size_of::<Self>()] {
        let mut bytes = [0u8; mem::size_of::<Self>()];

        bytes[span_of!(Self, capacity)].copy_from_slice(&self.capacity.to_le_bytes());
        bytes[span_of!(Self, length)].copy_from_slice(&self.length.to_le_bytes());

        bytes
    }

    /// Maximum number of elements the vector can hold.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Current number of initialized elements.
    pub fn length(&self) -> usize {
        self.length
    }
}
/// `BoundedVec` is a custom vector implementation which forbids
/// post-initialization reallocations. The size is not known during compile
/// time (that makes it different from arrays), but can be defined only once
/// (that makes it different from [`Vec`](std::vec::Vec)).
pub struct BoundedVec<T>
where
    T: Clone,
{
    // Raw pointer to the heap-allocated header (capacity + length); owned
    // by this vector and freed in `Drop`.
    metadata: *mut BoundedVecMetadata,
    // Pointer to the element buffer; slots `0..length` are initialized.
    data: NonNull<T>,
}
impl<T> BoundedVec<T>
where
    T: Clone,
{
    /// Heap-allocates a metadata header with the given `capacity` and
    /// length 0.
    #[inline]
    fn metadata_with_capacity(capacity: usize) -> *mut BoundedVecMetadata {
        let layout = Layout::new::<BoundedVecMetadata>();
        let metadata = unsafe { alloc::alloc(layout) as *mut BoundedVecMetadata };
        if metadata.is_null() {
            handle_alloc_error(layout);
        }
        // SAFETY: `metadata` is non-null and allocated with the layout of
        // `BoundedVecMetadata`; the struct holds only `usize`s, so the
        // assignment drops nothing meaningful.
        unsafe {
            *metadata = BoundedVecMetadata {
                capacity,
                length: 0,
            };
        }

        metadata
    }

    /// Heap-allocates a copy of `src_metadata`.
    #[inline]
    fn metadata_from(src_metadata: &BoundedVecMetadata) -> *mut BoundedVecMetadata {
        let layout = Layout::new::<BoundedVecMetadata>();
        let metadata = unsafe { alloc::alloc(layout) as *mut BoundedVecMetadata };
        if metadata.is_null() {
            handle_alloc_error(layout);
        }
        // NOTE(review): `clone_from` runs on never-initialized memory;
        // harmless for plain `usize` fields, but `ptr::write` would be the
        // strictly correct way to initialize it.
        unsafe { (*metadata).clone_from(src_metadata) };

        metadata
    }

    /// Heap-allocates an uninitialized buffer for `capacity` elements.
    #[inline]
    fn data_with_capacity(capacity: usize) -> NonNull<T> {
        // NOTE(review): for `capacity == 0` this calls `alloc` with a
        // zero-size layout, which `GlobalAlloc::alloc` documents as
        // undefined behavior; `with_capacity(0)` reaches this path —
        // consider guarding.
        let layout = Layout::array::<T>(capacity).unwrap();
        let data_ptr = unsafe { alloc::alloc(layout) as *mut T };
        if data_ptr.is_null() {
            handle_alloc_error(layout);
        }
        // PANICS: We ensured that the pointer is not NULL.
        NonNull::new(data_ptr).unwrap()
    }

    /// Creates an empty vector that can hold up to `capacity` elements.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let metadata = Self::metadata_with_capacity(capacity);
        let data = Self::data_with_capacity(capacity);

        Self { metadata, data }
    }

    /// Creates a `BoundedVec<T>` with the given `metadata`.
    ///
    /// # Safety
    ///
    /// This method is unsafe, as it does not guarantee the correctness of
    /// provided parameters (other than `capacity`). The full responisibility
    /// is on the caller. In particular, the buffer is uninitialized while
    /// the copied metadata may claim a non-zero length.
    #[inline]
    pub unsafe fn with_metadata(metadata: &BoundedVecMetadata) -> Self {
        let capacity = metadata.capacity();
        let metadata = Self::metadata_from(metadata);
        let data = Self::data_with_capacity(capacity);

        Self { metadata, data }
    }

    /// Returns a reference to the vector's metadata header.
    pub fn metadata(&self) -> &BoundedVecMetadata {
        unsafe { &*self.metadata }
    }

    /// Builds a vector of capacity `N` holding clones of `array`'s elements.
    pub fn from_array<const N: usize>(array: &[T; N]) -> Self {
        let mut vec = Self::with_capacity(N);
        for element in array {
            // SAFETY: We are sure that the array and the vector have equal
            // sizes, there is no chance for the error to occur.
            vec.push(element.clone()).unwrap();
        }
        vec
    }

    /// Builds a vector whose capacity equals `slice.len()`, holding clones
    /// of the slice's elements.
    pub fn from_slice(slice: &[T]) -> Self {
        let mut vec = Self::with_capacity(slice.len());
        for element in slice {
            // SAFETY: We are sure that the array and the vector have equal
            // sizes, there is no chance for the error to occur.
            vec.push(element.clone()).unwrap();
        }
        vec
    }

    /// Creates `BoundedVec<T>` directly from a metadata pointer and a data
    /// pointer.
    ///
    /// # Safety
    ///
    /// This is highly unsafe, due to the number of invariants that aren't
    /// checked:
    ///
    /// * `ptr` and `metadata` must have been allocated using the global
    ///   allocator, such as via the [`alloc::alloc`] function.
    /// * `T` needs to have the same alignment as what `ptr` was allocated with.
    ///   (`T` having a less strict alignment is not sufficient, the alignment really
    ///   needs to be equal to satisfy the [`dealloc`] requirement that memory must be
    ///   allocated and deallocated with the same layout.)
    /// * The size of `T` times the capacity (ie. the allocated size in bytes) needs
    ///   to be the same size as the pointer was allocated with. (Because similar to
    ///   alignment, [`dealloc`] must be called with the same layout `size`.)
    /// * The metadata's length needs to be less than or equal to its capacity.
    /// * The first `length` values must be properly initialized values of type `T`.
    /// * The metadata's capacity needs to be the capacity that the pointer
    ///   was allocated with.
    /// * The allocated size in bytes must be no larger than `isize::MAX`.
    ///   See the safety documentation of [`pointer::offset`].
    #[inline]
    pub unsafe fn from_raw_parts(metadata: *mut BoundedVecMetadata, ptr: *mut T) -> Self {
        let data = NonNull::new(ptr).unwrap();
        Self { metadata, data }
    }

    /// Returns the total number of elements the vector can hold.
    ///
    /// # Examples
    ///
    /// ```
    /// use light_bounded_vec::BoundedVec;
    ///
    /// let vec: BoundedVec<i32> = BoundedVec::with_capacity(10);
    /// assert_eq!(vec.capacity(), 10);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        unsafe { (*self.metadata).capacity }
    }

    /// Views the initialized prefix (`0..len`) as a slice.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len()) }
    }

    /// Views the initialized prefix (`0..len`) as a mutable slice.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        unsafe { slice::from_raw_parts_mut(self.data.as_ptr(), self.len()) }
    }

    /// Appends an element to the back of a collection.
    ///
    /// # Errors
    ///
    /// Returns [`BoundedVecError::Full`] if the vector already holds
    /// `capacity()` elements; the vector never reallocates.
    #[inline]
    pub fn push(&mut self, value: T) -> Result<(), BoundedVecError> {
        if self.len() == self.capacity() {
            return Err(BoundedVecError::Full);
        }

        // SAFETY: `len < capacity`, so the slot is in bounds and currently
        // uninitialized; `ptr::write` does not drop any previous value.
        unsafe { ptr::write(self.data.as_ptr().add(self.len()), value) };
        self.inc_len();

        Ok(())
    }

    /// Number of initialized elements.
    #[inline]
    pub fn len(&self) -> usize {
        unsafe { (*self.metadata).length }
    }

    // Increments the stored length after a successful `push`.
    #[inline]
    fn inc_len(&mut self) {
        unsafe { (*self.metadata).length += 1 };
    }

    /// Returns `true` if the vector holds no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Checked element access; `None` when `index >= len`.
    #[inline]
    pub fn get(&self, index: usize) -> Option<&T> {
        if index >= self.len() {
            return None;
        }
        let cell = unsafe { &*self.data.as_ptr().add(index) };
        Some(cell)
    }

    /// Checked mutable element access; `None` when `index >= len`.
    #[inline]
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        if index >= self.len() {
            return None;
        }
        let cell = unsafe { &mut *self.data.as_ptr().add(index) };
        Some(cell)
    }

    /// Returns a mutable pointer to `BoundedVec`'s buffer.
    #[inline(always)]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.data.as_ptr()
    }

    /// Iterates over shared references to the initialized elements.
    #[inline]
    pub fn iter(&self) -> Iter<'_, T> {
        self.as_slice().iter()
    }

    /// Iterates over mutable references to the initialized elements.
    #[inline]
    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
        self.as_mut_slice().iter_mut()
    }

    /// Returns the last element, or `None` when empty.
    #[inline]
    pub fn last(&self) -> Option<&T> {
        if self.is_empty() {
            return None;
        }
        self.get(self.len() - 1)
    }

    /// Returns a mutable reference to the last element, or `None` when
    /// empty.
    #[inline]
    pub fn last_mut(&mut self) -> Option<&mut T> {
        if self.is_empty() {
            return None;
        }
        self.get_mut(self.len() - 1)
    }

    /// Clones the elements into a fixed-size array; fails unless
    /// `len() == N`.
    pub fn to_array<const N: usize>(&self) -> Result<[T; N], BoundedVecError> {
        if self.len() != N {
            return Err(BoundedVecError::ArraySize(N, self.len()));
        }
        Ok(std::array::from_fn(|i| self.get(i).unwrap().clone()))
    }

    /// Clones the elements into a standard `Vec`, consuming `self`.
    pub fn to_vec(self) -> Vec<T> {
        self.as_slice().to_vec()
    }

    /// Pushes every item of `iter`; stops with `Full` when capacity is
    /// reached (items already pushed remain).
    pub fn extend<U: IntoIterator<Item = T>>(&mut self, iter: U) -> Result<(), BoundedVecError> {
        for item in iter {
            self.push(item)?;
        }
        Ok(())
    }
}
impl<T> Clone for BoundedVec<T>
where
T: Clone,
{
fn clone(&self) -> Self {
// Create a new buffer with the same capacity as the original
let layout = Layout::new::<BoundedVecMetadata>();
let metadata = unsafe { alloc::alloc(layout) as *mut BoundedVecMetadata };
if metadata.is_null() {
handle_alloc_error(layout);
}
unsafe { *metadata = (*self.metadata).clone() };
let layout = Layout::array::<T>(self.capacity()).unwrap();
let data_ptr = unsafe { alloc::alloc(layout) as *mut T };
if data_ptr.is_null() {
handle_alloc_error(layout);
}
let data = NonNull::new(data_ptr).unwrap();
// Copy elements from the original data slice to the new slice
let new_vec = Self { metadata, data };
// Clone each element into the new vector
for i in 0..self.len() {
unsafe { ptr::write(data_ptr.add(i), (*self.get(i).unwrap()).clone()) };
}
new_vec
}
}
impl<T> fmt::Debug for BoundedVec<T>
where
    T: Clone + fmt::Debug,
{
    /// Formats exactly like the underlying slice, e.g. `[1, 2, 3]`
    /// (including `{:#?}` alternate mode).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(self.as_slice(), f)
    }
}
impl<T> Drop for BoundedVec<T>
where
T: Clone,
{
fn drop(&mut self) {
let layout = Layout::array::<T>(self.capacity()).unwrap();
unsafe { alloc::dealloc(self.data.as_ptr() as *mut u8, layout) };
let layout = Layout::new::<BoundedVecMetadata>();
unsafe { alloc::dealloc(self.metadata as *mut u8, layout) };
}
}
// Consistency fix: the `I: SliceIndex<[T]>` bound was declared twice
// (inline and in the `where` clause); keep it once, matching the style of
// the `IndexMut` impl below.
impl<T, I> Index<I> for BoundedVec<T>
where
    T: Clone,
    I: SliceIndex<[T]>,
{
    type Output = I::Output;

    /// Slice-style indexing.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds access, exactly like slice indexing.
    #[inline]
    fn index(&self, index: I) -> &Self::Output {
        self.as_slice().index(index)
    }
}
impl<T, I> IndexMut<I> for BoundedVec<T>
where
    T: Clone,
    I: SliceIndex<[T]>,
{
    /// Mutable slice-style indexing; panics on out-of-bounds access.
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        self.as_mut_slice().index_mut(index)
    }
}
impl<T> IntoIterator for BoundedVec<T>
where
    T: Clone,
{
    type Item = T;
    type IntoIter = BoundedVecIntoIterator<T>;

    /// Consumes the vector, yielding owned (cloned) elements front to back.
    fn into_iter(self) -> Self::IntoIter {
        BoundedVecIntoIterator {
            vec: self,
            current: 0,
        }
    }
}
impl<T> PartialEq for BoundedVec<T>
where
    T: Clone + PartialEq,
{
    /// Element-wise equality; capacity is not part of the comparison.
    fn eq(&self, other: &Self) -> bool {
        self.iter().eq(other.iter())
    }
}

impl<T> Eq for BoundedVec<T> where T: Clone + Eq {}
/// Owning iterator for [`BoundedVec`], produced by `into_iter`. Yields
/// clones of the elements in order; the vector (and its originals) is
/// freed when the iterator is dropped.
pub struct BoundedVecIntoIterator<T>
where
    T: Clone,
{
    vec: BoundedVec<T>,
    current: usize,
}

impl<T> Iterator for BoundedVecIntoIterator<T>
where
    T: Clone,
{
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        // Advance the cursor unconditionally; `get` returns `None` once the
        // cursor passes the length, ending iteration.
        let index = self.current;
        self.current = index + 1;
        self.vec.get(index).cloned()
    }
}
/// Heap-allocated header of a `CyclicBoundedVec`: fixed capacity, current
/// length and the physical indices of the oldest (`first_index`) and newest
/// (`last_index`) elements. (De)serialized little-endian via
/// `memoffset::span_of!`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CyclicBoundedVecMetadata {
    capacity: usize,
    length: usize,
    first_index: usize,
    last_index: usize,
}

impl CyclicBoundedVecMetadata {
    /// Creates metadata for an empty cyclic vector.
    pub fn new(capacity: usize) -> Self {
        Self {
            capacity,
            length: 0,
            first_index: 0,
            last_index: 0,
        }
    }

    /// Creates metadata with explicit length and cursor indices.
    // NOTE(review): no validation that `length <= capacity` or that the
    // indices are within `capacity` — callers are trusted.
    pub fn new_with_indices(
        capacity: usize,
        length: usize,
        first_index: usize,
        last_index: usize,
    ) -> Self {
        Self {
            capacity,
            length,
            first_index,
            last_index,
        }
    }

    /// Deserializes metadata from the byte layout produced by
    /// `to_le_bytes`.
    pub fn from_le_bytes(bytes: [u8; mem::size_of::<CyclicBoundedVecMetadata>()]) -> Self {
        Self {
            capacity: usize::from_le_bytes(bytes[span_of!(Self, capacity)].try_into().unwrap()),
            length: usize::from_le_bytes(bytes[span_of!(Self, length)].try_into().unwrap()),
            first_index: usize::from_le_bytes(
                bytes[span_of!(Self, first_index)].try_into().unwrap(),
            ),
            last_index: usize::from_le_bytes(bytes[span_of!(Self, last_index)].try_into().unwrap()),
        }
    }

    /// Serializes metadata as little-endian bytes at the struct's native
    /// field offsets.
    pub fn to_le_bytes(&self) -> [u8; mem::size_of::<Self>()] {
        let mut bytes = [0u8; mem::size_of::<Self>()];

        bytes[span_of!(Self, capacity)].copy_from_slice(&self.capacity.to_le_bytes());
        bytes[span_of!(Self, length)].copy_from_slice(&self.length.to_le_bytes());
        bytes[span_of!(Self, first_index)].copy_from_slice(&self.first_index.to_le_bytes());
        bytes[span_of!(Self, last_index)].copy_from_slice(&self.last_index.to_le_bytes());

        bytes
    }

    /// Maximum number of elements the vector can hold.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Current number of initialized elements.
    pub fn length(&self) -> usize {
        self.length
    }
}
/// `CyclicBoundedVec` is a wrapper around [`Vec`](std::vec::Vec) which:
///
/// * Forbids post-initialization reallocations.
/// * Starts overwriting elements from the beginning once it reaches its
///   capacity.
pub struct CyclicBoundedVec<T>
where
    T: Clone,
{
    // Raw pointer to the heap-allocated header (capacity, length and the
    // first/last cursor indices); owned by this vector, freed in `Drop`.
    metadata: *mut CyclicBoundedVecMetadata,
    // Pointer to the element buffer; slots `0..length` are initialized.
    data: NonNull<T>,
}
impl<T> CyclicBoundedVec<T>
where
    T: Clone,
{
    /// Heap-allocates a metadata header for an empty cyclic vector.
    #[inline]
    fn metadata_with_capacity(capacity: usize) -> *mut CyclicBoundedVecMetadata {
        let layout = Layout::new::<CyclicBoundedVecMetadata>();
        let metadata = unsafe { alloc::alloc(layout) as *mut CyclicBoundedVecMetadata };
        if metadata.is_null() {
            handle_alloc_error(layout);
        }
        // SAFETY: `metadata` is non-null and allocated with the matching
        // layout; the struct holds only `usize`s.
        unsafe {
            *metadata = CyclicBoundedVecMetadata {
                capacity,
                length: 0,
                first_index: 0,
                last_index: 0,
            };
        }

        metadata
    }

    /// Heap-allocates a copy of `src_metadata`.
    #[inline]
    fn metadata_from(src_metadata: &CyclicBoundedVecMetadata) -> *mut CyclicBoundedVecMetadata {
        let layout = Layout::new::<CyclicBoundedVecMetadata>();
        let metadata = unsafe { alloc::alloc(layout) as *mut CyclicBoundedVecMetadata };
        if metadata.is_null() {
            handle_alloc_error(layout);
        }
        // NOTE(review): `clone_from` on never-initialized memory; harmless
        // for plain `usize` fields but `ptr::write` would be more correct.
        unsafe { (*metadata).clone_from(src_metadata) };

        metadata
    }

    /// Heap-allocates an uninitialized buffer for `capacity` elements.
    #[inline]
    fn data_with_capacity(capacity: usize) -> NonNull<T> {
        // NOTE(review): `capacity == 0` leads to `alloc` with a zero-size
        // layout, which is documented as undefined behavior.
        let layout = Layout::array::<T>(capacity).unwrap();
        let data_ptr = unsafe { alloc::alloc(layout) as *mut T };
        if data_ptr.is_null() {
            handle_alloc_error(layout);
        }
        // PANICS: We ensured that the pointer is not NULL.
        NonNull::new(data_ptr).unwrap()
    }

    /// Creates an empty cyclic vector that can hold up to `capacity`
    /// elements before it starts overwriting the oldest ones.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let metadata = Self::metadata_with_capacity(capacity);
        let data = Self::data_with_capacity(capacity);

        Self { metadata, data }
    }

    /// Creates a `CyclicBoundedVec<T>` with the given `metadata`.
    ///
    /// # Safety
    ///
    /// This method is unsafe, as it does not guarantee the correctness of
    /// provided parameters (other than `capacity`). The full responisibility
    /// is on the caller. In particular, the buffer is uninitialized while
    /// the copied metadata may claim a non-zero length.
    #[inline]
    pub unsafe fn with_metadata(metadata: &CyclicBoundedVecMetadata) -> Self {
        let capacity = metadata.capacity();
        let metadata = Self::metadata_from(metadata);
        let data = Self::data_with_capacity(capacity);

        Self { metadata, data }
    }

    /// Returns a reference to the vector's metadata header.
    pub fn metadata(&self) -> &CyclicBoundedVecMetadata {
        unsafe { &*self.metadata }
    }

    /// Creates a `CyclicBoundedVec<T>` directly from a metadata pointer and
    /// a data pointer.
    ///
    /// # Safety
    ///
    /// This is highly unsafe, due to the number of invariants that aren't
    /// checked:
    ///
    /// * `ptr` and `metadata` must have been allocated using the global
    ///   allocator, such as via the [`alloc::alloc`] function.
    /// * `T` needs to have the same alignment as what `ptr` was allocated with.
    ///   (`T` having a less strict alignment is not sufficient, the alignment really
    ///   needs to be equal to satisfy the [`dealloc`] requirement that memory must be
    ///   allocated and deallocated with the same layout.)
    /// * The size of `T` times the capacity (ie. the allocated size in bytes) needs
    ///   to be the same size as the pointer was allocated with. (Because similar to
    ///   alignment, [`dealloc`] must be called with the same layout `size`.)
    /// * The metadata's length needs to be less than or equal to its capacity.
    /// * The first `length` values must be properly initialized values of type `T`.
    /// * The metadata's capacity needs to be the capacity that the pointer
    ///   was allocated with.
    /// * The allocated size in bytes must be no larger than `isize::MAX`.
    ///   See the safety documentation of [`pointer::offset`].
    #[inline]
    pub unsafe fn from_raw_parts(metadata: *mut CyclicBoundedVecMetadata, ptr: *mut T) -> Self {
        let data = NonNull::new(ptr).unwrap();
        Self { metadata, data }
    }

    /// Returns the total number of elements the vector can hold.
    ///
    /// # Examples
    ///
    /// ```
    /// use light_bounded_vec::CyclicBoundedVec;
    ///
    /// let vec: CyclicBoundedVec<i32> = CyclicBoundedVec::with_capacity(10);
    /// assert_eq!(vec.capacity(), 10);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        unsafe { (*self.metadata).capacity }
    }

    /// Views the initialized slots `0..len` in *physical* buffer order —
    /// not in cyclic (oldest-to-newest) order; use `iter` for that.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len()) }
    }

    /// Appends an element; once the vector is full it wraps around and
    /// overwrites the oldest element.
    #[inline]
    pub fn push(&mut self, value: T) {
        if self.is_empty() {
            // First element: `last_index` stays at 0.
            self.inc_len();
        } else if self.len() < self.capacity() {
            self.inc_len();
            self.inc_last_index();
        } else {
            // Full: advance both cursors, reusing the oldest slot.
            self.inc_last_index();
            self.inc_first_index();
        }
        // SAFETY: We made sure that `last_index` doesn't exceed the capacity.
        // NOTE(review): when the vector is full, `ptr::write` overwrites an
        // initialized slot without dropping the old value — heap-owning `T`
        // (e.g. `String`) leaks here; consider `drop_in_place` first.
        unsafe {
            std::ptr::write(self.data.as_ptr().add(self.last_index()), value);
        }
    }

    /// Number of initialized elements (saturates at `capacity`).
    #[inline]
    pub fn len(&self) -> usize {
        unsafe { (*self.metadata).length }
    }

    // Increments the stored length; only called while `len < capacity`.
    #[inline]
    fn inc_len(&mut self) {
        unsafe { (*self.metadata).length += 1 }
    }

    /// Returns `true` if the vector holds no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Checked access by *physical* buffer index; `None` when
    /// `index >= len`.
    #[inline]
    pub fn get(&self, index: usize) -> Option<&T> {
        if index >= self.len() {
            return None;
        }
        let cell = unsafe { &*self.data.as_ptr().add(index) };
        Some(cell)
    }

    /// Checked mutable access by physical buffer index.
    #[inline]
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        if index >= self.len() {
            return None;
        }
        let cell = unsafe { &mut *self.data.as_ptr().add(index) };
        Some(cell)
    }

    /// Returns a mutable pointer to `BoundedVec`'s buffer.
    #[inline(always)]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.data.as_ptr()
    }

    /// Iterates in cyclic order, from the oldest element (`first_index`)
    /// to the newest (`last_index`).
    #[inline]
    pub fn iter(&self) -> CyclicBoundedVecIterator<'_, T> {
        CyclicBoundedVecIterator {
            vec: self,
            current: self.first_index(),
            is_finished: false,
        }
    }

    /// Iterates in cyclic order starting from the *physical* index `start`.
    // NOTE(review): `start` is validated only against `len`, not mapped
    // through `first_index` — confirm callers pass physical indices.
    #[inline]
    pub fn iter_from(
        &self,
        start: usize,
    ) -> Result<CyclicBoundedVecIterator<'_, T>, BoundedVecError> {
        if start >= self.len() {
            return Err(BoundedVecError::IterFromOutOfBounds);
        }
        Ok(CyclicBoundedVecIterator {
            vec: self,
            current: start,
            is_finished: false,
        })
    }

    /// Physical index of the oldest element.
    #[inline]
    pub fn first_index(&self) -> usize {
        unsafe { (*self.metadata).first_index }
    }

    // Advances the oldest-element cursor, wrapping at `capacity`.
    // NOTE(review): mutates through a raw pointer despite taking `&self`;
    // works because `metadata` is a raw pointer, but `&mut self` would
    // express the intent better.
    #[inline]
    fn inc_first_index(&self) {
        unsafe {
            (*self.metadata).first_index = ((*self.metadata).first_index + 1) % self.capacity();
        }
    }

    /// Returns the oldest element, or `None` when empty.
    #[inline]
    pub fn first(&self) -> Option<&T> {
        self.get(self.first_index())
    }

    /// Returns a mutable reference to the oldest element.
    #[inline]
    pub fn first_mut(&mut self) -> Option<&mut T> {
        self.get_mut(self.first_index())
    }

    /// Physical index of the newest element.
    #[inline]
    pub fn last_index(&self) -> usize {
        unsafe { (*self.metadata).last_index }
    }

    // Advances the newest-element cursor, wrapping at `capacity`.
    #[inline]
    fn inc_last_index(&mut self) {
        unsafe {
            (*self.metadata).last_index = ((*self.metadata).last_index + 1) % self.capacity();
        }
    }

    /// Returns the newest element, or `None` when empty.
    #[inline]
    pub fn last(&self) -> Option<&T> {
        self.get(self.last_index())
    }

    /// Returns a mutable reference to the newest element.
    #[inline]
    pub fn last_mut(&mut self) -> Option<&mut T> {
        self.get_mut(self.last_index())
    }
}
impl<T> fmt::Debug for CyclicBoundedVec<T>
where
    T: Clone + fmt::Debug,
{
    /// Formats the buffer in *physical* order (`as_slice`), which after a
    /// wrap-around differs from the oldest-to-newest order of `iter`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self.as_slice())
    }
}
impl<T> Drop for CyclicBoundedVec<T>
where
T: Clone,
{
fn drop(&mut self) {
let layout = Layout::array::<T>(self.capacity()).unwrap();
unsafe { alloc::dealloc(self.data.as_ptr() as *mut u8, layout) };
let layout = Layout::new::<CyclicBoundedVecMetadata>();
unsafe { alloc::dealloc(self.metadata as *mut u8, layout) };
}
}
impl<T> Index<usize> for CyclicBoundedVec<T>
where
    T: Clone,
{
    type Output = T;

    /// Indexes by *physical* buffer position.
    ///
    /// # Panics
    ///
    /// Panics when `index >= len()` (`get` returns `None`).
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).unwrap()
    }
}

impl<T> IndexMut<usize> for CyclicBoundedVec<T>
where
    T: Clone,
{
    /// Mutable physical-position indexing; panics when `index >= len()`.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.get_mut(index).unwrap()
    }
}
impl<T> PartialEq for CyclicBoundedVec<T>
where
    T: Clone + PartialEq,
{
    /// Element-wise equality in cyclic (oldest-to-newest) iteration order.
    fn eq(&self, other: &Self) -> bool {
        self.iter().eq(other.iter())
    }
}

impl<T> Eq for CyclicBoundedVec<T> where T: Clone + Eq {}
/// Borrowing iterator over a [`CyclicBoundedVec`]: starts at `current`
/// (normally `first_index`) and walks the buffer modulo `capacity` until it
/// has yielded the element at `last_index`.
pub struct CyclicBoundedVecIterator<'a, T>
where
    T: Clone,
{
    vec: &'a CyclicBoundedVec<T>,
    current: usize,
    is_finished: bool,
}

impl<'a, T> Iterator for CyclicBoundedVecIterator<'a, T>
where
    T: Clone,
{
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
        if self.vec.capacity() == 0 || self.is_finished {
            None
        } else {
            // Flag completion when we are about to yield the newest
            // element, so the following call returns `None` instead of
            // cycling forever.
            if self.current == self.vec.last_index() {
                self.is_finished = true;
            }
            let new_current = (self.current + 1) % self.vec.capacity();
            let element = self.vec.get(self.current);
            self.current = new_current;
            element
        }
    }
}
#[cfg(test)]
mod test {
use std::array;
use rand::{
distributions::{Distribution, Standard},
thread_rng, Rng,
};
use super::*;
use rand::distributions::uniform::{SampleRange, SampleUniform};
/// Generates a random value in the given range, excluding the values provided
/// in `exclude`.
/// Generates a random value in the given range, excluding the values provided
/// in `exclude`. Loops (rejection sampling) until a non-excluded value is
/// drawn, so `exclude` must not cover the whole range.
fn gen_range_exclude<N, R, T>(rng: &mut N, range: R, exclude: &[T]) -> T
where
    N: Rng,
    R: Clone + SampleRange<T>,
    T: PartialEq + SampleUniform,
{
    loop {
        // This utility is supposed to be used only in unit tests. This `clone`
        // is harmless and necessary (can't pass a reference to range, it has
        // to be moved).
        let sample = rng.gen_range(range.clone());
        if !exclude.contains(&sample) {
            return sample;
        }
    }
}

// Sanity check: no drawn sample ever equals an excluded value.
#[test]
fn test_gen_range_exclude() {
    let mut rng = thread_rng();
    for n_excluded in 1..100 {
        let excluded: Vec<u64> = (0..n_excluded).map(|_| rng.gen_range(0..100)).collect();
        for _ in 0..10_000 {
            let sample = gen_range_exclude(&mut rng, 0..100, excluded.as_slice());
            for excluded in excluded.iter() {
                assert_ne!(&sample, excluded);
            }
        }
    }
}
// Builds a `BoundedVec` with random capacity (1..1000) and a random number
// of random elements (strictly fewer than capacity).
fn rand_bounded_vec<T>() -> BoundedVec<T>
where
    T: Clone,
    Standard: Distribution<T>,
{
    let mut rng = rand::thread_rng();

    let capacity = rng.gen_range(1..1000);
    let length = rng.gen_range(0..capacity);

    let mut bounded_vec = BoundedVec::<T>::with_capacity(capacity);
    for _ in 0..length {
        let element = rng.gen();
        bounded_vec.push(element).unwrap();
    }

    bounded_vec
}
// Round-trips metadata through `to_le_bytes`/`from_le_bytes`.
#[test]
fn test_bounded_vec_metadata_serialization() {
    let mut rng = thread_rng();

    for _ in 0..1000 {
        let capacity = rng.gen();
        let metadata = BoundedVecMetadata::new(capacity);

        assert_eq!(metadata.capacity(), capacity);
        assert_eq!(metadata.length(), 0);

        let bytes = metadata.to_le_bytes();
        let metadata_2 = BoundedVecMetadata::from_le_bytes(bytes);

        assert_eq!(metadata, metadata_2);
    }
}

// `with_capacity` yields an empty vector with the exact requested capacity.
#[test]
fn test_bounded_vec_with_capacity() {
    for capacity in 0..1024 {
        let bounded_vec = BoundedVec::<u32>::with_capacity(capacity);

        assert_eq!(bounded_vec.capacity(), capacity);
        assert_eq!(bounded_vec.len(), 0);
    }
}

// Shared body for the `from_array` tests below, parameterized by size.
fn bounded_vec_from_array<const N: usize>() {
    let mut rng = thread_rng();

    let arr: [u64; N] = array::from_fn(|_| rng.gen());
    let vec = BoundedVec::from_array(&arr);

    assert_eq!(&arr, vec.as_slice());
}

#[test]
fn test_bounded_vec_from_array_256() {
    bounded_vec_from_array::<256>()
}

#[test]
fn test_bounded_vec_from_array_512() {
    bounded_vec_from_array::<512>()
}

#[test]
fn test_bounded_vec_from_array_1024() {
    bounded_vec_from_array::<1024>()
}

// `from_slice` preserves element order and content for many sizes.
#[test]
fn test_bounded_vec_from_slice() {
    let mut rng = thread_rng();

    for capacity in 0..10_000 {
        let vec: Vec<u64> = (0..capacity).map(|_| rng.gen()).collect();
        let bounded_vec = BoundedVec::from_slice(&vec);

        assert_eq!(vec.as_slice(), bounded_vec.as_slice());
    }
}
#[test]
fn test_bounded_vec_is_empty() {
let mut rng = thread_rng();
let mut vec = BoundedVec::with_capacity(1000);
assert!(vec.is_empty());
for _ in 0..1000 {
let element: u64 = rng.gen();
vec.push(element).unwrap();
assert!(!vec.is_empty());
}
}
#[test]
fn test_bounded_vec_get() {
let mut vec = BoundedVec::with_capacity(1000);
for i in 0..1000 {
assert!(vec.get(i).is_none());
vec.push(i).unwrap();
}
for i in 0..1000 {
assert_eq!(vec.get(i), Some(&i));
}
for i in 1000..10_000 {
assert!(vec.get(i).is_none());
}
}
#[test]
fn test_bounded_vec_get_mut() {
let mut vec = BoundedVec::with_capacity(1000);
for i in 0..1000 {
assert!(vec.get_mut(i).is_none());
vec.push(i).unwrap();
}
for i in 0..1000 {
let element = vec.get_mut(i).unwrap();
assert_eq!(element, &i);
*element = i * 2;
}
for i in 0..1000 {
assert_eq!(vec.get_mut(i), Some(&mut (i * 2)));
}
for i in 1000..10_000 {
assert!(vec.get_mut(i).is_none());
}
}
#[test]
fn test_bounded_vec_iter_mut() {
let mut vec = BoundedVec::with_capacity(1000);
for i in 0..1000 {
vec.push(i).unwrap();
}
for (i, element) in vec.iter().enumerate() {
assert_eq!(*element, i);
}
for element in vec.iter_mut() {
*element = *element * 2;
}
for (i, element) in vec.iter().enumerate() {
assert_eq!(*element, i * 2);
}
}
/// `last` is `None` while empty and always points at the latest push.
#[test]
fn test_bounded_vec_last() {
    let mut rng = thread_rng();
    let mut vec = BoundedVec::with_capacity(1000);
    assert!(vec.last().is_none());
    for _ in 0..1000 {
        let value: u64 = rng.gen();
        vec.push(value).unwrap();
        assert_eq!(vec.last(), Some(&value));
    }
}
/// `last_mut` returns a usable mutable reference to the newest element.
#[test]
fn test_bounded_vec_last_mut() {
    let mut rng = thread_rng();
    let mut vec = BoundedVec::with_capacity(1000);
    assert!(vec.last_mut().is_none());
    for _ in 0..1000 {
        let pushed: u64 = rng.gen();
        vec.push(pushed).unwrap();
        assert_eq!(*vec.last_mut().unwrap(), pushed);
        // Overwrite the last element through the mutable reference and
        // confirm the write is visible on the next lookup.
        let replacement: u64 = rng.gen();
        *vec.last_mut().unwrap() = replacement;
        assert_eq!(*vec.last_mut().unwrap(), replacement);
    }
}
/// `to_array` succeeds only when the requested array length matches the
/// vector length exactly; any other length yields `ArraySize`.
#[test]
fn test_bounded_vec_to_array() {
    let vec = BoundedVec::from_array(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
    let exact: [u32; 16] = vec.to_array().unwrap();
    assert_eq!(exact, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
    // Too short and too long both fail.
    assert!(matches!(
        vec.to_array::<15>(),
        Err(BoundedVecError::ArraySize(_, _))
    ));
    assert!(matches!(
        vec.to_array::<17>(),
        Err(BoundedVecError::ArraySize(_, _))
    ));
}
/// `to_vec` reproduces the original contents for a range of lengths.
#[test]
fn test_bounded_vec_to_vec() {
    let mut rng = thread_rng();
    for length in (0..10_000).step_by(100) {
        let source: Vec<u64> = (0..length).map(|_| rng.gen()).collect();
        let round_tripped = BoundedVec::from_slice(&source).to_vec();
        assert_eq!(source.as_slice(), round_tripped.as_slice());
    }
}
/// `extend` appends an iterator's items without touching the capacity.
#[test]
fn test_bounded_vec_extend() {
    let mut rng = thread_rng();
    for capacity in (1..10_000).step_by(100) {
        let length = rng.gen_range(0..capacity);
        let mut vec = BoundedVec::with_capacity(capacity);
        vec.extend(0..length).unwrap();
        assert_eq!(vec.capacity(), capacity);
        assert_eq!(vec.len(), length);
        for (got, expected) in vec.iter().zip(0..length) {
            assert_eq!(*got, expected);
        }
    }
}
/// `clone` preserves capacity, length and contents.
#[test]
fn test_bounded_vec_clone() {
    for _ in 0..1000 {
        let original = rand_bounded_vec::<u32>();
        let copy = original.clone();
        assert_eq!(original.capacity(), copy.capacity());
        assert_eq!(original.len(), copy.len());
        assert_eq!(original, copy);
    }
}
/// `Index` and `IndexMut` provide direct read and write access.
#[test]
fn test_bounded_vec_index() {
    let mut vec = BoundedVec::with_capacity(1000);
    vec.extend(0..1000).unwrap();
    // Read access via `Index`.
    for i in 0..1000 {
        assert_eq!(vec[i], i);
    }
    // Write access via `IndexMut`, then verify the writes stuck.
    for i in 0..1000 {
        vec[i] = i * 2;
    }
    for i in 0..1000 {
        assert_eq!(vec[i], i * 2);
    }
}
/// `into_iter` yields owned elements in insertion order.
#[test]
fn test_bounded_vec_into_iter() {
    let mut vec = BoundedVec::with_capacity(1000);
    vec.extend(0..1000).unwrap();
    for (expected, element) in vec.into_iter().enumerate() {
        assert_eq!(element, expected);
    }
}
/// Round-trips `CyclicBoundedVecMetadata` through its little-endian byte
/// serialization for 1000 random capacities.
#[test]
fn test_cyclic_bounded_vec_metadata_serialization() {
    let mut rng = thread_rng();
    for _ in 0..1000 {
        let capacity = rng.gen();
        let original = CyclicBoundedVecMetadata::new(capacity);
        assert_eq!(original.capacity(), capacity);
        assert_eq!(original.length(), 0);
        let deserialized = CyclicBoundedVecMetadata::from_le_bytes(original.to_le_bytes());
        assert_eq!(original, deserialized);
    }
}
/// A freshly constructed `CyclicBoundedVec` reports the requested capacity,
/// zero length, and both cursor indices at 0.
#[test]
fn test_cyclic_bounded_vec_with_capacity() {
    for capacity in 0..1024 {
        let vec = CyclicBoundedVec::<u32>::with_capacity(capacity);
        assert_eq!(vec.capacity(), capacity);
        assert_eq!(vec.len(), 0);
        assert_eq!(vec.first_index(), 0);
        assert_eq!(vec.last_index(), 0);
    }
}
/// `is_empty` is true only before the first push.
#[test]
fn test_cyclic_bounded_vec_is_empty() {
    let mut rng = thread_rng();
    let mut vec = CyclicBoundedVec::with_capacity(1000);
    assert!(vec.is_empty());
    for _ in 0..1000 {
        vec.push(rng.gen::<u64>());
        assert!(!vec.is_empty());
    }
}
/// `get` returns `Some` for occupied indices and `None` past the length.
#[test]
fn test_cyclic_bounded_vec_get() {
    let mut vec = CyclicBoundedVec::with_capacity(1000);
    for value in 0..1000 {
        vec.push(value);
    }
    (0..1000).for_each(|i| assert_eq!(vec.get(i), Some(&i)));
    (1000..10_000).for_each(|i| assert!(vec.get(i).is_none()));
}
/// After pushing 2000 elements into a 1000-capacity cyclic vector only the
/// last 1000 values remain; verify them through `get_mut` and rewrite each
/// slot in place.
#[test]
fn test_cyclic_bounded_vec_get_mut() {
    let mut vec = CyclicBoundedVec::with_capacity(1000);
    for value in 0..2000 {
        vec.push(value);
    }
    for i in 0..1000 {
        let slot = vec.get_mut(i).unwrap();
        // The first 1000 pushes were overwritten by the wrap-around.
        assert_eq!(*slot, 1000 + i);
        *slot = i * 2;
    }
    for i in 0..1000 {
        assert_eq!(vec.get_mut(i), Some(&mut (i * 2)));
    }
    for i in 1000..10_000 {
        assert!(vec.get_mut(i).is_none());
    }
}
/// `first` is `None` while empty; once the vector wraps, the first element
/// trails the newest push by `capacity - 1`.
#[test]
fn test_cyclic_bounded_vec_first() {
    let mut vec = CyclicBoundedVec::with_capacity(500);
    assert!(vec.first().is_none());
    for i in 0..1000 {
        vec.push(i);
        assert_eq!(vec.first(), Some(&((i as u64).saturating_sub(499))));
    }
}
/// `last` is `None` while empty and always points at the newest push.
#[test]
fn test_cyclic_bounded_vec_last() {
    let mut rng = thread_rng();
    let mut vec = CyclicBoundedVec::with_capacity(500);
    assert!(vec.last().is_none());
    for _ in 0..1000 {
        let value: u64 = rng.gen();
        vec.push(value);
        assert_eq!(vec.last(), Some(&value));
    }
}
/// `last_mut` returns a usable mutable reference to the newest element.
#[test]
fn test_cyclic_bounded_vec_last_mut() {
    let mut rng = thread_rng();
    let mut vec = CyclicBoundedVec::with_capacity(500);
    assert!(vec.last_mut().is_none());
    for _ in 0..1000 {
        let pushed: u64 = rng.gen();
        vec.push(pushed);
        assert_eq!(*vec.last_mut().unwrap(), pushed);
        // Overwrite through the mutable reference and confirm the write is
        // visible on the next lookup.
        let replacement: u64 = rng.gen();
        *vec.last_mut().unwrap() = replacement;
        assert_eq!(*vec.last_mut().unwrap(), replacement);
    }
}
/// Walks a capacity-8 cyclic vector through an initial fill and three
/// successive wrap-around phases, checking the first/last cursor indices
/// and the logical iteration order after each phase.
#[test]
fn test_cyclic_bounded_vec_manual() {
    let mut vec = CyclicBoundedVec::with_capacity(8);
    // Phase 1: fill to capacity with 0..8.
    //
    // ```
    //        ^                    $
    // index [0, 1, 2, 3, 4, 5, 6, 7]
    // value [0, 1, 2, 3, 4, 5, 6, 7]
    // ```
    //
    // * `^` - first element
    // * `$` - last element
    (0..8).for_each(|i| vec.push(i));
    assert_eq!(vec.first_index(), 0);
    assert_eq!(vec.last_index(), 7);
    assert_eq!(
        vec.iter().collect::<Vec<_>>().as_slice(),
        &[&0, &1, &2, &3, &4, &5, &6, &7]
    );
    // Phase 2: push 8..12, overwriting the first half.
    //
    // ```
    //                  $   ^
    // index [0, 1,  2,  3, 4, 5, 6, 7]
    // value [8, 9, 10, 11, 4, 5, 6, 7]
    // ```
    (8..12).for_each(|i| vec.push(i));
    assert_eq!(vec.first_index(), 4);
    assert_eq!(vec.last_index(), 3);
    assert_eq!(
        vec.iter().collect::<Vec<_>>().as_slice(),
        &[&4, &5, &6, &7, &8, &9, &10, &11]
    );
    // Phase 3: push 12..14, overwriting two more slots.
    //
    // ```
    //                          $      ^
    // index [0, 1,  2,  3,  4,  5, 6, 7]
    // value [8, 9, 10, 11, 12, 13, 6, 7]
    // ```
    (12..14).for_each(|i| vec.push(i));
    assert_eq!(vec.first_index(), 6);
    assert_eq!(vec.last_index(), 5);
    assert_eq!(
        vec.iter().collect::<Vec<_>>().as_slice(),
        &[&6, &7, &8, &9, &10, &11, &12, &13]
    );
    // Phase 4: push 14..16 — every slot from the first fill has now been
    // overwritten and the cursors are back to 0 / 7.
    //
    // ```
    //        ^                            $
    // index [0, 1,  2,  3,  4,  5,  6,  7]
    // value [8, 9, 10, 11, 12, 13, 14, 15]
    // ```
    (14..16).for_each(|i| vec.push(i));
    assert_eq!(vec.first_index(), 0);
    assert_eq!(vec.last_index(), 7);
    assert_eq!(
        vec.iter().collect::<Vec<_>>().as_slice(),
        &[&8, &9, &10, &11, &12, &13, &14, &15]
    );
}
/// Iteration on a vector containing a single element.
///
/// ```
///       ^$
/// index [0]
/// value [0]
/// ```
///
/// * `^` - first element
/// * `$` - last element
///
/// Length: 1, capacity: 8, first index: 0, last index: 0.
///
/// Both `iter` and `iter_from(0)` should yield exactly one element.
#[test]
fn test_cyclic_bounded_vec_iter_one_element() {
    let mut vec = CyclicBoundedVec::with_capacity(8);
    vec.push(0);
    assert_eq!(vec.len(), 1);
    assert_eq!(vec.capacity(), 8);
    assert_eq!(vec.first_index(), 0);
    assert_eq!(vec.last_index(), 0);
    let visited: Vec<_> = vec.iter().collect();
    assert_eq!(visited.len(), 1);
    assert_eq!(visited.as_slice(), &[&0]);
    let visited: Vec<_> = vec.iter_from(0).unwrap().collect();
    assert_eq!(visited.len(), 1);
    assert_eq!(visited.as_slice(), &[&0]);
}
/// Iteration without reset in a vector which is not full.
///
/// ```
///              #  #  #  #
///        ^              $
/// index [0, 1, 2, 3, 4, 5]
/// value [0, 1, 2, 3, 4, 5]
/// ```
///
/// * `^` - first element
/// * `$` - last element
/// * `#` - visited elements
///
/// Length: 6, capacity: 8, first index: 0, last index: 5.
///
/// Starting from index 2 should visit elements 2..=5 — 4 iterations.
#[test]
fn test_cyclic_bounded_vec_iter_from_without_reset_not_full_6_8_4() {
    let mut vec = CyclicBoundedVec::with_capacity(8);
    (0..6).for_each(|i| vec.push(i));
    assert_eq!(vec.len(), 6);
    assert_eq!(vec.capacity(), 8);
    assert_eq!(vec.first_index(), 0);
    assert_eq!(vec.last_index(), 5);
    let visited: Vec<_> = vec.iter_from(2).unwrap().collect();
    assert_eq!(visited.len(), 4);
    assert_eq!(visited.as_slice(), &[&2, &3, &4, &5]);
}
/// Iteration without reset in a vector which is full.
///
/// ```
///              #  #  #
///        ^           $
/// index [0, 1, 2, 3, 4]
/// value [0, 1, 2, 3, 4]
/// ```
///
/// * `^` - first element
/// * `$` - last element
/// * `#` - visited elements
///
/// Length: 5, capacity: 5, first index: 0, last index: 4.
///
/// Starting from index 2 should visit elements 2..=4 — 3 iterations.
#[test]
fn test_cyclic_bounded_vec_iter_from_without_reset_not_full_5_5_4() {
    let mut vec = CyclicBoundedVec::with_capacity(5);
    (0..5).for_each(|i| vec.push(i));
    assert_eq!(vec.len(), 5);
    assert_eq!(vec.capacity(), 5);
    assert_eq!(vec.first_index(), 0);
    assert_eq!(vec.last_index(), 4);
    let visited: Vec<_> = vec.iter_from(2).unwrap().collect();
    assert_eq!(visited.len(), 3);
    assert_eq!(visited.as_slice(), &[&2, &3, &4]);
}
/// Iteration without reset in a vector which is full.
///
/// ```
///              #  #  #  #  #  #
///        ^                    $
/// index [0, 1, 2, 3, 4, 5, 6, 7]
/// value [0, 1, 2, 3, 4, 5, 6, 7]
/// ```
///
/// * `^` - first element
/// * `$` - last element
/// * `#` - visited elements
///
/// Length: 8, capacity: 8, first index: 0, last index: 7.
///
/// Starting from index 2 should visit elements 2..=7 — 6 iterations.
#[test]
fn test_cyclic_bounded_vec_iter_from_without_reset_full_8_8_6() {
    let mut vec = CyclicBoundedVec::with_capacity(8);
    (0..8).for_each(|i| vec.push(i));
    assert_eq!(vec.len(), 8);
    assert_eq!(vec.capacity(), 8);
    assert_eq!(vec.first_index(), 0);
    assert_eq!(vec.last_index(), 7);
    let visited: Vec<_> = vec.iter_from(2).unwrap().collect();
    assert_eq!(visited.len(), 6);
    assert_eq!(visited.as_slice(), &[&2, &3, &4, &5, &6, &7]);
}
/// Iteration with reset.
///
/// Pushing 12 elements into a capacity-8 vector makes it wrap and overwrite
/// elements from the start, leaving:
///
/// ```
///        #  #   #   #        #  #
///                   $  ^
/// index [0, 1,  2,  3, 4, 5, 6, 7]
/// value [8, 9, 10, 11, 4, 5, 6, 7]
/// ```
///
/// * `^` - first element
/// * `$` - last element
/// * `#` - visited elements
///
/// Length: 8, capacity: 8, first index: 4, last index: 3.
///
/// Starting from physical index 6 should visit 6, 7 and then wrap to
/// 8..=11 — 6 iterations.
#[test]
fn test_cyclic_bounded_vec_iter_from_reset() {
    let mut vec = CyclicBoundedVec::with_capacity(8);
    (0..12).for_each(|i| vec.push(i));
    assert_eq!(vec.len(), 8);
    assert_eq!(vec.capacity(), 8);
    assert_eq!(vec.first_index(), 4);
    assert_eq!(vec.last_index(), 3);
    let visited: Vec<_> = vec.iter_from(6).unwrap().collect();
    assert_eq!(visited.len(), 6);
    assert_eq!(visited.as_slice(), &[&6, &7, &8, &9, &10, &11]);
}
/// `iter_from` succeeds for every occupied index and fails with
/// `IterFromOutOfBounds` for any index at or past the length.
#[test]
fn test_cyclic_bounded_vec_iter_from_out_of_bounds_not_full() {
    let mut vec = CyclicBoundedVec::with_capacity(8);
    (0..4).for_each(|i| vec.push(i));
    // In-bounds starts iterate through to the last element.
    for start in 0..4 {
        let visited: Vec<_> = vec.iter_from(start).unwrap().copied().collect();
        let expected: Vec<_> = (start..4).collect();
        assert_eq!(visited, expected);
    }
    // Out-of-bounds starts are rejected.
    for start in 4..1000 {
        assert!(matches!(
            vec.iter_from(start),
            Err(BoundedVecError::IterFromOutOfBounds)
        ));
    }
}
/// In a full (wrapped) vector, any `iter_from` start at or past the
/// capacity is rejected.
#[test]
fn test_cyclic_bounded_vec_iter_from_out_of_bounds_full() {
    let mut vec = CyclicBoundedVec::with_capacity(8);
    (0..12).for_each(|i| vec.push(i));
    for start in 8..1000 {
        assert!(matches!(
            vec.iter_from(start),
            Err(BoundedVecError::IterFromOutOfBounds)
        ));
    }
}
/// `iter_from(i)` must fail until an element actually exists at index `i`.
#[test]
fn test_cyclic_bounded_vec_iter_from_out_of_bounds_iter_from() {
    let mut vec = CyclicBoundedVec::with_capacity(8);
    for i in 0..8 {
        assert!(matches!(
            vec.iter_from(i),
            Err(BoundedVecError::IterFromOutOfBounds)
        ));
        vec.push(i);
    }
}
/// After pushing 256 values into a 64-capacity vector, only the last 64
/// survive, in insertion order.
#[test]
fn test_cyclic_bounded_vec_overwrite() {
    let mut vec = CyclicBoundedVec::with_capacity(64);
    for value in 0..256 {
        vec.push(value);
    }
    assert_eq!(vec.len(), 64);
    assert_eq!(vec.capacity(), 64);
    let actual: Vec<i32> = vec.iter().copied().collect();
    let expected: Vec<i32> = (192..256).collect();
    assert_eq!(actual, expected);
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/Cargo.toml
|
[package]
name = "light-concurrent-merkle-tree"
version = "1.1.0"
edition = "2021"
description = "Concurrent Merkle tree implementation"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
[features]
heavy-tests = []
solana = [
"light-bounded-vec/solana",
"light-hasher/solana",
"solana-program"
]
[dependencies]
borsh = "0.10"
bytemuck = "1.17"
light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" }
light-hasher = { path = "../hasher", version = "1.1.0" }
light-utils = { version = "1.1.0", path = "../../utils" }
memoffset = "0.9"
solana-program = { workspace = true, optional = true }
thiserror = "1.0"
[dev-dependencies]
ark-bn254 = "0.4"
ark-ff = "0.4"
light-merkle-tree-reference = { path = "../reference", version = "1.1.0" }
light-hash-set = { workspace = true, features = ["solana"] }
rand = "0.8"
solana-program = { workspace = true }
spl-account-compression = { version = "0.3.0", default-features = false}
spl-concurrent-merkle-tree = { version = "0.2.0", default-features = false}
tokio = { workspace = true }
num-bigint = "0.4"
num-traits = "0.2"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/tests/tests.rs
|
use ark_bn254::Fr;
use ark_ff::{BigInteger, PrimeField, UniformRand};
use light_bounded_vec::{BoundedVec, BoundedVecError, CyclicBoundedVec};
use light_concurrent_merkle_tree::{
changelog::{ChangelogEntry, ChangelogPath},
errors::ConcurrentMerkleTreeError,
zero_copy::ConcurrentMerkleTreeZeroCopyMut,
ConcurrentMerkleTree,
};
use light_hash_set::HashSet;
use light_hasher::{Hasher, Keccak, Poseidon, Sha256};
use light_utils::rand::gen_range_exclude;
use num_bigint::BigUint;
use num_traits::FromBytes;
use rand::{rngs::ThreadRng, seq::SliceRandom, thread_rng, Rng};
use std::cmp;
/// Tests whether append operations work as expected.
///
/// Appends four leaves one by one to a height-4 tree and, after every
/// append, checks the changelog entry, root, root-history index, filled
/// subtrees, next index and rightmost leaf against manually computed
/// expectations.
fn append<H, const CANOPY: usize>()
where
    H: Hasher,
{
    const HEIGHT: usize = 4;
    const CHANGELOG: usize = 32;
    const ROOTS: usize = 256;
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    let leaf1 = H::hash(&[1u8; 32]).unwrap();
    // The hash of our new leaf and its sibling (a zero value).
    //
    //    H1
    //  /    \
    // L1   Z[0]
    let h1 = H::hashv(&[&leaf1, &H::zero_bytes()[0]]).unwrap();
    // The hash of `h1` and its sibling (a subtree represented by `Z[1]`).
    //
    //          H2
    //      /-/    \-\
    //    H1         Z[1]
    //  /    \      /    \
    // L1   Z[0]  Z[0]  Z[0]
    //
    // `Z[1]` represents the whole subtree on the right from `h2`. In the next
    // examples, we are just going to show empty subtrees instead of the whole
    // hierarchy.
    let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();
    // The hash of `h2` and its sibling (a subtree represented by `Z[2]`).
    //
    //         H3
    //       /    \
    //      H2   Z[2]
    //    /    \
    //   H1   Z[1]
    //  /  \
    // L1  Z[0]
    let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();
    // The hash of `h3` and its sibling (a subtree represented by `Z[3]`),
    // which is the root.
    //
    //           R
    //         /   \
    //        H3   Z[3]
    //      /    \
    //     H2   Z[2]
    //   /    \
    //  H1   Z[1]
    // /  \
    // L1 Z[0]
    let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();
    let expected_changelog_path = ChangelogPath([Some(leaf1), Some(h1), Some(h2), Some(h3)]);
    let expected_filled_subtrees = BoundedVec::from_array(&[leaf1, h1, h2, h3]);
    merkle_tree.append(&leaf1).unwrap();
    assert_eq!(merkle_tree.changelog_index(), 1);
    assert_eq!(
        merkle_tree.changelog[merkle_tree.changelog_index()],
        ChangelogEntry::new(expected_changelog_path, 0)
    );
    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(merkle_tree.roots.last_index(), 1);
    assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees);
    assert_eq!(merkle_tree.next_index(), 1);
    assert_eq!(merkle_tree.rightmost_leaf(), leaf1);
    // Appending the 2nd leaf should result in recomputing the root due to the
    // change of the `h1`, which now is a hash of the two non-zero leafs. So
    // when computing hashes from H2 up to the root, we are still going to use
    // zero bytes.
    //
    // The other subtrees still remain the same.
    //
    //           R
    //         /   \
    //        H3   Z[3]
    //      /    \
    //     H2   Z[2]
    //   /    \
    //  H1   Z[1]
    // /  \
    // L1  L2
    let leaf2 = H::hash(&[2u8; 32]).unwrap();
    let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
    let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();
    let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();
    let expected_changelog_path = ChangelogPath([Some(leaf2), Some(h1), Some(h2), Some(h3)]);
    let expected_filled_subtrees = BoundedVec::from_array(&[leaf1, h1, h2, h3]);
    merkle_tree.append(&leaf2).unwrap();
    assert_eq!(merkle_tree.changelog_index(), 2);
    assert_eq!(
        merkle_tree.changelog[merkle_tree.changelog_index()],
        ChangelogEntry::new(expected_changelog_path, 1),
    );
    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(merkle_tree.roots.last_index(), 2);
    assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees);
    assert_eq!(merkle_tree.next_index(), 2);
    assert_eq!(merkle_tree.rightmost_leaf(), leaf2);
    // Appending the 3rd leaf alters the next subtree on the right.
    // Instead of using Z[1], we will end up with the hash of the new leaf and
    // Z[0].
    //
    // The other subtrees still remain the same.
    //
    //             R
    //           /   \
    //          H4   Z[3]
    //        /    \
    //       H3   Z[2]
    //     /    \
    //    H1     H2
    //   /  \   /  \
    //  L1  L2 L3  Z[0]
    let leaf3 = H::hash(&[3u8; 32]).unwrap();
    let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
    let h2 = H::hashv(&[&leaf3, &H::zero_bytes()[0]]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
    let expected_changelog_path = ChangelogPath([Some(leaf3), Some(h2), Some(h3), Some(h4)]);
    let expected_filled_subtrees = BoundedVec::from_array(&[leaf3, h1, h3, h4]);
    merkle_tree.append(&leaf3).unwrap();
    assert_eq!(merkle_tree.changelog_index(), 3);
    assert_eq!(
        merkle_tree.changelog[merkle_tree.changelog_index()],
        ChangelogEntry::new(expected_changelog_path, 2),
    );
    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(merkle_tree.roots.last_index(), 3);
    assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees);
    assert_eq!(merkle_tree.next_index(), 3);
    assert_eq!(merkle_tree.rightmost_leaf(), leaf3);
    // Appending the 4th leaf fills the right sibling of L3: H2 becomes the
    // hash of L3 and L4 instead of L3 and Z[0].
    //
    // The other subtrees still remain the same.
    //
    //             R
    //           /   \
    //          H4   Z[3]
    //        /    \
    //       H3   Z[2]
    //     /    \
    //    H1     H2
    //   /  \   /  \
    //  L1  L2 L3  L4
    let leaf4 = H::hash(&[4u8; 32]).unwrap();
    let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
    let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
    let expected_changelog_path = ChangelogPath([Some(leaf4), Some(h2), Some(h3), Some(h4)]);
    let expected_filled_subtrees = BoundedVec::from_array(&[leaf3, h1, h3, h4]);
    merkle_tree.append(&leaf4).unwrap();
    assert_eq!(merkle_tree.changelog_index(), 4);
    assert_eq!(
        merkle_tree.changelog[merkle_tree.changelog_index()],
        ChangelogEntry::new(expected_changelog_path, 3),
    );
    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(merkle_tree.roots.last_index(), 4);
    assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees);
    assert_eq!(merkle_tree.next_index(), 4);
    assert_eq!(merkle_tree.rightmost_leaf(), leaf4);
}
/// Checks that `append_with_proof` returns a Merkle proof matching the
/// reference implementation after each of `N_APPENDS` random appends.
fn append_with_proof<
    H,
    const HEIGHT: usize,
    const CHANGELOG: usize,
    const ROOTS: usize,
    const CANOPY: usize,
    const N_APPENDS: usize,
>()
where
    H: Hasher,
{
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
    let mut rng = thread_rng();
    for leaf_index in 0..N_APPENDS {
        // A random field element serialized as a big-endian 32-byte leaf.
        let leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let mut proof = BoundedVec::with_capacity(HEIGHT);
        merkle_tree.append_with_proof(&leaf, &mut proof).unwrap();
        reference_tree.append(&leaf).unwrap();
        // The proof produced during the append must match the reference
        // tree's proof for the same leaf.
        let expected_proof = reference_tree.get_proof_of_leaf(leaf_index, true).unwrap();
        assert_eq!(proof, expected_proof);
    }
}
/// Performs invalid updates on the given Merkle tree by trying to swap all
/// parameters separately. Asserts the errors that the Merkle tree should
/// return as a part of validation of these inputs.
///
/// Every call made here must be rejected by the tree's input validation,
/// so the tree state is expected to be unchanged afterwards.
fn invalid_updates<H, const HEIGHT: usize, const CHANGELOG: usize>(
    rng: &mut ThreadRng,
    merkle_tree: &mut ConcurrentMerkleTree<H, HEIGHT>,
    changelog_index: usize,
    old_leaf: &[u8; 32],
    new_leaf: &[u8; 32],
    leaf_index: usize,
    proof: BoundedVec<[u8; 32]>,
) where
    H: Hasher,
{
    // This test case works only for larger changelogs, where there is a chance
    // to encounter conflicting changelog entries.
    //
    // We assume that it's going to work for changelogs with capacity greater
    // than 1. But the smaller the changelog and the more non-conflicting
    // operations are done in between, the higher the chance of this check
    // failing. If you ever encounter issues with reproducing this error, try
    // tuning your changelog size or make sure that conflicting operations are
    // done frequently enough.
    if CHANGELOG > 1 {
        // A stale changelog index must be rejected.
        let invalid_changelog_index = 0;
        let mut proof_clone = proof.clone();
        let res = merkle_tree.update(
            invalid_changelog_index,
            old_leaf,
            new_leaf,
            leaf_index,
            &mut proof_clone,
        );
        assert!(matches!(
            res,
            Err(ConcurrentMerkleTreeError::CannotUpdateLeaf)
        ));
    }
    // A random (wrong) old leaf must make proof verification fail.
    let invalid_old_leaf: [u8; 32] = Fr::rand(rng)
        .into_bigint()
        .to_bytes_be()
        .try_into()
        .unwrap();
    let mut proof_clone = proof.clone();
    // NOTE(review): this call uses leaf index `0` rather than `leaf_index` —
    // presumably fine because the random old leaf fails proof verification
    // at any index, but worth confirming the hard-coded `0` is intentional.
    let res = merkle_tree.update(
        changelog_index,
        &invalid_old_leaf,
        &new_leaf,
        0,
        &mut proof_clone,
    );
    assert!(matches!(
        res,
        Err(ConcurrentMerkleTreeError::InvalidProof(_, _))
    ));
    // A wrong (but occupied) leaf index makes the proof invalid for the
    // given old leaf.
    let invalid_index_in_range = gen_range_exclude(rng, 0..merkle_tree.next_index(), &[leaf_index]);
    let mut proof_clone = proof.clone();
    let res = merkle_tree.update(
        changelog_index,
        old_leaf,
        new_leaf,
        invalid_index_in_range,
        &mut proof_clone,
    );
    assert!(matches!(
        res,
        Err(ConcurrentMerkleTreeError::InvalidProof(_, _))
    ));
    // Try pointing to the leaf indices outside the range only if the tree is
    // not full. Otherwise, it doesn't make sense and even `gen_range` will
    // fail.
    let next_index = merkle_tree.next_index();
    let limit_leaves = 1 << HEIGHT;
    if next_index < limit_leaves {
        // Updating a never-appended (empty) leaf must be rejected outright.
        let invalid_index_outside_range = rng.gen_range(next_index..limit_leaves);
        let mut proof_clone = proof.clone();
        let res = merkle_tree.update(
            changelog_index,
            old_leaf,
            new_leaf,
            invalid_index_outside_range,
            &mut proof_clone,
        );
        assert!(matches!(
            res,
            Err(ConcurrentMerkleTreeError::CannotUpdateEmpty)
        ));
    }
}
/// Tests whether update operations work as expected.
fn update<H, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize>()
where
H: Hasher,
{
const HEIGHT: usize = 4;
let mut merkle_tree =
ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
merkle_tree.init().unwrap();
let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
let mut rng = thread_rng();
let leaf1 = H::hash(&[1u8; 32]).unwrap();
let leaf2 = H::hash(&[2u8; 32]).unwrap();
let leaf3 = H::hash(&[3u8; 32]).unwrap();
let leaf4 = H::hash(&[4u8; 32]).unwrap();
// Append 4 leaves.
//
// R
// / \
// H4 Z[3]
// / \
// H3 Z[2]
// / \
// H1 H2
// / \ / \
// L1 L2 L3 L4
let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
let expected_changelog_path = ChangelogPath([Some(leaf4), Some(h2), Some(h3), Some(h4)]);
let expected_filled_subtrees = BoundedVec::from_array(&[leaf3, h1, h3, h4]);
merkle_tree.append(&leaf1).unwrap();
reference_tree.append(&leaf1).unwrap();
merkle_tree.append(&leaf2).unwrap();
reference_tree.append(&leaf2).unwrap();
merkle_tree.append(&leaf3).unwrap();
reference_tree.append(&leaf3).unwrap();
merkle_tree.append(&leaf4).unwrap();
reference_tree.append(&leaf4).unwrap();
let canopy_levels = [
&[h4, H::zero_bytes()[3]][..],
&[
h3,
H::zero_bytes()[2],
H::zero_bytes()[2],
H::zero_bytes()[2],
][..],
];
let mut expected_canopy = Vec::new();
for canopy_level in 0..CANOPY {
println!("canopy_level: {canopy_level}");
expected_canopy.extend_from_slice(&canopy_levels[canopy_level]);
}
assert_eq!(merkle_tree.changelog_index(), 4 % CHANGELOG);
assert_eq!(
merkle_tree.changelog[merkle_tree.changelog_index()],
ChangelogEntry::new(expected_changelog_path, 3),
);
assert_eq!(merkle_tree.root(), reference_tree.root());
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(merkle_tree.roots.last_index(), 4);
assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees);
assert_eq!(merkle_tree.next_index(), 4);
assert_eq!(merkle_tree.rightmost_leaf(), leaf4);
assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap());
assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice());
// Replace `leaf1`.
let new_leaf1 = [9u8; 32];
// Replacing L1 affects H1 and all parent hashes up to the root.
//
// R
// / \
// *H4* Z[3]
// / \
// *H3* Z[2]
// / \
// *H1* H2
// / \ / \
// *L1* L2 L3 L4
//
// Merkle proof for the replaced leaf L1 is:
// [L2, H2, Z[2], Z[3]]
let changelog_index = merkle_tree.changelog_index();
let proof_raw = &[leaf2, h2, H::zero_bytes()[2], H::zero_bytes()[3]];
let mut proof = BoundedVec::with_capacity(HEIGHT);
for node in &proof_raw[..HEIGHT - CANOPY] {
proof.push(*node).unwrap();
}
invalid_updates::<H, HEIGHT, CHANGELOG>(
&mut rng,
&mut merkle_tree,
changelog_index,
&leaf1,
&new_leaf1,
0,
proof.clone(),
);
merkle_tree
.update(changelog_index, &leaf1, &new_leaf1, 0, &mut proof)
.unwrap();
reference_tree.update(&new_leaf1, 0).unwrap();
let h1 = H::hashv(&[&new_leaf1, &leaf2]).unwrap();
let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
let expected_changelog_path = ChangelogPath([Some(new_leaf1), Some(h1), Some(h3), Some(h4)]);
let canopy_levels = [
&[h4, H::zero_bytes()[3]][..],
&[
h3,
H::zero_bytes()[2],
H::zero_bytes()[2],
H::zero_bytes()[2],
][..],
];
let mut expected_canopy = Vec::new();
for canopy_level in 0..CANOPY {
expected_canopy.extend_from_slice(&canopy_levels[canopy_level]);
}
assert_eq!(merkle_tree.changelog_index(), 5 % CHANGELOG);
assert_eq!(
merkle_tree.changelog[merkle_tree.changelog_index()],
ChangelogEntry::new(expected_changelog_path, 0),
);
assert_eq!(merkle_tree.root(), reference_tree.root());
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(merkle_tree.roots.last_index(), 5);
assert_eq!(merkle_tree.next_index(), 4);
assert_eq!(merkle_tree.rightmost_leaf(), leaf4);
assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap());
assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice());
// Replace `leaf2`.
let new_leaf2 = H::hash(&[8u8; 32]).unwrap();
// Replacing L2 affects H1 and all parent hashes up to the root.
//
// R
// / \
// *H4* Z[3]
// / \
// *H3* Z[2]
// / \
// *H1* H2
// / \ / \
// L1 *L2* L3 L4
//
// Merkle proof for the replaced leaf L2 is:
// [L1, H2, Z[2], Z[3]]
let changelog_index = merkle_tree.changelog_index();
let proof_raw = &[new_leaf1, h2, H::zero_bytes()[2], H::zero_bytes()[3]];
let mut proof = BoundedVec::with_capacity(HEIGHT);
for node in &proof_raw[..HEIGHT - CANOPY] {
proof.push(*node).unwrap();
}
invalid_updates::<H, HEIGHT, CHANGELOG>(
&mut rng,
&mut merkle_tree,
changelog_index,
&leaf2,
&new_leaf2,
1,
proof.clone(),
);
merkle_tree
.update(changelog_index, &leaf2, &new_leaf2, 1, &mut proof)
.unwrap();
reference_tree.update(&new_leaf2, 1).unwrap();
let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
let expected_changelog_path = ChangelogPath([Some(new_leaf2), Some(h1), Some(h3), Some(h4)]);
let canopy_levels = [
&[h4, H::zero_bytes()[3]][..],
&[
h3,
H::zero_bytes()[2],
H::zero_bytes()[2],
H::zero_bytes()[2],
][..],
];
let mut expected_canopy = Vec::new();
for canopy_level in 0..CANOPY {
expected_canopy.extend_from_slice(&canopy_levels[canopy_level]);
}
assert_eq!(merkle_tree.changelog_index(), 6 % CHANGELOG);
assert_eq!(
merkle_tree.changelog[merkle_tree.changelog_index()],
ChangelogEntry::new(expected_changelog_path, 1),
);
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(merkle_tree.roots.last_index(), 6);
assert_eq!(merkle_tree.next_index(), 4);
assert_eq!(merkle_tree.rightmost_leaf(), leaf4);
assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap());
assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice());
// Replace `leaf3`.
let new_leaf3 = H::hash(&[7u8; 32]).unwrap();
// Replacing L3 affects H1 and all parent hashes up to the root.
//
// R
// / \
// *H4* Z[3]
// / \
// *H3* Z[2]
// / \
// H1 *H2*
// / \ / \
// L1 L2 *L3* L4
//
// Merkle proof for the replaced leaf L3 is:
// [L4, H1, Z[2], Z[3]]
let changelog_index = merkle_tree.changelog_index();
let proof_raw = &[leaf4, h1, H::zero_bytes()[2], H::zero_bytes()[3]];
let mut proof = BoundedVec::with_capacity(HEIGHT);
for node in &proof_raw[..HEIGHT - CANOPY] {
proof.push(*node).unwrap();
}
invalid_updates::<H, HEIGHT, CHANGELOG>(
&mut rng,
&mut merkle_tree,
changelog_index,
&leaf3,
&new_leaf3,
2,
proof.clone(),
);
merkle_tree
.update(changelog_index, &leaf3, &new_leaf3, 2, &mut proof)
.unwrap();
reference_tree.update(&new_leaf3, 2).unwrap();
let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
let h2 = H::hashv(&[&new_leaf3, &leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
let expected_changelog_path = ChangelogPath([Some(new_leaf3), Some(h2), Some(h3), Some(h4)]);
let canopy_levels = [
&[h4, H::zero_bytes()[3]][..],
&[
h3,
H::zero_bytes()[2],
H::zero_bytes()[2],
H::zero_bytes()[2],
][..],
];
let mut expected_canopy = Vec::new();
for canopy_level in 0..CANOPY {
expected_canopy.extend_from_slice(&canopy_levels[canopy_level]);
}
assert_eq!(merkle_tree.changelog_index(), 7 % CHANGELOG);
assert_eq!(
merkle_tree.changelog[merkle_tree.changelog_index()],
ChangelogEntry::new(expected_changelog_path, 2)
);
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(merkle_tree.roots.last_index(), 7);
assert_eq!(merkle_tree.next_index(), 4);
assert_eq!(merkle_tree.rightmost_leaf(), leaf4);
assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap());
assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice());
// Replace `leaf4`.
let new_leaf4 = H::hash(&[6u8; 32]).unwrap();
// Replacing L4 affects H1 and all parent hashes up to the root.
//
// R
// / \
// *H4* Z[3]
// / \
// *H3* Z[2]
// / \
// H1 *H2*
// / \ / \
// L1 L2 L3 *L4*
//
// Merkle proof for the replaced leaf L4 is:
// [L3, H1, Z[2], Z[3]]
let changelog_index = merkle_tree.changelog_index();
let proof_raw = &[new_leaf3, h1, H::zero_bytes()[2], H::zero_bytes()[3]];
let mut proof = BoundedVec::with_capacity(HEIGHT);
for node in &proof_raw[..HEIGHT - CANOPY] {
proof.push(*node).unwrap();
}
invalid_updates::<H, HEIGHT, CHANGELOG>(
&mut rng,
&mut merkle_tree,
changelog_index,
&leaf4,
&new_leaf4,
3,
proof.clone(),
);
merkle_tree
.update(changelog_index, &leaf4, &new_leaf4, 3, &mut proof)
.unwrap();
reference_tree.update(&new_leaf4, 3).unwrap();
let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
let h2 = H::hashv(&[&new_leaf3, &new_leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
let expected_changelog_path = ChangelogPath([Some(new_leaf4), Some(h2), Some(h3), Some(h4)]);
let canopy_levels = [
&[h4, H::zero_bytes()[3]][..],
&[
h3,
H::zero_bytes()[2],
H::zero_bytes()[2],
H::zero_bytes()[2],
][..],
];
let mut expected_canopy = Vec::new();
for canopy_level in 0..CANOPY {
expected_canopy.extend_from_slice(&canopy_levels[canopy_level]);
}
assert_eq!(merkle_tree.changelog_index(), 8 % CHANGELOG);
assert_eq!(
merkle_tree.changelog[merkle_tree.changelog_index()],
ChangelogEntry::new(expected_changelog_path, 3)
);
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(merkle_tree.roots.last_index(), 8);
assert_eq!(merkle_tree.next_index(), 4);
assert_eq!(merkle_tree.rightmost_leaf(), new_leaf4);
assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap());
assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice());
}
/// Tests whether appending leaves over the limit results in an explicit error.
fn overfill_tree<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 2;
    const CHANGELOG: usize = 32;
    const ROOTS: usize = 32;
    const CANOPY: usize = 0;
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    // Fill the tree up to its capacity of `2 ^ HEIGHT` leaves. (Previously
    // hard-coded as `0..4`, which only coincidentally equals the capacity for
    // `HEIGHT = 2`; other tests in this file derive the count from HEIGHT.)
    for _ in 0..(1 << HEIGHT) {
        merkle_tree.append(&[4; 32]).unwrap();
    }
    // One more append has to fail explicitly with `TreeFull`.
    assert!(matches!(
        merkle_tree.append(&[4; 32]),
        Err(ConcurrentMerkleTreeError::TreeFull)
    ));
}
/// Tests whether performing enough updates to overfill the changelog and root
/// buffer results in graceful reset of the counters.
fn overfill_changelog_and_roots<H>()
where
    H: Hasher,
{
    // Deliberately mismatched buffer sizes, so that the changelog (6 entries)
    // wraps around before the root buffer (8 entries) does.
    const HEIGHT: usize = 2;
    const CHANGELOG: usize = 6;
    const ROOTS: usize = 8;
    const CANOPY: usize = 0;
    // Our implementation of concurrent Merkle tree.
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    // Reference implementation of Merkle tree which Solana Labs uses for
    // testing (and therefore, we as well). We use it mostly to get the Merkle
    // proofs.
    let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
    let mut rng = thread_rng();
    // Fill up the tree, producing 4 roots and changelog entries.
    for _ in 0..(1 << HEIGHT) {
        let leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        merkle_tree.append(&leaf).unwrap();
        reference_tree.append(&leaf).unwrap();
    }
    // 4 appends on top of the initial entry leave both counters at index 4.
    assert_eq!(merkle_tree.changelog.last_index(), 4);
    assert_eq!(merkle_tree.roots.last_index(), 4);
    // Update 2 leaves to fill up the changelog. Its counter should reach the
    // modulus and get reset.
    for i in 0..2 {
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let changelog_index = merkle_tree.changelog_index();
        let old_leaf = reference_tree.get_leaf(i);
        let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap();
        merkle_tree
            .update(changelog_index, &old_leaf, &new_leaf, i, &mut proof)
            .unwrap();
        reference_tree.update(&new_leaf, i).unwrap();
    }
    // 6 operations so far: the changelog counter wrapped (6 % 6 == 0), while
    // the larger root buffer kept counting up to 6.
    assert_eq!(merkle_tree.changelog.last_index(), 0);
    assert_eq!(merkle_tree.roots.last_index(), 6);
    // Update another 2 leaves to fill up the root. Its counter should reach
    // the modulus and get reset. The previously reset counter should get
    // incremented.
    for i in 0..2 {
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let changelog_index = merkle_tree.changelog_index();
        let old_leaf = reference_tree.get_leaf(i);
        let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap();
        merkle_tree
            .update(changelog_index, &old_leaf, &new_leaf, i, &mut proof)
            .unwrap();
        reference_tree.update(&new_leaf, i).unwrap();
    }
    // 8 operations: the root buffer wrapped (8 % 8 == 0), the changelog is at
    // 8 % 6 == 2.
    assert_eq!(merkle_tree.changelog.last_index(), 2);
    assert_eq!(merkle_tree.roots.last_index(), 0);
    // The latter updates should keep incrementing the counters.
    for i in 0..3 {
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let changelog_index = merkle_tree.changelog_index();
        let old_leaf = reference_tree.get_leaf(i);
        let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap();
        merkle_tree
            .update(changelog_index, &old_leaf, &new_leaf, i, &mut proof)
            .unwrap();
        reference_tree.update(&new_leaf, i).unwrap();
    }
    // 11 operations: 11 % 6 == 5 and 11 % 8 == 3.
    assert_eq!(merkle_tree.changelog.last_index(), 5);
    assert_eq!(merkle_tree.roots.last_index(), 3);
}
/// Checks whether `append_batch` is compatible with equivalent multiple
/// appends.
fn compat_batch<H, const HEIGHT: usize, const CANOPY: usize>()
where
    H: Hasher,
{
    const CHANGELOG: usize = 64;
    const ROOTS: usize = 256;
    let mut rng = thread_rng();
    // A batch can neither exceed the tree capacity nor the changelog size.
    let max_batch_size = cmp::min(1 << HEIGHT, CHANGELOG);
    for batch_size in 1..max_batch_size {
        // Tree receiving a single `append_batch` call.
        let mut batched_mt =
            ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
        batched_mt.init().unwrap();
        // Tree receiving the equivalent sequence of single `append` calls.
        let mut singular_mt =
            ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
        singular_mt.init().unwrap();
        // Reference tree for checking the correctness of proofs.
        let mut reference_mt = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
        let leaves: Vec<[u8; 32]> = (0..batch_size)
            .map(|_| {
                Fr::rand(&mut rng)
                    .into_bigint()
                    .to_bytes_be()
                    .try_into()
                    .unwrap()
            })
            .collect();
        let leaves: Vec<&[u8; 32]> = leaves.iter().collect();
        // Batch append.
        batched_mt.append_batch(leaves.as_slice()).unwrap();
        // Singular appends.
        for leaf in leaves.iter() {
            singular_mt.append(leaf).unwrap();
        }
        // Singular appends to the reference MT.
        for leaf in leaves.iter() {
            reference_mt.append(leaf).unwrap();
        }
        // Compare roots, skipping the ones which are an output of singular,
        // non-terminal appends - we don't compute them in batch appends and
        // instead, emit a "zero root" (just to appease the clients assuming
        // that root index is equal to sequence number).
        let batched_roots = batched_mt
            .roots
            .iter()
            .step_by(batch_size)
            .collect::<Vec<_>>();
        let singular_roots = singular_mt
            .roots
            .iter()
            .step_by(batch_size)
            .collect::<Vec<_>>();
        assert_eq!(batched_roots.as_slice(), singular_roots.as_slice());
        assert_eq!(batched_mt.root(), reference_mt.root());
        assert_eq!(singular_mt.root(), reference_mt.root());
    }
}
/// Checks that every batch larger than the changelog capacity is rejected
/// with an explicit `BatchGreaterThanChangelog` error.
fn batch_greater_than_changelog<H, const HEIGHT: usize, const CANOPY: usize>()
where
    H: Hasher,
{
    const CHANGELOG: usize = 64;
    const ROOTS: usize = 256;
    let mut rng = thread_rng();
    let mut concurrent_mt =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    concurrent_mt.init().unwrap();
    for batch_size in (CHANGELOG + 1)..(1 << HEIGHT) {
        // Random leaves; their values are irrelevant, only the batch length
        // matters for this check.
        let random_leaves: Vec<[u8; 32]> = (0..batch_size)
            .map(|_| {
                Fr::rand(&mut rng)
                    .into_bigint()
                    .to_bytes_be()
                    .try_into()
                    .unwrap()
            })
            .collect();
        let leaf_refs: Vec<&[u8; 32]> = random_leaves.iter().collect();
        assert!(matches!(
            concurrent_mt.append_batch(leaf_refs.as_slice()),
            Err(ConcurrentMerkleTreeError::BatchGreaterThanChangelog(_, _)),
        ));
    }
}
/// Checks, for every canopy depth and batch size, that a tree with a canopy
/// produces the same proofs as an equivalent tree without one, and that
/// truncated proofs can be completed from the canopy.
fn compat_canopy<H, const HEIGHT: usize>()
where
    H: Hasher,
{
    const CHANGELOG: usize = 64;
    const ROOTS: usize = 256;
    let mut rng = thread_rng();
    for canopy_depth in 1..(HEIGHT + 1) {
        // A batch can neither exceed the tree capacity nor the changelog size.
        let batch_limit = cmp::min(1 << HEIGHT, CHANGELOG);
        for batch_size in 1..batch_limit {
            let mut concurrent_mt_with_canopy =
                ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, canopy_depth)
                    .unwrap();
            concurrent_mt_with_canopy.init().unwrap();
            let mut concurrent_mt_without_canopy =
                ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, 0).unwrap();
            concurrent_mt_without_canopy.init().unwrap();
            // Reference trees, used to obtain the Merkle proofs.
            let mut reference_mt_with_canopy =
                light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, canopy_depth);
            let mut reference_mt_without_canopy =
                light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, 0);
            // Fill the whole tree, batch by batch.
            for batch_i in 0..((1 << HEIGHT) / batch_size) {
                let leaves: Vec<[u8; 32]> = (0..batch_size)
                    .map(|_| {
                        Fr::rand(&mut rng)
                            .into_bigint()
                            .to_bytes_be()
                            .try_into()
                            .unwrap()
                    })
                    .collect();
                let leaves: Vec<&[u8; 32]> = leaves.iter().collect();
                concurrent_mt_with_canopy
                    .append_batch(leaves.as_slice())
                    .unwrap();
                concurrent_mt_without_canopy
                    .append_batch(leaves.as_slice())
                    .unwrap();
                for leaf in leaves {
                    reference_mt_with_canopy.append(leaf).unwrap();
                    reference_mt_without_canopy.append(leaf).unwrap();
                }
                for leaf_i in 0..batch_size {
                    let leaf_index = (batch_i * batch_size) + leaf_i;
                    let mut proof_with_canopy = reference_mt_with_canopy
                        .get_proof_of_leaf(leaf_index, false)
                        .unwrap();
                    let proof_without_canopy = reference_mt_without_canopy
                        .get_proof_of_leaf(leaf_index, true)
                        .unwrap();
                    // The truncated proof has to be a prefix of the full one.
                    assert_eq!(
                        proof_with_canopy[..],
                        proof_without_canopy[..HEIGHT - canopy_depth]
                    );
                    // Completing the truncated proof from the canopy has to
                    // reproduce the full proof exactly.
                    concurrent_mt_with_canopy
                        .update_proof_from_canopy(leaf_index, &mut proof_with_canopy)
                        .unwrap();
                    assert_eq!(proof_with_canopy, proof_without_canopy)
                }
            }
        }
    }
}
// `append` smoke tests for every supported hasher (canopy disabled).
#[test]
fn test_append_keccak_canopy_0() {
    append::<Keccak, 0>()
}
#[test]
fn test_append_poseidon_canopy_0() {
    append::<Poseidon, 0>()
}
#[test]
fn test_append_sha256_canopy_0() {
    append::<Sha256, 0>()
}
// `append_with_proof` tests; judging by the test names, the const parameters
// appear to be (height, changelog, roots, canopy, operations) — TODO confirm
// against the `append_with_proof` definition.
#[test]
fn test_append_with_proof_keccak_4_16_16_0_16() {
    append_with_proof::<Keccak, 4, 16, 16, 0, 16>()
}
#[test]
fn test_append_with_proof_poseidon_4_16_16_0_16() {
    append_with_proof::<Poseidon, 4, 16, 16, 0, 16>()
}
#[test]
fn test_append_with_proof_sha256_4_16_16_0_16() {
    append_with_proof::<Sha256, 4, 16, 16, 0, 16>()
}
#[test]
fn test_append_with_proof_keccak_26_1400_2800_0_200() {
    append_with_proof::<Keccak, 26, 1400, 2800, 0, 200>()
}
#[test]
fn test_append_with_proof_poseidon_26_1400_2800_0_200() {
    append_with_proof::<Poseidon, 26, 1400, 2800, 0, 200>()
}
#[test]
fn test_append_with_proof_sha256_26_1400_2800_0_200() {
    append_with_proof::<Sha256, 26, 1400, 2800, 0, 200>()
}
#[test]
fn test_append_with_proof_keccak_26_1400_2800_10_200() {
    append_with_proof::<Keccak, 26, 1400, 2800, 10, 200>()
}
#[test]
fn test_append_with_proof_poseidon_26_1400_2800_10_200() {
    append_with_proof::<Poseidon, 26, 1400, 2800, 10, 200>()
}
#[test]
fn test_append_with_proof_sha256_26_1400_2800_10_200() {
    append_with_proof::<Sha256, 26, 1400, 2800, 10, 200>()
}
// `update` tests; judging by the test names, the const parameters appear to
// be (changelog, roots, canopy) with the height fixed at 4 inside `update` —
// TODO confirm against the `update` definition.
#[test]
fn test_update_keccak_height_4_changelog_1_roots_256_canopy_0() {
    update::<Keccak, 1, 256, 0>()
}
#[test]
fn test_update_keccak_height_4_changelog_1_roots_256_canopy_1() {
    update::<Keccak, 1, 256, 1>()
}
#[test]
fn test_update_keccak_height_4_changelog_1_roots_256_canopy_2() {
    update::<Keccak, 1, 256, 2>()
}
#[test]
fn test_update_keccak_height_4_changelog_32_roots_256_canopy_0() {
    update::<Keccak, 32, 256, 0>()
}
#[test]
fn test_update_keccak_height_4_changelog_32_roots_256_canopy_1() {
    update::<Keccak, 32, 256, 1>()
}
#[test]
fn test_update_keccak_height_4_changelog_32_roots_256_canopy_2() {
    update::<Keccak, 32, 256, 2>()
}
#[test]
fn test_update_poseidon_height_4_changelog_1_roots_256_canopy_0() {
    update::<Poseidon, 1, 256, 0>()
}
#[test]
fn test_update_poseidon_height_4_changelog_1_roots_256_canopy_1() {
    update::<Poseidon, 1, 256, 1>()
}
#[test]
fn test_update_poseidon_height_4_changelog_1_roots_256_canopy_2() {
    update::<Poseidon, 1, 256, 2>()
}
#[test]
fn test_update_poseidon_height_4_changelog_32_roots_256_canopy_0() {
    update::<Poseidon, 32, 256, 0>()
}
#[test]
fn test_update_poseidon_height_4_changelog_32_roots_256_canopy_1() {
    update::<Poseidon, 32, 256, 1>()
}
#[test]
fn test_update_poseidon_height_4_changelog_32_roots_256_canopy_2() {
    update::<Poseidon, 32, 256, 2>()
}
#[test]
fn test_update_sha256_height_4_changelog_32_roots_256_canopy_0() {
    update::<Sha256, 32, 256, 0>()
}
// NOTE: both tests below previously called `update::<Sha256, 32, 256, 0>()`,
// duplicating the canopy 0 case instead of exercising canopies 1 and 2 — the
// canopy const now matches the test name, consistent with the Keccak and
// Poseidon variants above.
#[test]
fn test_update_sha256_height_4_changelog_32_roots_256_canopy_1() {
    update::<Sha256, 32, 256, 1>()
}
#[test]
fn test_update_sha256_height_4_changelog_32_roots_256_canopy_2() {
    update::<Sha256, 32, 256, 2>()
}
// Appending over the tree capacity has to fail for every hasher.
#[test]
fn test_overfill_tree_keccak() {
    overfill_tree::<Keccak>()
}
#[test]
fn test_overfill_tree_poseidon() {
    overfill_tree::<Poseidon>()
}
#[test]
fn test_overfill_tree_sha256() {
    overfill_tree::<Sha256>()
}
// NOTE(review): only the Keccak variant of the changelog/root overfill test
// exists; there is no Poseidon/Sha256 counterpart here.
#[test]
fn test_overfill_changelog_keccak() {
    overfill_changelog_and_roots::<Keccak>()
}
// `append_batch` vs singular `append` compatibility; const parameters are
// passed inline as `<hasher, HEIGHT, CANOPY>`.
#[test]
fn test_compat_batch_keccak_8_canopy_0() {
    compat_batch::<Keccak, 8, 0>()
}
#[test]
fn test_compat_batch_poseidon_3_canopy_0() {
    compat_batch::<Poseidon, 3, 0>()
}
#[test]
fn test_compat_batch_poseidon_6_canopy_0() {
    compat_batch::<Poseidon, 6, 0>()
}
#[test]
fn test_compat_batch_sha256_8_canopy_0() {
    compat_batch::<Sha256, 8, 0>()
}
#[cfg(feature = "heavy-tests")]
#[test]
fn test_compat_batch_keccak_16() {
    compat_batch::<Keccak, 16, 0>()
}
#[cfg(feature = "heavy-tests")]
#[test]
fn test_compat_batch_poseidon_16() {
    compat_batch::<Poseidon, 16, 0>()
}
#[cfg(feature = "heavy-tests")]
#[test]
fn test_compat_batch_sha256_16() {
    compat_batch::<Sha256, 16, 0>()
}
// Oversized-batch rejection; const parameters are passed inline as
// `<hasher, HEIGHT, CANOPY>`.
#[test]
fn test_batch_greater_than_changelog_keccak_8_canopy_0() {
    batch_greater_than_changelog::<Keccak, 8, 0>()
}
#[test]
fn test_batch_greater_than_changelog_poseidon_8_canopy_0() {
    batch_greater_than_changelog::<Poseidon, 8, 0>()
}
#[test]
fn test_batch_greater_than_changelog_sha256_8_canopy_0() {
    batch_greater_than_changelog::<Sha256, 8, 0>()
}
#[test]
fn test_batch_greater_than_changelog_keccak_8_canopy_4() {
    batch_greater_than_changelog::<Keccak, 8, 4>()
}
#[test]
fn test_batch_greater_than_changelog_poseidon_6_canopy_3() {
    batch_greater_than_changelog::<Poseidon, 6, 3>()
}
#[test]
fn test_batch_greater_than_changelog_sha256_8_canopy_4() {
    batch_greater_than_changelog::<Sha256, 8, 4>()
}
// Canopy/no-canopy compatibility; const parameter is the tree height, passed
// inline as `<hasher, HEIGHT>`.
#[test]
fn test_compat_canopy_keccak_8() {
    compat_canopy::<Keccak, 8>()
}
#[test]
fn test_compat_canopy_poseidon_6() {
    compat_canopy::<Poseidon, 6>()
}
#[cfg(feature = "heavy-tests")]
#[test]
fn test_compat_canopy_poseidon_26() {
    compat_canopy::<Poseidon, 26>()
}
#[test]
fn test_compat_canopy_sha256_8() {
    compat_canopy::<Sha256, 8>()
}
/// Compares the internal fields of concurrent Merkle tree implementations, to
/// ensure their consistency.
fn compare_trees<H, const HEIGHT: usize, const MAX_ROOTS: usize>(
    concurrent_mt: &ConcurrentMerkleTree<H, HEIGHT>,
    spl_concurrent_mt: &spl_concurrent_merkle_tree::concurrent_merkle_tree::ConcurrentMerkleTree<
        HEIGHT,
        MAX_ROOTS,
    >,
) where
    H: Hasher,
{
    // Changelog entries have to match node-by-node and index-by-index.
    for i in 0..concurrent_mt.changelog.len() {
        let changelog_entry = concurrent_mt.changelog[i].clone();
        let spl_changelog_entry = spl_concurrent_mt.change_logs[i];
        for j in 0..HEIGHT {
            let changelog_node = changelog_entry.path[j].unwrap();
            let spl_changelog_node = spl_changelog_entry.path[j];
            assert_eq!(changelog_node, spl_changelog_node);
        }
        assert_eq!(changelog_entry.index, spl_changelog_entry.index as u64);
    }
    assert_eq!(
        concurrent_mt.changelog.last_index(),
        spl_concurrent_mt.active_index as usize
    );
    assert_eq!(concurrent_mt.root(), spl_concurrent_mt.get_root());
    // The SPL tree keeps its historical roots inside the changelog entries,
    // hence our root buffer is compared against `change_logs[i].root`.
    for i in 0..concurrent_mt.roots.len() {
        assert_eq!(
            concurrent_mt.roots[i],
            spl_concurrent_mt.change_logs[i].root
        );
    }
    assert_eq!(
        concurrent_mt.roots.last_index(),
        spl_concurrent_mt.active_index as usize
    );
    assert_eq!(
        concurrent_mt.next_index(),
        spl_concurrent_mt.rightmost_proof.index as usize
    );
    assert_eq!(
        concurrent_mt.rightmost_leaf(),
        spl_concurrent_mt.rightmost_proof.leaf
    );
}
/// Checks whether our `append` and `update` implementations are compatible
/// with `append` and `set_leaf` from `spl-concurrent-merkle-tree` crate.
#[tokio::test(flavor = "multi_thread")]
async fn test_spl_compat() {
    const HEIGHT: usize = 4;
    const CHANGELOG: usize = 64;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    let mut rng = thread_rng();
    // Our implementation of concurrent Merkle tree.
    let mut concurrent_mt =
        ConcurrentMerkleTree::<Keccak, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    concurrent_mt.init().unwrap();
    // Solana Labs implementation of concurrent Merkle tree.
    let mut spl_concurrent_mt = spl_concurrent_merkle_tree::concurrent_merkle_tree::ConcurrentMerkleTree::<HEIGHT, ROOTS>::new();
    spl_concurrent_mt.initialize().unwrap();
    // Reference implementation of Merkle tree which Solana Labs uses for
    // testing (and therefore, we as well). We use it mostly to get the Merkle
    // proofs.
    let mut reference_tree = light_merkle_tree_reference::MerkleTree::<Keccak>::new(HEIGHT, CANOPY);
    // Fill the tree, comparing all internal fields after every operation.
    for i in 0..(1 << HEIGHT) {
        let leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        concurrent_mt.append(&leaf).unwrap();
        spl_concurrent_mt.append(leaf).unwrap();
        reference_tree.append(&leaf).unwrap();
        compare_trees(&concurrent_mt, &spl_concurrent_mt);
        // For every appended leaf with index greater than 0, update the leaf 0.
        // This is done in indexed Merkle trees[0] and it's a great test case
        // for rightmost proof updates.
        //
        // [0] https://docs.aztec.network/concepts/advanced/data_structures/indexed_merkle_tree
        if i > 0 {
            let new_leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            let root = concurrent_mt.root();
            let changelog_index = concurrent_mt.changelog_index();
            let old_leaf = reference_tree.get_leaf(0);
            let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap();
            concurrent_mt
                .update(changelog_index, &old_leaf, &new_leaf, 0, &mut proof)
                .unwrap();
            spl_concurrent_mt
                .set_leaf(root, old_leaf, new_leaf, proof.as_slice(), 0 as u32)
                .unwrap();
            reference_tree.update(&new_leaf, 0).unwrap();
            compare_trees(&concurrent_mt, &spl_concurrent_mt);
        }
    }
    // Update every leaf of the now-full tree once more, comparing the trees
    // after each update.
    for i in 0..(1 << HEIGHT) {
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let root = concurrent_mt.root();
        let changelog_index = concurrent_mt.changelog_index();
        let old_leaf = reference_tree.get_leaf(i);
        let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap();
        concurrent_mt
            .update(changelog_index, &old_leaf, &new_leaf, i, &mut proof)
            .unwrap();
        spl_concurrent_mt
            .set_leaf(root, old_leaf, new_leaf, proof.as_slice(), i as u32)
            .unwrap();
        reference_tree.update(&new_leaf, i).unwrap();
        compare_trees(&concurrent_mt, &spl_concurrent_mt);
    }
}
/// Checks that a `ConcurrentMerkleTree` stored in a raw byte buffer survives
/// repeated zero-copy reloads across appends and updates, staying consistent
/// with two reference implementations and with the recorded history of leaf
/// indices, roots and Merkle paths.
fn from_bytes<
    H,
    const HEIGHT: usize,
    const CHANGELOG: usize,
    const ROOTS: usize,
    const CANOPY: usize,
>()
where
    H: Hasher,
{
    // Account-sized buffer which plays the role of the persistent storage.
    let mut bytes =
        vec![
            0u8;
            ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(HEIGHT, CHANGELOG, ROOTS, CANOPY)
        ];
    let mut rng = thread_rng();
    let mut reference_tree_1 = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
    // Vector of leaf indices after each operation.
    let mut leaf_indices = CyclicBoundedVec::with_capacity(CHANGELOG);
    // Vector of roots after each operation.
    let mut roots = CyclicBoundedVec::with_capacity(CHANGELOG);
    // Vector of merkle paths we get from the reference tree after each operation.
    let mut merkle_paths = CyclicBoundedVec::with_capacity(CHANGELOG);
    // Changelog is always initialized with a changelog path consisting of zero
    // bytes. For consistency, we need to assert the 1st zero byte as the first
    // expected leaf in the changelog.
    let merkle_path = reference_tree_1.get_path_of_leaf(0, true).unwrap();
    leaf_indices.push(0);
    merkle_paths.push(merkle_path);
    // Initialize the tree inside the buffer; the zero-copy handle is dropped
    // at the end of the scope, so the buffer can be reloaded below.
    {
        let mut merkle_tree =
            ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_init(
                bytes.as_mut_slice(),
                HEIGHT,
                CANOPY,
                CHANGELOG,
                ROOTS,
            )
            .unwrap();
        merkle_tree.init().unwrap();
        roots.push(merkle_tree.root());
    }
    let mut reference_tree_2 =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    reference_tree_2.init().unwrap();
    // Try to make the tree full. After each append, update a random leaf.
    // Reload the tree from bytes after each action.
    for _ in 0..(1 << HEIGHT) {
        // Reload the tree.
        let mut merkle_tree =
            ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_mut(
                bytes.as_mut_slice(),
            )
            .unwrap();
        // Append leaf.
        let leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let leaf_index = merkle_tree.next_index();
        merkle_tree.append(&leaf).unwrap();
        reference_tree_1.append(&leaf).unwrap();
        reference_tree_2.append(&leaf).unwrap();
        leaf_indices.push(leaf_index);
        roots.push(merkle_tree.root());
        let merkle_path = reference_tree_1.get_path_of_leaf(leaf_index, true).unwrap();
        merkle_paths.push(merkle_path);
        // The reloaded tree has to match the in-memory reference tree field
        // by field.
        assert_eq!(
            merkle_tree.filled_subtrees.iter().collect::<Vec<_>>(),
            reference_tree_2.filled_subtrees.iter().collect::<Vec<_>>()
        );
        assert_eq!(
            merkle_tree.changelog.iter().collect::<Vec<_>>(),
            reference_tree_2.changelog.iter().collect::<Vec<_>>()
        );
        assert_eq!(
            merkle_tree.roots.iter().collect::<Vec<_>>(),
            reference_tree_2.roots.iter().collect::<Vec<_>>()
        );
        assert_eq!(
            merkle_tree.canopy.iter().collect::<Vec<_>>(),
            reference_tree_2.canopy.iter().collect::<Vec<_>>()
        );
        assert_eq!(merkle_tree.root(), reference_tree_1.root());
        // Changelog entries (skipping the initial zero entry) have to match
        // the recorded leaf indices and Merkle paths.
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog.first_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert_eq!(changelog_entries.len(), merkle_paths.len() - 1);
        for ((leaf_index, merkle_path), changelog_entry) in leaf_indices
            .iter()
            .skip(1)
            .zip(merkle_paths.iter().skip(1))
            .zip(changelog_entries)
        {
            assert_eq!(changelog_entry.index, *leaf_index as u64);
            for i in 0..HEIGHT {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = merkle_path[i];
                assert_eq!(changelog_node, path_node);
            }
        }
        // Recorded roots have to match the root buffer one by one.
        for (root_1, root_2) in merkle_tree.roots.iter().zip(roots.iter()) {
            assert_eq!(root_1, root_2);
        }
        // Update random leaf.
        let leaf_index = rng.gen_range(0..reference_tree_1.leaves().len());
        let old_leaf = reference_tree_1.get_leaf(leaf_index);
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let mut proof = reference_tree_1
            .get_proof_of_leaf(leaf_index, false)
            .unwrap();
        let changelog_index = merkle_tree.changelog_index();
        merkle_tree
            .update(
                changelog_index,
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        reference_tree_1.update(&new_leaf, leaf_index).unwrap();
        reference_tree_2
            .update(
                changelog_index,
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        assert_eq!(merkle_tree.root(), reference_tree_1.root());
        leaf_indices.push(leaf_index);
        roots.push(merkle_tree.root());
        let merkle_path = reference_tree_1.get_path_of_leaf(leaf_index, true).unwrap();
        merkle_paths.push(merkle_path);
        // Re-verify the changelog history after the update.
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog.first_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert_eq!(changelog_entries.len(), merkle_paths.len() - 1);
        for ((leaf_index, merkle_path), changelog_entry) in leaf_indices
            .iter()
            .skip(1)
            .zip(merkle_paths.iter().skip(1))
            .zip(changelog_entries)
        {
            assert_eq!(changelog_entry.index, *leaf_index as u64);
            for i in 0..HEIGHT {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = merkle_path[i];
                assert_eq!(changelog_node, path_node);
            }
        }
        for (root_1, root_2) in merkle_tree.roots.iter().zip(roots.iter()) {
            assert_eq!(root_1, root_2);
        }
    }
    // Keep updating random leaves in loop.
    for _ in 0..1000 {
        // Reload the tree.
        let mut merkle_tree =
            ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_mut(
                bytes.as_mut_slice(),
            )
            .unwrap();
        // Update random leaf.
        let leaf_index = rng.gen_range(0..reference_tree_1.leaves().len());
        let old_leaf = reference_tree_1.get_leaf(leaf_index);
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let mut proof = reference_tree_1
            .get_proof_of_leaf(leaf_index, false)
            .unwrap();
        let changelog_index = merkle_tree.changelog_index();
        merkle_tree
            .update(
                changelog_index,
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        reference_tree_1.update(&new_leaf, leaf_index).unwrap();
        reference_tree_2
            .update(
                changelog_index,
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        assert_eq!(merkle_tree.root(), reference_tree_1.root());
        leaf_indices.push(leaf_index);
        roots.push(merkle_tree.root());
        let merkle_path = reference_tree_1.get_path_of_leaf(leaf_index, true).unwrap();
        merkle_paths.push(merkle_path);
        // Verify the (bounded, cyclic) changelog history after the update.
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog.first_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert_eq!(changelog_entries.len(), merkle_paths.len() - 1);
        for ((leaf_index, merkle_path), changelog_entry) in leaf_indices
            .iter()
            .skip(1)
            .zip(merkle_paths.iter().skip(1))
            .zip(changelog_entries)
        {
            assert_eq!(changelog_entry.index, *leaf_index as u64);
            for i in 0..HEIGHT {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = merkle_path[i];
                assert_eq!(changelog_node, path_node);
            }
        }
        for (root_1, root_2) in merkle_tree.roots.iter().zip(roots.iter()) {
            assert_eq!(root_1, root_2);
        }
    }
}
// Zero-copy (de)serialization round-trip; const parameters are passed inline
// as `<hasher, HEIGHT, CHANGELOG, ROOTS, CANOPY>`.
#[test]
fn test_from_bytes_keccak_8_256_256() {
    from_bytes::<Keccak, 8, 256, 256, 0>()
}
#[test]
fn test_from_bytes_poseidon_8_256_256() {
    from_bytes::<Poseidon, 8, 256, 256, 0>()
}
#[test]
fn test_from_bytes_sha256_8_256_256_0() {
    from_bytes::<Sha256, 8, 256, 256, 0>()
}
/// Tests the buffer size checks. Buffer size checks should fail any time that
/// a provided byte slice is smaller than the expected size indicated by the
/// tree metadata (height, changelog size, roots size etc.).
///
/// In case of `from_bytes_zero_copy_init`, the metadata are provided with an
/// intention of initializing them. The provided parameters influence the
/// size checks.
///
/// In case of `from_bytes_zero_copy_mut`, the metadata are read from the
/// buffer. Therefore, we end up with two phases of checks:
///
/// 1. Check of the non-dynamic fields, including the metadata structs.
///    Based on size of all non-dynamic fields of `ConcurrentMerkleTree`.
/// 2. If the check was successful, metadata are being read from the buffer.
/// 3. After reading the metadata, we check the buffer size again, now to the
///    full extent, before actually using it.
fn buffer_error<
    H,
    const HEIGHT: usize,
    const CHANGELOG: usize,
    const ROOTS: usize,
    const CANOPY: usize,
>()
where
    H: Hasher,
{
    let expected_size =
        ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(HEIGHT, CHANGELOG, ROOTS, CANOPY);
    // `from_bytes_zero_copy_init` has to reject every undersized buffer.
    for truncated_size in 1..expected_size {
        let mut buf = vec![0u8; truncated_size];
        let res = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_init(
            &mut buf, HEIGHT, CANOPY, CHANGELOG, ROOTS,
        );
        assert!(matches!(
            res,
            Err(ConcurrentMerkleTreeError::BufferSize(_, _))
        ));
    }
    // Initialize the tree in a correctly sized buffer.
    let mut buf = vec![0u8; expected_size];
    ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_init(
        &mut buf, HEIGHT, CANOPY, CHANGELOG, ROOTS,
    )
    .unwrap();
    // `from_bytes_zero_copy_mut` has to reject every undersized view of the
    // initialized account, based on the metadata stored inside it.
    for truncated_size in 1..expected_size {
        let truncated = &mut buf[..truncated_size];
        let res = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_mut(truncated);
        assert!(matches!(
            res,
            Err(ConcurrentMerkleTreeError::BufferSize(_, _))
        ));
    }
}
// Buffer size validation; const parameters are passed inline as
// `<hasher, HEIGHT, CHANGELOG, ROOTS, CANOPY>`.
#[test]
fn test_buffer_error_keccak_8_256_256() {
    buffer_error::<Keccak, 8, 256, 256, 0>()
}
#[test]
fn test_buffer_error_poseidon_8_256_256() {
    buffer_error::<Poseidon, 8, 256, 256, 0>()
}
#[test]
fn test_buffer_error_sha256_8_256_256_0() {
    buffer_error::<Sha256, 8, 256, 256, 0>()
}
/// Ensures that constructing a tree of height 0 fails explicitly with
/// `HeightZero`.
fn height_zero<H>()
where
    H: Hasher,
{
    let res = ConcurrentMerkleTree::<H, 0>::new(0, 256, 256, 0);
    assert!(matches!(res, Err(ConcurrentMerkleTreeError::HeightZero)));
}
#[test]
fn test_height_zero_keccak() {
    height_zero::<Keccak>()
}
#[test]
fn test_height_zero_poseidon() {
    height_zero::<Poseidon>()
}
#[test]
fn test_height_zero_sha256() {
    height_zero::<Sha256>()
}
/// Ensures that constructing a tree with a zero-sized changelog fails
/// explicitly with `ChangelogZero`.
fn changelog_zero<H>()
where
    H: Hasher,
{
    let res = ConcurrentMerkleTree::<H, 26>::new(26, 0, 256, 0);
    assert!(matches!(res, Err(ConcurrentMerkleTreeError::ChangelogZero)));
}
#[test]
fn test_changelog_zero_keccak() {
    changelog_zero::<Keccak>()
}
#[test]
fn test_changelog_zero_poseidon() {
    changelog_zero::<Poseidon>()
}
#[test]
fn test_changelog_zero_sha256() {
    changelog_zero::<Sha256>()
}
/// Creating a tree with a zero-capacity root history must be rejected with
/// `RootsZero`.
fn roots_zero<H>()
where
    H: Hasher,
{
    let result = ConcurrentMerkleTree::<H, 26>::new(26, 256, 0, 0);
    assert!(matches!(result, Err(ConcurrentMerkleTreeError::RootsZero)));
}
// `roots_zero` for each supported hasher.
#[test]
fn test_roots_zero_keccak() {
    roots_zero::<Keccak>()
}
#[test]
fn test_roots_zero_poseidon() {
    roots_zero::<Poseidon>()
}
#[test]
fn test_roots_zero_sha256() {
    roots_zero::<Sha256>()
}
/// Attempts an update with a proof consisting of `proof_len` sibling nodes and
/// asserts that it is rejected with `InvalidProofLength`.
///
/// The node values are arbitrary — the length check happens before the proof's
/// correctness is ever examined.
fn update_with_invalid_proof<H, const HEIGHT: usize>(
    merkle_tree: &mut ConcurrentMerkleTree<H, HEIGHT>,
    proof_len: usize,
) where
    H: Hasher,
{
    let nodes = vec![[5u8; 32]; proof_len];
    let mut proof = BoundedVec::from_slice(nodes.as_slice());
    let result = merkle_tree.update(
        merkle_tree.changelog_index(),
        &H::zero_bytes()[0],
        &[4u8; 32],
        0,
        &mut proof,
    );
    assert!(matches!(
        result,
        Err(ConcurrentMerkleTreeError::InvalidProofLength(_, _))
    ))
}
/// Verifies that every proof length other than the expected `height - canopy`
/// is rejected — both too-short and too-long proofs (up to 255 nodes).
fn invalid_proof_len<H, const HEIGHT: usize, const CANOPY: usize>()
where
    H: Hasher,
{
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, 256, 256, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    let expected_len = HEIGHT - CANOPY;
    // Everything shorter, then everything longer, than the expected length.
    for proof_len in (0..expected_len).chain(expected_len + 1..256) {
        update_with_invalid_proof(&mut merkle_tree, proof_len);
    }
}
// `invalid_proof_len` for each hasher, with and without a canopy.
#[test]
fn test_invalid_proof_len_keccak_height_26_canopy_0() {
    invalid_proof_len::<Keccak, 26, 0>()
}
#[test]
fn test_invalid_proof_len_keccak_height_26_canopy_10() {
    invalid_proof_len::<Keccak, 26, 10>()
}
#[test]
fn test_invalid_proof_len_poseidon_height_26_canopy_0() {
    invalid_proof_len::<Poseidon, 26, 0>()
}
#[test]
fn test_invalid_proof_len_poseidon_height_26_canopy_10() {
    invalid_proof_len::<Poseidon, 26, 10>()
}
#[test]
fn test_invalid_proof_len_sha256_height_26_canopy_0() {
    invalid_proof_len::<Sha256, 26, 0>()
}
#[test]
fn test_invalid_proof_len_sha256_height_26_canopy_10() {
    invalid_proof_len::<Sha256, 26, 10>()
}
/// Verifies that a proof of the correct length but with wrong node values is
/// rejected with `InvalidProof`.
fn invalid_proof<H, const HEIGHT: usize, const CANOPY: usize>()
where
    H: Hasher,
{
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, 256, 256, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    let old_leaf = [5u8; 32];
    merkle_tree.append(&old_leaf).unwrap();
    // Fill the proof with random field elements; it cannot possibly match the
    // tree's actual node values.
    let mut rng = thread_rng();
    let mut bogus_proof = BoundedVec::with_capacity(HEIGHT);
    for _ in 0..(HEIGHT - CANOPY) {
        let node: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        bogus_proof.push(node).unwrap();
    }
    let result = merkle_tree.update(
        merkle_tree.changelog_index(),
        &old_leaf,
        &[6u8; 32],
        0,
        &mut bogus_proof,
    );
    assert!(matches!(
        result,
        Err(ConcurrentMerkleTreeError::InvalidProof(_, _))
    ));
}
// `invalid_proof` for each hasher, with and without a canopy.
#[test]
fn test_invalid_proof_keccak_height_26_canopy_0() {
    invalid_proof::<Keccak, 26, 0>()
}
#[test]
fn test_invalid_proof_keccak_height_26_canopy_10() {
    invalid_proof::<Keccak, 26, 10>()
}
#[test]
fn test_invalid_proof_poseidon_height_26_canopy_0() {
    invalid_proof::<Poseidon, 26, 0>()
}
#[test]
fn test_invalid_proof_poseidon_height_26_canopy_10() {
    invalid_proof::<Poseidon, 26, 10>()
}
#[test]
fn test_invalid_proof_sha256_height_26_canopy_0() {
    invalid_proof::<Sha256, 26, 0>()
}
#[test]
fn test_invalid_proof_sha256_height_26_canopy_10() {
    invalid_proof::<Sha256, 26, 10>()
}
/// Updating any leaf of a freshly initialized (empty) tree must fail with
/// `CannotUpdateEmpty`, regardless of the target index.
fn update_empty<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 26;
    let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, 256, 256, 0).unwrap();
    merkle_tree.init().unwrap();
    // A proof of zero-subtree hashes is the (only) valid proof shape for an
    // empty tree; the update must still be refused.
    let mut proof = BoundedVec::from_slice(&H::zero_bytes()[..HEIGHT]);
    for leaf_index in 0..(1 << HEIGHT) {
        let result = merkle_tree.update(
            merkle_tree.changelog_index(),
            &H::zero_bytes()[0],
            &[5u8; 32],
            leaf_index,
            &mut proof,
        );
        assert!(matches!(
            result,
            Err(ConcurrentMerkleTreeError::CannotUpdateEmpty)
        ));
    }
}
// `update_empty` for each supported hasher.
#[test]
fn test_update_empty_keccak() {
    update_empty::<Keccak>()
}
#[test]
fn test_update_empty_poseidon() {
    update_empty::<Poseidon>()
}
#[test]
fn test_update_empty_sha256() {
    update_empty::<Sha256>()
}
/// Appending an empty batch of leaves is an error (`EmptyLeaves`), not a
/// silent no-op.
fn append_empty_batch<H>()
where
    H: Hasher,
{
    let mut merkle_tree = ConcurrentMerkleTree::<H, 26>::new(26, 256, 256, 0).unwrap();
    merkle_tree.init().unwrap();
    let result = merkle_tree.append_batch(&[]);
    assert!(matches!(result, Err(ConcurrentMerkleTreeError::EmptyLeaves)));
}
// `append_empty_batch` for each supported hasher.
#[test]
fn test_append_empty_batch_keccak() {
    append_empty_batch::<Keccak>()
}
#[test]
fn test_append_empty_batch_poseidon() {
    append_empty_batch::<Poseidon>()
}
#[test]
fn test_append_empty_batch_sha256() {
    append_empty_batch::<Sha256>()
}
/// Reproducible only with Poseidon. Keccak and SHA256 don't return errors, as
/// they don't operate on a prime field.
#[test]
fn hasher_error() {
    let mut merkle_tree =
        ConcurrentMerkleTree::<Poseidon, 26>::new(26, 256, 256, 0).unwrap();
    merkle_tree.init().unwrap();
    // A leaf of all 0xff bytes exceeds the field modulus, so hashing it must
    // fail and surface as a `Hasher` error.
    let result = merkle_tree.append(&[255_u8; 32]);
    assert!(matches!(result, Err(ConcurrentMerkleTreeError::Hasher(_))));
}
// End-to-end nullification scenario: appends leaves to an on-chain-style
// concurrent tree, a reference ("crank") tree and a hash-set queue, then
// nullifies a random subset of queued leaves and checks that both trees agree
// on root and canopy. Repeated for tree sizes 1..100.
#[test]
pub fn test_100_nullify_mt() {
    for iterations in 1..100 {
        println!("iteration: {:?}", iterations);
        let mut crank_merkle_tree =
            light_merkle_tree_reference::MerkleTree::<light_hasher::Poseidon>::new(26, 10);
        let mut onchain_merkle_tree =
            ConcurrentMerkleTree::<Poseidon, 26>::new(26, 10, 10, 10).unwrap();
        onchain_merkle_tree.init().unwrap();
        assert_eq!(onchain_merkle_tree.root(), crank_merkle_tree.root());
        let mut queue = HashSet::new(6857, 2400).unwrap();
        let mut queue_indices = Vec::new();
        for i in 1..1 + iterations {
            let mut leaf = [0; 32];
            leaf[31] = i as u8;
            // onchain this is equivalent to append state (compressed pda program)
            onchain_merkle_tree.append(&leaf).unwrap();
            crank_merkle_tree.append(&leaf).unwrap();
            // onchain the equivalent is nullify state (compressed pda program)
            let leaf_bn = BigUint::from_be_bytes(&leaf);
            queue.insert(&leaf_bn, 1).unwrap();
            let (_, index) = queue.find_element(&leaf_bn, None).unwrap().unwrap();
            queue_indices.push(index);
        }
        assert_eq!(onchain_merkle_tree.root(), crank_merkle_tree.root());
        assert_eq!(
            onchain_merkle_tree.canopy,
            crank_merkle_tree.get_canopy().unwrap()
        );
        let mut rng = rand::thread_rng();
        // Pick random queue indices to nullify.
        let queue_indices = queue_indices
            .choose_multiple(&mut rng, cmp::min(9, iterations))
            .cloned()
            .collect::<Vec<_>>();
        let change_log_index = onchain_merkle_tree.changelog_index();
        let mut nullified_leaf_indices = Vec::with_capacity(queue_indices.len());
        // Nullify the leaves we picked. All updates reuse the changelog index
        // captured before the loop, so later updates exercise the concurrent
        // proof-patching path of the on-chain tree.
        for queue_index in queue_indices {
            let leaf_cell = queue.get_unmarked_bucket(queue_index).unwrap().unwrap();
            let leaf_index = crank_merkle_tree
                .get_leaf_index(&leaf_cell.value_bytes())
                .unwrap()
                .clone();
            let mut proof = crank_merkle_tree
                .get_proof_of_leaf(leaf_index, false)
                .unwrap();
            onchain_merkle_tree
                .update(
                    change_log_index,
                    &leaf_cell.value_bytes(),
                    &[0u8; 32],
                    leaf_index,
                    &mut proof,
                )
                .unwrap();
            nullified_leaf_indices.push(leaf_index);
        }
        // Apply the same nullifications to the reference tree only afterwards;
        // the proofs above are therefore all relative to the pre-nullification
        // state of the reference tree.
        for leaf_index in nullified_leaf_indices {
            crank_merkle_tree.update(&[0; 32], leaf_index).unwrap();
        }
        assert_eq!(onchain_merkle_tree.root(), crank_merkle_tree.root());
        assert_eq!(
            onchain_merkle_tree.canopy,
            crank_merkle_tree.get_canopy().unwrap()
        );
    }
}
/// Regression fixture: a recorded sequence of 25 tree operations, captured
/// from a randomized run that triggered subtree inconsistencies (see
/// `test_subtree_updates`). Each entry is `(leaf, nullify)`:
/// - `None`  — append `leaf` to the tree;
/// - `Some(i)` — nullify (zero out) the leaf at index `i`; the leaf bytes of
///   such entries are unused and all-zero.
const LEAVES_WITH_NULLIFICATIONS: [([u8; 32], Option<usize>); 25] = [
    (
        [
            9, 207, 75, 159, 247, 170, 46, 154, 178, 197, 60, 83, 191, 240, 137, 41, 36, 54, 242,
            50, 43, 48, 56, 220, 154, 217, 138, 19, 152, 123, 86, 8,
        ],
        None,
    ),
    (
        [
            40, 10, 138, 159, 12, 188, 226, 84, 188, 92, 250, 11, 94, 240, 77, 158, 69, 219, 175,
            48, 248, 181, 216, 200, 54, 38, 12, 224, 155, 40, 23, 32,
        ],
        None,
    ),
    (
        [
            11, 36, 94, 177, 195, 5, 4, 35, 75, 253, 31, 235, 68, 201, 79, 197, 199, 23, 214, 86,
            196, 2, 41, 249, 246, 138, 184, 248, 245, 66, 184, 244,
        ],
        None,
    ),
    (
        [
            29, 3, 221, 195, 235, 46, 139, 171, 137, 7, 36, 118, 178, 198, 52, 20, 10, 131, 164, 5,
            116, 187, 118, 186, 34, 193, 46, 6, 5, 144, 82, 4,
        ],
        None,
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(0),
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(1),
    ),
    (
        [
            6, 146, 149, 76, 49, 159, 84, 164, 203, 159, 181, 165, 21, 204, 111, 149, 87, 255, 46,
            82, 162, 181, 99, 178, 247, 27, 166, 174, 212, 39, 163, 106,
        ],
        None,
    ),
    (
        [
            19, 135, 28, 172, 63, 129, 175, 101, 201, 97, 135, 147, 18, 78, 152, 243, 15, 154, 120,
            153, 92, 46, 245, 82, 67, 32, 224, 141, 89, 149, 162, 228,
        ],
        None,
    ),
    (
        [
            4, 93, 251, 40, 246, 136, 132, 20, 175, 98, 3, 186, 159, 251, 128, 159, 219, 172, 67,
            20, 69, 19, 66, 193, 232, 30, 121, 19, 193, 177, 143, 6,
        ],
        None,
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(3),
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(4),
    ),
    (
        [
            34, 229, 118, 4, 68, 219, 118, 228, 117, 70, 150, 93, 208, 215, 51, 243, 123, 48, 39,
            228, 206, 194, 200, 232, 35, 133, 166, 222, 118, 217, 122, 228,
        ],
        None,
    ),
    (
        [
            24, 61, 159, 11, 70, 12, 177, 252, 244, 238, 130, 73, 202, 69, 102, 83, 33, 103, 82,
            66, 83, 191, 149, 187, 141, 111, 253, 110, 49, 5, 47, 151,
        ],
        None,
    ),
    (
        [
            29, 239, 118, 17, 75, 98, 148, 167, 142, 190, 223, 175, 98, 255, 153, 111, 127, 169,
            62, 234, 90, 89, 90, 70, 218, 161, 233, 150, 89, 173, 19, 1,
        ],
        None,
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(6),
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(5),
    ),
    (
        [
            45, 31, 195, 30, 201, 235, 73, 88, 57, 130, 35, 53, 202, 191, 20, 156, 125, 123, 37,
            49, 154, 194, 124, 157, 198, 236, 233, 25, 195, 174, 157, 31,
        ],
        None,
    ),
    (
        [
            5, 59, 32, 123, 40, 100, 50, 132, 2, 194, 104, 95, 21, 23, 52, 56, 125, 198, 102, 210,
            24, 44, 99, 255, 185, 255, 151, 249, 67, 167, 189, 85,
        ],
        None,
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(9),
    ),
    (
        [
            36, 131, 231, 53, 12, 14, 62, 144, 170, 248, 90, 226, 125, 178, 99, 87, 101, 226, 179,
            43, 110, 130, 233, 194, 112, 209, 74, 219, 154, 48, 41, 148,
        ],
        None,
    ),
    (
        [
            12, 110, 79, 229, 117, 215, 178, 45, 227, 65, 183, 14, 91, 45, 170, 232, 126, 71, 37,
            211, 160, 77, 148, 223, 50, 144, 134, 232, 83, 159, 131, 62,
        ],
        None,
    ),
    (
        [
            28, 57, 110, 171, 41, 144, 47, 162, 132, 221, 102, 100, 30, 69, 249, 176, 87, 134, 133,
            207, 250, 166, 139, 16, 73, 39, 11, 139, 158, 182, 43, 68,
        ],
        None,
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(11),
    ),
    (
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ],
        Some(10),
    ),
    (
        [
            25, 88, 170, 121, 91, 234, 185, 213, 24, 92, 209, 146, 109, 134, 118, 242, 74, 218, 69,
            28, 87, 154, 207, 86, 218, 48, 182, 206, 8, 9, 35, 240,
        ],
        None,
    ),
];
/// Test correctness of subtree updates during updates.
/// The test data is a sequence of leaves with some nullifications
/// and the result of a randomized tests which has triggered subtree inconsistencies.
/// 1. Test subtree consistency with test data
/// 2. Test subtree consistency of updating the right most leaf
#[test]
fn test_subtree_updates() {
    const HEIGHT: usize = 26;
    let mut ref_mt =
        light_merkle_tree_reference::MerkleTree::<light_hasher::Keccak>::new(HEIGHT, 0);
    let mut con_mt =
        light_concurrent_merkle_tree::ConcurrentMerkleTree26::<light_hasher::Keccak>::new(
            HEIGHT, 1400, 2400, 0,
        )
        .unwrap();
    // The spl tree serves as a second, independent implementation to compare
    // roots against.
    let mut spl_concurrent_mt =
        spl_concurrent_merkle_tree::concurrent_merkle_tree::ConcurrentMerkleTree::<HEIGHT, 256>::new();
    spl_concurrent_mt.initialize().unwrap();
    con_mt.init().unwrap();
    assert_eq!(ref_mt.root(), con_mt.root());
    // Replay the recorded operations on all three trees, comparing roots after
    // every single step.
    for (_, leaf) in LEAVES_WITH_NULLIFICATIONS.iter().enumerate() {
        match leaf.1 {
            Some(index) => {
                let change_log_index = con_mt.changelog_index();
                let mut proof = ref_mt.get_proof_of_leaf(index, false).unwrap();
                let old_leaf = ref_mt.get_leaf(index);
                let current_root = con_mt.root();
                spl_concurrent_mt
                    .set_leaf(
                        current_root,
                        old_leaf,
                        [0u8; 32],
                        proof.to_array::<HEIGHT>().unwrap().as_slice(),
                        index.try_into().unwrap(),
                    )
                    .unwrap();
                con_mt
                    .update(change_log_index, &old_leaf, &[0u8; 32], index, &mut proof)
                    .unwrap();
                ref_mt.update(&[0u8; 32], index).unwrap();
            }
            None => {
                con_mt.append(&leaf.0).unwrap();
                ref_mt.append(&leaf.0).unwrap();
                spl_concurrent_mt.append(leaf.0).unwrap();
            }
        }
        assert_eq!(spl_concurrent_mt.get_root(), ref_mt.root());
        assert_eq!(spl_concurrent_mt.get_root(), con_mt.root());
        assert_eq!(ref_mt.root(), con_mt.root());
    }
    let index = con_mt.next_index() - 1;
    // test rightmost leaf edge case
    let change_log_index = con_mt.changelog_index();
    let mut proof = ref_mt.get_proof_of_leaf(index, false).unwrap();
    let old_leaf = ref_mt.get_leaf(index);
    let current_root = con_mt.root();
    spl_concurrent_mt
        .set_leaf(
            current_root,
            old_leaf,
            [0u8; 32],
            proof.to_array::<HEIGHT>().unwrap().as_slice(),
            index.try_into().unwrap(),
        )
        .unwrap();
    con_mt
        .update(change_log_index, &old_leaf, &[0u8; 32], index, &mut proof)
        .unwrap();
    ref_mt.update(&[0u8; 32], index).unwrap();
    assert_eq!(spl_concurrent_mt.get_root(), ref_mt.root());
    assert_eq!(spl_concurrent_mt.get_root(), con_mt.root());
    assert_eq!(ref_mt.root(), con_mt.root());
    // Appending after the rightmost-leaf update must keep all three trees in
    // agreement.
    let leaf = [3u8; 32];
    con_mt.append(&leaf).unwrap();
    ref_mt.append(&leaf).unwrap();
    spl_concurrent_mt.append(leaf).unwrap();
    assert_eq!(spl_concurrent_mt.get_root(), ref_mt.root());
    assert_eq!(spl_concurrent_mt.get_root(), con_mt.root());
    assert_eq!(ref_mt.root(), con_mt.root());
}
/// Tests an update of a leaf which was modified by another updates.
fn update_already_modified_leaf<
    H,
    // Number of conflicting updates of the same leaf.
    const CONFLICTS: usize,
    // Number of appends of random leaves before submitting the conflicting
    // updates.
    // NOTE(review): this parameter is never read by the function body — the
    // documented appends are not performed. Confirm whether that is intended.
    const RANDOM_APPENDS_BEFORE_CONFLICTS: usize,
    // Number of appends of random leaves after every single conflicting
    // update.
    // NOTE(review): this parameter is never read by the function body either.
    const RANDOM_APPENDS_AFTER_EACH_CONFLICT: usize,
>()
where
    H: Hasher,
{
    const HEIGHT: usize = 26;
    const MAX_CHANGELOG: usize = 8;
    const MAX_ROOTS: usize = 8;
    const CANOPY: usize = 0;
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, MAX_CHANGELOG, MAX_ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
    let mut rng = thread_rng();
    // Create tree with a single leaf.
    let first_leaf: [u8; 32] = Fr::rand(&mut rng)
        .into_bigint()
        .to_bytes_be()
        .try_into()
        .unwrap();
    merkle_tree.append(&first_leaf).unwrap();
    reference_tree.append(&first_leaf).unwrap();
    // Save a proof of the first append.
    let outdated_changelog_index = merkle_tree.changelog_index();
    let mut outdated_proof = reference_tree.get_proof_of_leaf(0, false).unwrap().clone();
    let mut old_leaf = first_leaf;
    for _ in 0..CONFLICTS {
        // Update leaf. Always use an up-to-date proof.
        let mut up_to_date_proof = reference_tree.get_proof_of_leaf(0, false).unwrap();
        let new_leaf = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        merkle_tree
            .update(
                merkle_tree.changelog_index(),
                &old_leaf,
                &new_leaf,
                0,
                &mut up_to_date_proof,
            )
            .unwrap();
        reference_tree.update(&new_leaf, 0).unwrap();
        old_leaf = new_leaf;
        assert_eq!(merkle_tree.root(), reference_tree.root());
    }
    // Update leaf. This time, try using an outdated proof.
    let new_leaf = Fr::rand(&mut rng)
        .into_bigint()
        .to_bytes_be()
        .try_into()
        .unwrap();
    let res = merkle_tree.update(
        outdated_changelog_index,
        &first_leaf,
        &new_leaf,
        0,
        &mut outdated_proof,
    );
    assert!(matches!(
        res,
        Err(ConcurrentMerkleTreeError::CannotUpdateLeaf)
    ));
}
// `update_already_modified_leaf` for each hasher, with varying numbers of
// conflicting updates and (currently unused) random-append parameters.
#[test]
fn test_update_already_modified_leaf_keccak_1_0_0() {
    update_already_modified_leaf::<Keccak, 1, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_1_0_0() {
    update_already_modified_leaf::<Poseidon, 1, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_sha256_1_0_0() {
    update_already_modified_leaf::<Sha256, 1, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_keccak_1_1_1() {
    update_already_modified_leaf::<Keccak, 1, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_1_1_1() {
    update_already_modified_leaf::<Poseidon, 1, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_sha256_1_1_1() {
    update_already_modified_leaf::<Sha256, 1, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_keccak_1_2_2() {
    update_already_modified_leaf::<Keccak, 1, 2, 2>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_1_2_2() {
    update_already_modified_leaf::<Poseidon, 1, 2, 2>()
}
#[test]
fn test_update_already_modified_leaf_sha256_1_2_2() {
    update_already_modified_leaf::<Sha256, 1, 2, 2>()
}
#[test]
fn test_update_already_modified_leaf_keccak_2_0_0() {
    update_already_modified_leaf::<Keccak, 2, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_2_0_0() {
    update_already_modified_leaf::<Poseidon, 2, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_sha256_2_0_0() {
    update_already_modified_leaf::<Sha256, 2, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_keccak_2_1_1() {
    update_already_modified_leaf::<Keccak, 2, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_2_1_1() {
    update_already_modified_leaf::<Poseidon, 2, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_sha256_2_1_1() {
    update_already_modified_leaf::<Sha256, 2, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_keccak_2_2_2() {
    update_already_modified_leaf::<Keccak, 2, 2, 2>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_2_2_2() {
    update_already_modified_leaf::<Poseidon, 2, 2, 2>()
}
#[test]
fn test_update_already_modified_leaf_sha256_2_2_2() {
    update_already_modified_leaf::<Sha256, 2, 2, 2>()
}
#[test]
fn test_update_already_modified_leaf_keccak_4_0_0() {
    update_already_modified_leaf::<Keccak, 4, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_4_0_0() {
    update_already_modified_leaf::<Poseidon, 4, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_sha256_4_0_0() {
    update_already_modified_leaf::<Sha256, 4, 0, 0>()
}
#[test]
fn test_update_already_modified_leaf_keccak_4_1_1() {
    update_already_modified_leaf::<Keccak, 4, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_4_1_1() {
    update_already_modified_leaf::<Poseidon, 4, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_sha256_4_1_1() {
    update_already_modified_leaf::<Sha256, 4, 1, 1>()
}
#[test]
fn test_update_already_modified_leaf_keccak_4_4_4() {
    update_already_modified_leaf::<Keccak, 4, 4, 4>()
}
#[test]
fn test_update_already_modified_leaf_poseidon_4_4_4() {
    update_already_modified_leaf::<Poseidon, 4, 4, 4>()
}
#[test]
fn test_update_already_modified_leaf_sha256_4_4_4() {
    update_already_modified_leaf::<Sha256, 4, 4, 4>()
}
/// Checks whether the [`changelog_entries`](ConcurrentMerkleTree::changelog_entries)
/// method returns an iterator with expected entries.
///
/// We expect the `changelog_entries` method to return an iterator with entries
/// newer than the requested index.
///
/// # Examples
///
/// (In the tree) `current_index`: 1
/// (Requested) `changelog_index`: 1
/// Expected iterator: `[]` (empty)
///
/// (In the tree) `current_index`: 3
/// (Requested) `changelog_index`: 1
/// Expected iterator: `[2, 3]` (1 is skipped)
///
/// Changelog capacity: 12
/// (In the tree) `current_index`: 9
/// (Requested) `changelog_index`: 3 (lower than `current_index`, because the
/// changelog is full and started overwriting values from the head)
/// Expected iterator: `[10, 11, 12, 13, 14, 15]` (9 is skipped)
fn changelog_entries<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 26;
    const CHANGELOG: usize = 12;
    const ROOTS: usize = 16;
    const CANOPY: usize = 0;
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    merkle_tree
        .append(&[
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1,
        ])
        .unwrap();
    // Requesting from the current index yields nothing.
    let changelog_entries = merkle_tree
        .changelog_entries(1)
        .unwrap()
        .collect::<Vec<_>>();
    assert!(changelog_entries.is_empty());
    // Try getting changelog entries out of bounds.
    for start in merkle_tree.changelog.len()..1000 {
        let changelog_entries = merkle_tree.changelog_entries(start);
        assert!(matches!(
            changelog_entries,
            Err(ConcurrentMerkleTreeError::BoundedVec(
                BoundedVecError::IterFromOutOfBounds
            ))
        ));
    }
    merkle_tree
        .append(&[
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 2,
        ])
        .unwrap();
    merkle_tree
        .append(&[
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 3,
        ])
        .unwrap();
    // Entries newer than index 1 are leaves 2 and 3 (leaf 1 is skipped).
    let changelog_leaves = merkle_tree
        .changelog_entries(1)
        .unwrap()
        .map(|changelog_entry| changelog_entry.path[0])
        .collect::<Vec<_>>();
    assert_eq!(
        changelog_leaves.as_slice(),
        &[
            Some([
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 2
            ]),
            Some([
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 3
            ])
        ]
    );
    // Try getting changelog entries out of bounds.
    for start in merkle_tree.changelog.len()..1000 {
        let changelog_entries = merkle_tree.changelog_entries(start);
        assert!(matches!(
            changelog_entries,
            Err(ConcurrentMerkleTreeError::BoundedVec(
                BoundedVecError::IterFromOutOfBounds
            ))
        ));
    }
    // Overfill the changelog (capacity 12) so it wraps around.
    for i in 4_u8..16_u8 {
        merkle_tree
            .append(&[
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, i,
            ])
            .unwrap();
    }
    let changelog_leaves = merkle_tree
        .changelog_entries(9)
        .unwrap()
        .map(|changelog_entry| changelog_entry.path[0])
        .collect::<Vec<_>>();
    assert_eq!(
        changelog_leaves.as_slice(),
        &[
            Some([
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 10
            ]),
            Some([
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 11
            ]),
            Some([
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 12
            ]),
            Some([
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 13
            ]),
            Some([
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 14
            ]),
            Some([
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 15
            ])
        ]
    );
    // Try getting changelog entries out of bounds.
    for start in merkle_tree.changelog.len()..1000 {
        let changelog_entries = merkle_tree.changelog_entries(start);
        assert!(matches!(
            changelog_entries,
            Err(ConcurrentMerkleTreeError::BoundedVec(
                BoundedVecError::IterFromOutOfBounds
            ))
        ));
    }
}
// `changelog_entries` for each supported hasher.
#[test]
fn changelog_entries_keccak() {
    changelog_entries::<Keccak>()
}
#[test]
fn changelog_entries_poseidon() {
    changelog_entries::<Poseidon>()
}
#[test]
fn changelog_entries_sha256() {
    changelog_entries::<Sha256>()
}
/// Checks whether the [`changelog_entries`](ConcurrentMerkleTree::changelog_entries)
/// method returns an iterator with expected entries.
///
/// It tests random insertions and updates and checks the consistency of leaves
/// (`path[0]`) in changelogs.
fn changelog_entries_random<
    H,
    const HEIGHT: usize,
    const CHANGELOG: usize,
    const ROOTS: usize,
    const CANOPY: usize,
>()
where
    H: Hasher,
{
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap()<br>;
    merkle_tree.init().unwrap();
    let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
    let mut rng = thread_rng();
    let changelog_entries = merkle_tree
        .changelog_entries(0)
        .unwrap()
        .collect::<Vec<_>>();
    assert!(changelog_entries.is_empty());
    // Requesting changelog entries starting from the current `changelog_index()`
    // should always return an empty iterator.
    let changelog_entries = merkle_tree
        .changelog_entries(merkle_tree.changelog_index())
        .unwrap()
        .collect::<Vec<_>>();
    assert!(changelog_entries.is_empty());
    // Vector of changelog indices after each operation.
    let mut leaf_indices = CyclicBoundedVec::with_capacity(CHANGELOG);
    // Vector of roots after each operation.
    // NOTE(review): `roots` is written to but never read back in this test.
    let mut roots = CyclicBoundedVec::with_capacity(CHANGELOG);
    // Vector of merkle paths we get from the reference tree after each operation.
    let mut merkle_paths = CyclicBoundedVec::with_capacity(CHANGELOG);
    // Changelog is always initialized with a changelog path consisting of zero
    // bytes. For consistency, we need to assert the 1st zero byte as the first
    // expected leaf in the changelog.
    let merkle_path = reference_tree.get_path_of_leaf(0, true).unwrap();
    leaf_indices.push(0);
    merkle_paths.push(merkle_path);
    roots.push(merkle_tree.root());
    for _ in 0..1000 {
        // Append random leaf.
        let leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let leaf_index = merkle_tree.next_index();
        merkle_tree.append(&leaf).unwrap();
        reference_tree.append(&leaf).unwrap();
        leaf_indices.push(leaf_index);
        roots.push(merkle_tree.root());
        let merkle_path = reference_tree.get_path_of_leaf(leaf_index, true).unwrap();
        merkle_paths.push(merkle_path);
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog.first_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert_eq!(changelog_entries.len(), merkle_paths.len() - 1);
        for ((leaf_index, merkle_path), changelog_entry) in leaf_indices
            .iter()
            .skip(1)
            .zip(merkle_paths.iter().skip(1))
            .zip(changelog_entries)
        {
            assert_eq!(changelog_entry.index, *leaf_index as u64);
            for i in 0..HEIGHT {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = merkle_path[i];
                assert_eq!(changelog_node, path_node);
            }
        }
        // Requesting changelog entries starting from the current `changelog_index()`
        // should always return an empty iterator.
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert!(changelog_entries.is_empty());
        // Update random leaf.
        let leaf_index = rng.gen_range(0..reference_tree.leaves().len());
        let old_leaf = reference_tree.get_leaf(leaf_index);
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let mut proof = reference_tree.get_proof_of_leaf(leaf_index, false).unwrap();
        merkle_tree
            .update(
                merkle_tree.changelog_index(),
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        reference_tree.update(&new_leaf, leaf_index).unwrap();
        leaf_indices.push(leaf_index);
        roots.push(merkle_tree.root());
        let merkle_path = reference_tree.get_path_of_leaf(leaf_index, true).unwrap();
        merkle_paths.push(merkle_path);
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog.first_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert_eq!(changelog_entries.len(), merkle_paths.len() - 1);
        for ((leaf_index, merkle_path), changelog_entry) in leaf_indices
            .iter()
            .skip(1)
            .zip(merkle_paths.iter().skip(1))
            .zip(changelog_entries)
        {
            assert_eq!(changelog_entry.index, *leaf_index as u64);
            for i in 0..HEIGHT {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = merkle_path[i];
                assert_eq!(changelog_node, path_node);
            }
        }
        // Requesting changelog entries starting from the current `changelog_index()`
        // should always return an empty iterator.
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert!(changelog_entries.is_empty());
    }
}
// `changelog_entries_random` instantiations: hasher × canopy (0 and 10), all
// with height 26, changelog 256, roots 256.
#[test]
fn test_changelog_entries_random_keccak_26_256_256_0() {
    changelog_entries_random::<Keccak, 26, 256, 256, 0>()
}
#[test]
fn test_changelog_entries_random_keccak_26_256_256_10() {
    changelog_entries_random::<Keccak, 26, 256, 256, 10>()
}
#[test]
fn test_changelog_entries_random_poseidon_26_256_256_0() {
    changelog_entries_random::<Poseidon, 26, 256, 256, 0>()
}
#[test]
fn test_changelog_entries_random_poseidon_26_256_256_10() {
    changelog_entries_random::<Poseidon, 26, 256, 256, 10>()
}
#[test]
fn test_changelog_entries_random_sha256_26_256_256_0() {
    changelog_entries_random::<Sha256, 26, 256, 256, 0>()
}
#[test]
fn test_changelog_entries_random_sha256_26_256_256_10() {
    changelog_entries_random::<Sha256, 26, 256, 256, 10>()
}
/// When reading the tests above (`changelog_entries`, `changelog_entries_random`)
/// you might be still wondering why is skipping the **current** changelog element
/// necessary.
///
/// The explanation is that not skipping the current element might produce leaf
/// conflicts. Imagine that we insert a leaf and then we try to immediately update
/// it. Starting the iteration from the current changelog entry would replay our
/// own insertion as if it were a conflicting concurrent change, so the
/// legitimate update would be wrongly rejected with `CannotUpdateLeaf`.
///
/// This test reproduces that case and serves as a proof that skipping is the
/// right action.
fn changelog_iteration_without_skipping<
    H,
    const HEIGHT: usize,
    const CHANGELOG: usize,
    const ROOTS: usize,
    const CANOPY: usize,
>()
where
    H: Hasher,
{
    /// A broken re-implementation of `ConcurrentMerkleTree::update_proof_from_changelog`
    /// which reproduces the described issue.
    fn update_proof_from_changelog<H, const HEIGHT: usize>(
        merkle_tree: &ConcurrentMerkleTree<H, HEIGHT>,
        changelog_index: usize,
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError>
    where
        H: Hasher,
    {
        // Iterates from `changelog_index` itself — the current entry is NOT
        // skipped, which is exactly the flaw being demonstrated.
        for changelog_entry in merkle_tree.changelog.iter_from(changelog_index).unwrap() {
            changelog_entry.update_proof(leaf_index, proof)?;
        }
        Ok(())
    }
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
    let mut rng = thread_rng();
    let leaf: [u8; 32] = Fr::rand(&mut rng)
        .into_bigint()
        .to_bytes_be()
        .try_into()
        .unwrap();
    merkle_tree.append(&leaf).unwrap();
    reference_tree.append(&leaf).unwrap();
    let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap();
    // Updating the just-appended leaf with a correct proof is rejected,
    // because the broken helper replays our own append as a conflict.
    let res =
        update_proof_from_changelog(&merkle_tree, merkle_tree.changelog_index(), 0, &mut proof);
    assert!(matches!(
        res,
        Err(ConcurrentMerkleTreeError::CannotUpdateLeaf)
    ));
}
// `changelog_iteration_without_skipping` for each hasher (height 26,
// changelog 16, roots 16, canopy 0).
#[test]
fn test_changelog_interation_without_skipping_keccak_26_16_16_0() {
    changelog_iteration_without_skipping::<Keccak, 26, 16, 16, 0>()
}
#[test]
fn test_changelog_interation_without_skipping_poseidon_26_16_16_0() {
    changelog_iteration_without_skipping::<Poseidon, 26, 16, 16, 0>()
}
#[test]
fn test_changelog_interation_without_skipping_sha256_26_16_16_0() {
    changelog_iteration_without_skipping::<Sha256, 26, 16, 16, 0>()
}
/// Tests an update with an old `changelog_index` and proof, which refers to the
/// state before the changelog wrap-around (enough new operations to overwrite
/// the whole changelog). Such an update should fail.
fn update_changelog_wrap_around<
    H,
    const HEIGHT: usize,
    const CHANGELOG: usize,
    const ROOTS: usize,
    const CANOPY: usize,
>()
where
    H: Hasher,
{
    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();
    let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);
    let mut rng = thread_rng();
    // The leaf which we will want to update with an expired changelog.
    let leaf: [u8; 32] = Fr::rand(&mut rng)
        .into_bigint()
        .to_bytes_be()
        .try_into()
        .unwrap();
    let (changelog_index, _) = merkle_tree.append(&leaf).unwrap();
    reference_tree.append(&leaf).unwrap();
    let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap();
    // Perform enough appends and updates to overfill the changelog
    // (`CHANGELOG` operations push the entry recorded above out of the
    // cyclic buffer).
    for i in 0..CHANGELOG {
        if i % 2 == 0 {
            // Append random leaf.
            let leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            merkle_tree.append(&leaf).unwrap();
            reference_tree.append(&leaf).unwrap();
        } else {
            // Update random leaf. Index 0 is excluded from the range so the
            // leaf under test stays untouched.
            let leaf_index = rng.gen_range(1..reference_tree.leaves().len());
            let old_leaf = reference_tree.get_leaf(leaf_index);
            let new_leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            let mut proof = reference_tree.get_proof_of_leaf(leaf_index, false).unwrap();
            merkle_tree
                .update(
                    merkle_tree.changelog_index(),
                    &old_leaf,
                    &new_leaf,
                    leaf_index,
                    &mut proof,
                )
                .unwrap();
            reference_tree.update(&new_leaf, leaf_index).unwrap();
        }
    }
    // Try to update the original `leaf` with an outdated proof and changelog
    // index. Expect an error.
    let new_leaf: [u8; 32] = Fr::rand(&mut rng)
        .into_bigint()
        .to_bytes_be()
        .try_into()
        .unwrap();
    let res = merkle_tree.update(changelog_index, &leaf, &new_leaf, 0, &mut proof);
    assert!(matches!(
        res,
        Err(ConcurrentMerkleTreeError::InvalidProof(_, _))
    ));
    // Try to update the original `leaf` with an up-to-date proof and changelog
    // index. Expect a success.
    let changelog_index = merkle_tree.changelog_index();
    let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap();
    merkle_tree
        .update(changelog_index, &leaf, &new_leaf, 0, &mut proof)
        .unwrap();
}
// NOTE: these tests were formerly named `*_26_256_512_0`, which contradicted
// the actual `ROOTS = 256` parameter; the names now match the constants used.

/// Changelog wrap-around update test with the Keccak hasher.
#[test]
fn test_update_changelog_wrap_around_keccak_26_256_256_0() {
    const HEIGHT: usize = 26;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    update_changelog_wrap_around::<Keccak, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}

/// Changelog wrap-around update test with the Poseidon hasher.
#[test]
fn test_update_changelog_wrap_around_poseidon_26_256_256_0() {
    const HEIGHT: usize = 26;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    update_changelog_wrap_around::<Poseidon, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}

/// Changelog wrap-around update test with the SHA-256 hasher.
#[test]
fn test_update_changelog_wrap_around_sha256_26_256_256_0() {
    const HEIGHT: usize = 26;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    update_changelog_wrap_around::<Sha256, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}
/// Verifies that `append_batch` records one changelog entry per appended leaf
/// and that only the final entry carries a fully populated Merkle path.
#[test]
fn test_append_batch() {
    let mut tree = ConcurrentMerkleTree::<Sha256, 2>::new(2, 2, 2, 1).unwrap();
    tree.init().unwrap();

    let first_leaf = [0; 32];
    let second_leaf = [1; 32];
    tree.append_batch(&[&first_leaf, &second_leaf]).unwrap();

    // The non-terminal entry only records the appended leaf; the terminal
    // entry records the leaf and the recomputed parent hash.
    let expected_first = ChangelogPath([Some(first_leaf), None]);
    let expected_last = ChangelogPath([
        Some(second_leaf),
        Some(Sha256::hashv(&[&first_leaf, &second_leaf]).unwrap()),
    ]);

    let first_entry = &tree
        .changelog
        .get(tree.changelog.first_index())
        .unwrap()
        .path;
    let last_entry = &tree
        .changelog
        .get(tree.changelog.last_index())
        .unwrap()
        .path;

    assert_eq!(first_entry, &expected_first);
    assert_eq!(last_entry, &expected_last);
}
/// Tests that updating proof with changelog entries with incomplete paths (coming
/// from batched appends) works.
#[test]
fn test_append_batch_and_update() {
    let mut tree = ConcurrentMerkleTree::<Sha256, 3>::new(3, 10, 10, 0).unwrap();
    tree.init().unwrap();
    let mut reference_tree = light_merkle_tree_reference::MerkleTree::<Sha256>::new(3, 0);
    // Append two leaves.
    let leaf_0 = [0; 32];
    let leaf_1 = [1; 32];
    tree.append_batch(&[&leaf_0, &leaf_1]).unwrap();
    reference_tree.append(&leaf_0).unwrap();
    reference_tree.append(&leaf_1).unwrap();
    // Remember the state from before the second batch - the updates at the
    // bottom of the test use this (older) changelog index and these proofs.
    let changelog_index = tree.changelog_index();
    let mut proof_leaf_0 = reference_tree.get_proof_of_leaf(0, false).unwrap();
    let mut proof_leaf_1 = reference_tree.get_proof_of_leaf(1, false).unwrap();
    // Append another two leaves.
    let leaf_2 = [2; 32];
    let leaf_3 = [3; 32];
    tree.append_batch(&[&leaf_2, &leaf_3]).unwrap();
    reference_tree.append(&leaf_2).unwrap();
    reference_tree.append(&leaf_3).unwrap();
    let changelog_entry_leaf_2 = &tree.changelog[3];
    // Make sure that the non-terminal changelog entry has `None` nodes.
    assert_eq!(
        changelog_entry_leaf_2.path,
        ChangelogPath([Some([2; 32]), None, None])
    );
    let changelog_entry_leaf_3 = &tree.changelog[4];
    // And that the terminal one has no `None` nodes.
    assert_eq!(
        changelog_entry_leaf_3.path,
        ChangelogPath([
            Some([3; 32]),
            Some([
                39, 243, 47, 187, 250, 194, 251, 187, 206, 88, 177, 7, 82, 20, 75, 90, 116, 70,
                212, 185, 30, 75, 169, 15, 253, 238, 48, 94, 145, 89, 128, 232
            ]),
            Some([
                211, 95, 81, 105, 147, 137, 218, 126, 236, 124, 229, 235, 2, 100, 12, 109, 49, 140,
                245, 26, 227, 158, 202, 137, 11, 188, 123, 132, 236, 181, 218, 104
            ])
        ])
    );
    // The tree (only the used fragment) looks like:
    //
    //       _ H2 _
    //      /      \
    //     H0       H1
    //    /  \     /  \
    //   L0  L1  L2  L3
    //
    // Update `leaf_0`. Expect a success.
    let new_leaf_0 = [10; 32];
    tree.update(changelog_index, &leaf_0, &new_leaf_0, 0, &mut proof_leaf_0)
        .unwrap();
    // Update `leaf_1`. Expect a success.
    let new_leaf_1 = [20; 32];
    tree.update(changelog_index, &leaf_1, &new_leaf_1, 1, &mut proof_leaf_1)
        .unwrap();
}
/// Makes sure canopy works by:
///
/// 1. Appending 3 leaves.
/// 2. Updating the first leaf (index 0).
/// 3. Updating the third leaf (index 2), which sits in the other subtree.
fn update_with_canopy<H>()
where
    H: Hasher,
{
    // Height 2 with canopy depth 1: caller-supplied proofs contain only the
    // bottom sibling; the top level is expected to come from the canopy.
    let mut tree = ConcurrentMerkleTree::<H, 2>::new(2, 2, 2, 1).unwrap();
    tree.init().unwrap();
    let leaf_0 = [0; 32];
    let leaf_1 = [1; 32];
    let leaf_2 = [2; 32];
    tree.append(&leaf_0).unwrap();
    tree.append(&leaf_1).unwrap();
    tree.append(&leaf_2).unwrap();
    let old_canopy = tree.canopy.as_slice()[0].clone();
    let new_leaf_0 = [1; 32];
    // One-node (partial) proof - only the sibling leaf.
    let mut leaf_0_proof = BoundedVec::with_capacity(2);
    leaf_0_proof.push(leaf_1).unwrap();
    tree.update(
        tree.changelog_index(),
        &leaf_0,
        &new_leaf_0,
        0,
        &mut leaf_0_proof,
    )
    .unwrap();
    // Updating the left subtree must refresh the cached canopy node.
    let new_canopy = tree.canopy.as_slice()[0].clone();
    assert_ne!(old_canopy, new_canopy);
    let new_leaf_2 = [3; 32];
    // Sibling of leaf 2 is the (still zeroed) fourth leaf slot.
    let mut leaf_2_proof = BoundedVec::with_capacity(2);
    leaf_2_proof.push([0; 32]).unwrap();
    tree.update(
        tree.changelog_index(),
        &leaf_2,
        &new_leaf_2,
        2,
        &mut leaf_2_proof,
    )
    .unwrap();
}
/// Canopy update test instantiated with the Keccak hasher.
#[test]
fn test_update_with_canopy_keccak() {
    update_with_canopy::<Keccak>()
}
/// Canopy update test instantiated with the Poseidon hasher.
#[test]
fn test_update_with_canopy_poseidon() {
    update_with_canopy::<Poseidon>()
}
/// Canopy update test instantiated with the SHA-256 hasher.
#[test]
fn test_update_with_canopy_sha256() {
    update_with_canopy::<Sha256>()
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/copy.rs
|
use std::ops::Deref;
use light_bounded_vec::{BoundedVecMetadata, CyclicBoundedVecMetadata};
use light_hasher::Hasher;
use light_utils::offset::copy::{read_bounded_vec_at, read_cyclic_bounded_vec_at, read_value_at};
use memoffset::{offset_of, span_of};
use crate::{errors::ConcurrentMerkleTreeError, ConcurrentMerkleTree};
/// An owning copy of a [`ConcurrentMerkleTree`] deserialized from a byte
/// buffer (see [`ConcurrentMerkleTreeCopy::from_bytes_copy`]). Unlike the
/// zero-copy wrappers, it does not borrow the source buffer.
#[derive(Debug)]
pub struct ConcurrentMerkleTreeCopy<H, const HEIGHT: usize>(ConcurrentMerkleTree<H, HEIGHT>)
where
    H: Hasher;
impl<H, const HEIGHT: usize> ConcurrentMerkleTreeCopy<H, HEIGHT>
where
    H: Hasher,
{
    /// Deserializes a [`ConcurrentMerkleTree`] from `bytes` into newly
    /// allocated memory, returning the tree together with the number of
    /// bytes consumed.
    ///
    /// # Errors
    ///
    /// Returns [`ConcurrentMerkleTreeError::BufferSize`] when the buffer is
    /// too small for either the static fields or the dynamic vectors.
    pub fn struct_from_bytes_copy(
        bytes: &[u8],
    ) -> Result<(ConcurrentMerkleTree<H, HEIGHT>, usize), ConcurrentMerkleTreeError> {
        // First check: the buffer must at least hold the non-dynamic fields.
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::non_dyn_fields_size();
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }
        let height = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
                .try_into()
                .unwrap(),
        );
        let canopy_depth = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
                .try_into()
                .unwrap(),
        );
        // The reads below advance `offset` and must stay in exactly this
        // order - it mirrors the write order used when the buffer was filled.
        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);
        let next_index = unsafe { read_value_at(bytes, &mut offset) };
        let sequence_number = unsafe { read_value_at(bytes, &mut offset) };
        let rightmost_leaf = unsafe { read_value_at(bytes, &mut offset) };
        let filled_subtrees_metadata: BoundedVecMetadata =
            unsafe { read_value_at(bytes, &mut offset) };
        let changelog_metadata: CyclicBoundedVecMetadata =
            unsafe { read_value_at(bytes, &mut offset) };
        let roots_metadata: CyclicBoundedVecMetadata = unsafe { read_value_at(bytes, &mut offset) };
        let canopy_metadata: BoundedVecMetadata = unsafe { read_value_at(bytes, &mut offset) };
        // Second check: with the capacities known, the buffer must hold the
        // whole account.
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            changelog_metadata.capacity(),
            roots_metadata.capacity(),
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }
        let filled_subtrees =
            unsafe { read_bounded_vec_at(bytes, &mut offset, &filled_subtrees_metadata) };
        let changelog =
            unsafe { read_cyclic_bounded_vec_at(bytes, &mut offset, &changelog_metadata) };
        let roots = unsafe { read_cyclic_bounded_vec_at(bytes, &mut offset, &roots_metadata) };
        let canopy = unsafe { read_bounded_vec_at(bytes, &mut offset, &canopy_metadata) };
        // `new` allocates owned scalar fields and validates dimensions.
        let mut merkle_tree = ConcurrentMerkleTree::new(
            height,
            changelog_metadata.capacity(),
            roots_metadata.capacity(),
            canopy_depth,
        )?;
        // SAFETY: Tree is initialized.
        unsafe {
            *merkle_tree.next_index = next_index;
            *merkle_tree.sequence_number = sequence_number;
            *merkle_tree.rightmost_leaf = rightmost_leaf;
        }
        merkle_tree.filled_subtrees = filled_subtrees;
        merkle_tree.changelog = changelog;
        merkle_tree.roots = roots;
        merkle_tree.canopy = canopy;
        Ok((merkle_tree, offset))
    }
    /// Deserializes `bytes` into an owning wrapper, validating the tree's
    /// size constraints.
    pub fn from_bytes_copy(bytes: &[u8]) -> Result<Self, ConcurrentMerkleTreeError> {
        let (merkle_tree, _) = Self::struct_from_bytes_copy(bytes)?;
        merkle_tree.check_size_constraints()?;
        Ok(Self(merkle_tree))
    }
}
impl<H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeCopy<H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    /// Transparently exposes the wrapped tree for read-only access.
    fn deref(&self) -> &Self::Target {
        let Self(tree) = self;
        tree
    }
}
#[cfg(test)]
mod test {
    use crate::zero_copy::ConcurrentMerkleTreeZeroCopyMut;
    use super::*;
    use ark_bn254::Fr;
    use ark_ff::{BigInteger, PrimeField, UniformRand};
    use light_hasher::Poseidon;
    use rand::{thread_rng, Rng};
    /// Runs `OPERATIONS` random appends against a heap tree and a zero-copy
    /// tree over the same buffer, then checks that a deserialized copy of the
    /// buffer matches the heap tree.
    fn from_bytes_copy<
        const HEIGHT: usize,
        const CHANGELOG: usize,
        const ROOTS: usize,
        const CANOPY_DEPTH: usize,
        const OPERATIONS: usize,
    >() {
        let mut mt_1 =
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY_DEPTH)
                .unwrap();
        mt_1.init().unwrap();
        // Create a buffer with random bytes - the `*_init` method should
        // initialize the buffer gracefully and the randomness shouldn't cause
        // undefined behavior.
        let mut bytes = vec![
            0u8;
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::size_in_account(
                HEIGHT,
                CHANGELOG,
                ROOTS,
                CANOPY_DEPTH
            )
        ];
        thread_rng().fill(bytes.as_mut_slice());
        // Initialize a Merkle tree on top of a byte slice.
        {
            let mut mt =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_init(
                    bytes.as_mut_slice(),
                    HEIGHT,
                    CANOPY_DEPTH,
                    CHANGELOG,
                    ROOTS,
                )
                .unwrap();
            mt.init().unwrap();
            // Ensure that it was properly initialized.
            assert_eq!(mt.height, HEIGHT);
            assert_eq!(mt.canopy_depth, CANOPY_DEPTH);
            assert_eq!(mt.next_index(), 0);
            assert_eq!(mt.sequence_number(), 0);
            assert_eq!(mt.rightmost_leaf(), Poseidon::zero_bytes()[0]);
            assert_eq!(mt.filled_subtrees.capacity(), HEIGHT);
            assert_eq!(mt.filled_subtrees.len(), HEIGHT);
            assert_eq!(mt.changelog.capacity(), CHANGELOG);
            assert_eq!(mt.changelog.len(), 1);
            assert_eq!(mt.roots.capacity(), ROOTS);
            assert_eq!(mt.roots.len(), 1);
            assert_eq!(
                mt.canopy.capacity(),
                ConcurrentMerkleTree::<Poseidon, HEIGHT>::canopy_size(CANOPY_DEPTH)
            );
            assert_eq!(mt.root(), Poseidon::zero_bytes()[HEIGHT]);
        }
        let mut rng = thread_rng();
        for _ in 0..OPERATIONS {
            // Reload the tree from bytes on each iteration.
            let mut mt_2 =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_mut(
                    &mut bytes,
                )
                .unwrap();
            let leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            // Both trees receive the same leaf and must stay in sync.
            mt_1.append(&leaf).unwrap();
            mt_2.append(&leaf).unwrap();
            assert_eq!(mt_1, *mt_2);
        }
        // Read a copy of that Merkle tree.
        let mt_2 = ConcurrentMerkleTreeCopy::<Poseidon, HEIGHT>::from_bytes_copy(&bytes).unwrap();
        assert_eq!(mt_1.height, mt_2.height);
        assert_eq!(mt_1.canopy_depth, mt_2.canopy_depth);
        assert_eq!(mt_1.next_index(), mt_2.next_index());
        assert_eq!(mt_1.sequence_number(), mt_2.sequence_number());
        assert_eq!(mt_1.rightmost_leaf(), mt_2.rightmost_leaf());
        assert_eq!(
            mt_1.filled_subtrees.as_slice(),
            mt_2.filled_subtrees.as_slice()
        );
    }
    #[test]
    fn test_from_bytes_copy_26_1400_2400_10_256_1024() {
        const HEIGHT: usize = 26;
        const CHANGELOG_SIZE: usize = 1400;
        const ROOTS: usize = 2400;
        const CANOPY_DEPTH: usize = 10;
        const OPERATIONS: usize = 1024;
        from_bytes_copy::<HEIGHT, CHANGELOG_SIZE, ROOTS, CANOPY_DEPTH, OPERATIONS>()
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/zero_copy.rs
|
use std::{
marker::PhantomData,
mem,
ops::{Deref, DerefMut},
};
use light_bounded_vec::{
BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecMetadata,
};
use light_hasher::Hasher;
use light_utils::offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at};
use memoffset::{offset_of, span_of};
use crate::{errors::ConcurrentMerkleTreeError, ConcurrentMerkleTree};
/// A read-only wrapper over a [`ConcurrentMerkleTree`] whose fields point
/// directly into a caller-provided byte buffer (zero-copy, no allocation of
/// the dynamic fields).
#[derive(Debug)]
pub struct ConcurrentMerkleTreeZeroCopy<'a, H, const HEIGHT: usize>
where
    H: Hasher,
{
    // Wrapped in `ManuallyDrop` because the tree's fields are backed by the
    // caller's buffer; see the `Drop` impl below for why nothing may be freed.
    merkle_tree: mem::ManuallyDrop<ConcurrentMerkleTree<H, HEIGHT>>,
    // The purpose of this field is ensuring that the wrapper does not outlive
    // the buffer.
    _bytes: &'a [u8],
}
impl<'a, H, const HEIGHT: usize> ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
    /// Casts `bytes` into a [`ConcurrentMerkleTree`] whose scalar fields are
    /// raw pointers into the buffer and whose vectors alias the buffer's
    /// memory. Returns the tree and the number of bytes consumed.
    ///
    /// # Errors
    ///
    /// Returns [`ConcurrentMerkleTreeError::BufferSize`] when the buffer is
    /// too small, or a constraint error when the stored dimensions are
    /// inconsistent with `HEIGHT`.
    pub fn struct_from_bytes_zero_copy(
        bytes: &'a [u8],
    ) -> Result<(ConcurrentMerkleTree<H, HEIGHT>, usize), ConcurrentMerkleTreeError> {
        // First check: the buffer must at least hold the non-dynamic fields.
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::non_dyn_fields_size();
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }
        let height = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
                .try_into()
                .unwrap(),
        );
        let canopy_depth = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
                .try_into()
                .unwrap(),
        );
        // The pointer reads below advance `offset` and must stay in exactly
        // this order - it mirrors `fill_non_dyn_fields_in_buffer`.
        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);
        let next_index = unsafe { read_ptr_at(bytes, &mut offset) };
        let sequence_number = unsafe { read_ptr_at(bytes, &mut offset) };
        let rightmost_leaf = unsafe { read_ptr_at(bytes, &mut offset) };
        let filled_subtrees_metadata = unsafe { read_ptr_at(bytes, &mut offset) };
        let changelog_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let roots_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let canopy_metadata = unsafe { read_ptr_at(bytes, &mut offset) };
        // Second check: with the capacities known, the buffer must hold the
        // whole account.
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            unsafe { (*changelog_metadata).capacity() },
            unsafe { (*roots_metadata).capacity() },
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }
        let filled_subtrees = unsafe {
            BoundedVec::from_raw_parts(
                filled_subtrees_metadata,
                read_array_like_ptr_at(bytes, &mut offset, height),
            )
        };
        let changelog = unsafe {
            CyclicBoundedVec::from_raw_parts(
                changelog_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*changelog_metadata).capacity()),
            )
        };
        let roots = unsafe {
            CyclicBoundedVec::from_raw_parts(
                roots_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*roots_metadata).capacity()),
            )
        };
        let canopy = unsafe {
            BoundedVec::from_raw_parts(
                canopy_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*canopy_metadata).capacity()),
            )
        };
        let merkle_tree = ConcurrentMerkleTree {
            height,
            canopy_depth,
            next_index,
            sequence_number,
            rightmost_leaf,
            filled_subtrees,
            changelog,
            roots,
            canopy,
            _hasher: PhantomData,
        };
        merkle_tree.check_size_constraints()?;
        Ok((merkle_tree, offset))
    }
    /// Casts `bytes` into a read-only zero-copy wrapper.
    pub fn from_bytes_zero_copy(bytes: &'a [u8]) -> Result<Self, ConcurrentMerkleTreeError> {
        let (merkle_tree, _) = Self::struct_from_bytes_zero_copy(bytes)?;
        // NOTE(review): this check is redundant - `struct_from_bytes_zero_copy`
        // already ran it right before returning. Harmless, but could be removed.
        merkle_tree.check_size_constraints()?;
        Ok(Self {
            merkle_tree: mem::ManuallyDrop::new(merkle_tree),
            _bytes: bytes,
        })
    }
}
impl<'a, H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    /// Provides read-only access to the tree backed by the borrowed buffer.
    fn deref(&self) -> &Self::Target {
        // Explicitly go through `ManuallyDrop`'s `Deref` to the inner tree.
        &*self.merkle_tree
    }
}
impl<'a, H, const HEIGHT: usize> Drop for ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
    fn drop(&mut self) {
        // SAFETY: Deliberately a no-op! Why?
        //
        // * Primitive fields of `ConcurrentMerkleTree` implement `Copy`,
        //   therefore `drop()` has no effect on them - Rust drops them when
        //   they go out of scope.
        // * Don't drop the dynamic fields (`filled_subtrees`, `roots` etc.).
        //   In `ConcurrentMerkleTreeZeroCopy`, they are backed by buffers
        //   provided by the caller. These buffers are going to be eventually
        //   deallocated by that caller. Performing another `drop()` here
        //   would result in a double `free()`, which would abort the program
        //   (either with `SIGABRT` or `SIGSEGV`).
    }
}
/// A mutable variant of [`ConcurrentMerkleTreeZeroCopy`]: the tree aliases a
/// caller-provided byte buffer and writes go straight into that buffer.
#[derive(Debug)]
pub struct ConcurrentMerkleTreeZeroCopyMut<'a, H, const HEIGHT: usize>(
    ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>,
)
where
    H: Hasher;
impl<'a, H, const HEIGHT: usize> ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
    /// Casts an already-initialized buffer into a mutable zero-copy tree.
    pub fn from_bytes_zero_copy_mut(
        bytes: &'a mut [u8],
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Ok(Self(ConcurrentMerkleTreeZeroCopy::from_bytes_zero_copy(
            bytes,
        )?))
    }
    /// Writes the scalar fields and the four vector metadata headers into
    /// `bytes`, returning the offset at which the vectors' contents start.
    ///
    /// The write order below must match the read order in
    /// `ConcurrentMerkleTreeZeroCopy::struct_from_bytes_zero_copy`.
    ///
    /// # Errors
    ///
    /// Returns [`ConcurrentMerkleTreeError::BufferSize`] when `bytes` cannot
    /// hold an account of the requested dimensions.
    pub fn fill_non_dyn_fields_in_buffer(
        bytes: &mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
    ) -> Result<usize, ConcurrentMerkleTreeError> {
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            changelog_capacity,
            roots_capacity,
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }
        bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
            .copy_from_slice(&height.to_le_bytes());
        bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
            .copy_from_slice(&canopy_depth.to_le_bytes());
        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);
        // next_index
        write_at::<usize>(bytes, &0_usize.to_le_bytes(), &mut offset);
        // sequence_number
        write_at::<usize>(bytes, &0_usize.to_le_bytes(), &mut offset);
        // rightmost_leaf
        write_at::<[u8; 32]>(bytes, &H::zero_bytes()[0], &mut offset);
        // filled_subtrees (metadata)
        let filled_subtrees_metadata = BoundedVecMetadata::new(height);
        write_at::<BoundedVecMetadata>(bytes, &filled_subtrees_metadata.to_le_bytes(), &mut offset);
        // changelog (metadata)
        let changelog_metadata = CyclicBoundedVecMetadata::new(changelog_capacity);
        write_at::<CyclicBoundedVecMetadata>(bytes, &changelog_metadata.to_le_bytes(), &mut offset);
        // roots (metadata)
        let roots_metadata = CyclicBoundedVecMetadata::new(roots_capacity);
        write_at::<CyclicBoundedVecMetadata>(bytes, &roots_metadata.to_le_bytes(), &mut offset);
        // canopy (metadata)
        let canopy_size = ConcurrentMerkleTree::<H, HEIGHT>::canopy_size(canopy_depth);
        let canopy_metadata = BoundedVecMetadata::new(canopy_size);
        write_at::<BoundedVecMetadata>(bytes, &canopy_metadata.to_le_bytes(), &mut offset);
        Ok(offset)
    }
    /// Initializes a fresh buffer (headers first, then casts it), so callers
    /// can build a tree on top of arbitrary (even random) bytes.
    pub fn from_bytes_zero_copy_init(
        bytes: &'a mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Self::fill_non_dyn_fields_in_buffer(
            bytes,
            height,
            canopy_depth,
            changelog_capacity,
            roots_capacity,
        )?;
        Self::from_bytes_zero_copy_mut(bytes)
    }
}
impl<'a, H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    /// Read-only access to the buffer-backed tree.
    fn deref(&self) -> &Self::Target {
        let Self(inner) = self;
        &inner.merkle_tree
    }
}
impl<'a, H, const HEIGHT: usize> DerefMut for ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
    /// Mutable access to the buffer-backed tree.
    fn deref_mut(&mut self) -> &mut Self::Target {
        let Self(inner) = self;
        &mut inner.merkle_tree
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use ark_bn254::Fr;
    use ark_ff::{BigInteger, PrimeField, UniformRand};
    use light_hasher::Poseidon;
    use rand::{thread_rng, Rng};
    /// Initializes a zero-copy tree over a random buffer, then runs
    /// `OPERATIONS` random appends against both a heap tree and the
    /// buffer-backed tree, asserting they stay identical.
    fn load_from_bytes<
        const HEIGHT: usize,
        const CHANGELOG: usize,
        const ROOTS: usize,
        const CANOPY_DEPTH: usize,
        const OPERATIONS: usize,
    >() {
        let mut mt_1 =
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY_DEPTH)
                .unwrap();
        mt_1.init().unwrap();
        // Create a buffer with random bytes - the `*_init` method should
        // initialize the buffer gracefully and the randomness shouldn't cause
        // undefined behavior.
        let mut bytes = vec![
            0u8;
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::size_in_account(
                HEIGHT,
                CHANGELOG,
                ROOTS,
                CANOPY_DEPTH
            )
        ];
        thread_rng().fill(bytes.as_mut_slice());
        // Initialize a Merkle tree on top of a byte slice.
        {
            let mut mt =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_init(
                    bytes.as_mut_slice(),
                    HEIGHT,
                    CANOPY_DEPTH,
                    CHANGELOG,
                    ROOTS,
                )
                .unwrap();
            mt.init().unwrap();
            // Ensure that it was properly initialized.
            assert_eq!(mt.height, HEIGHT);
            assert_eq!(mt.canopy_depth, CANOPY_DEPTH,);
            assert_eq!(mt.next_index(), 0);
            assert_eq!(mt.sequence_number(), 0);
            assert_eq!(mt.rightmost_leaf(), Poseidon::zero_bytes()[0]);
            assert_eq!(mt.filled_subtrees.capacity(), HEIGHT);
            assert_eq!(mt.filled_subtrees.len(), HEIGHT);
            assert_eq!(mt.changelog.capacity(), CHANGELOG);
            assert_eq!(mt.changelog.len(), 1);
            assert_eq!(mt.roots.capacity(), ROOTS);
            assert_eq!(mt.roots.len(), 1);
            assert_eq!(
                mt.canopy.capacity(),
                ConcurrentMerkleTree::<Poseidon, HEIGHT>::canopy_size(CANOPY_DEPTH)
            );
            assert_eq!(mt.root(), Poseidon::zero_bytes()[HEIGHT]);
        }
        let mut rng = thread_rng();
        for _ in 0..OPERATIONS {
            // Reload the tree from bytes on each iteration.
            let mut mt_2 =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_mut(
                    &mut bytes,
                )
                .unwrap();
            let leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            // Both trees receive the same leaf and must stay in sync.
            mt_1.append(&leaf).unwrap();
            mt_2.append(&leaf).unwrap();
            assert_eq!(mt_1, *mt_2);
        }
    }
    #[test]
    fn test_load_from_bytes_22_256_256_0_1024() {
        load_from_bytes::<22, 256, 256, 0, 1024>()
    }
    #[test]
    fn test_load_from_bytes_22_256_256_10_1024() {
        load_from_bytes::<22, 256, 256, 10, 1024>()
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/lib.rs
|
use std::{
alloc::{self, handle_alloc_error, Layout},
iter::Skip,
marker::PhantomData,
mem,
};
use changelog::ChangelogPath;
use light_bounded_vec::{
BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecIterator,
CyclicBoundedVecMetadata,
};
pub use light_hasher;
use light_hasher::Hasher;
pub mod changelog;
pub mod copy;
pub mod errors;
pub mod event;
pub mod hash;
pub mod zero_copy;
use crate::{
changelog::ChangelogEntry,
errors::ConcurrentMerkleTreeError,
hash::{compute_parent_node, compute_root},
};
/// [Concurrent Merkle tree](https://drive.google.com/file/d/1BOpa5OFmara50fTvL0VIVYjtg-qzHCVc/view)
/// which allows for multiple requests of updating leaves, without making any
/// of the requests invalid, as long as they are not modifying the same leaf.
///
/// When any of the above happens, some of the concurrent requests are going to
/// be invalid, forcing the clients to re-generate the Merkle proof. But that's
/// still better than having such a failure after any update happening in the
/// middle of requesting the update.
///
/// Due to ability to make a decent number of concurrent update requests to be
/// valid, no lock is necessary.
#[repr(C)]
#[derive(Debug)]
// TODO(vadorovsky): The only reason why we are still keeping `HEIGHT` as a
// const generic here is that removing it would require keeping a `BoundedVec`
// inside `CyclicBoundedVec`. Casting byte slices to such nested vector is not
// a trivial task, but we might eventually do it at some point.
pub struct ConcurrentMerkleTree<H, const HEIGHT: usize>
where
    H: Hasher,
{
    pub height: usize,
    pub canopy_depth: usize,
    // The scalars below live behind raw pointers so the same struct can
    // either own them (allocated in `new`) or alias an account byte buffer
    // (see the `zero_copy` module).
    /// Index at which the next leaf will be appended.
    pub next_index: *mut usize,
    /// Sequence number, bumped via `inc_sequence_number`.
    pub sequence_number: *mut usize,
    /// The most recently appended leaf.
    pub rightmost_leaf: *mut [u8; 32],
    /// Hashes of subtrees.
    pub filled_subtrees: BoundedVec<[u8; 32]>,
    /// History of Merkle proofs.
    pub changelog: CyclicBoundedVec<ChangelogEntry<HEIGHT>>,
    /// History of roots.
    pub roots: CyclicBoundedVec<[u8; 32]>,
    /// Cached upper nodes.
    pub canopy: BoundedVec<[u8; 32]>,
    pub _hasher: PhantomData<H>,
}
/// Convenience alias for a concurrent Merkle tree of height 26.
pub type ConcurrentMerkleTree26<H> = ConcurrentMerkleTree<H, 26>;
impl<H, const HEIGHT: usize> ConcurrentMerkleTree<H, HEIGHT>
where
H: Hasher,
{
/// Number of nodes to include in canopy, based on `canopy_depth`.
#[inline(always)]
pub fn canopy_size(canopy_depth: usize) -> usize {
(1 << (canopy_depth + 1)) - 2
}
    /// Size of the struct **without** dynamically sized fields (`BoundedVec`,
    /// `CyclicBoundedVec`).
    ///
    /// Covers the scalar fields plus the metadata headers of the four
    /// vectors; the vectors' contents follow and are sized dynamically.
    pub fn non_dyn_fields_size() -> usize {
        // height
        mem::size_of::<usize>()
        // canopy_depth
        + mem::size_of::<usize>()
        // next_index
        + mem::size_of::<usize>()
        // sequence_number
        + mem::size_of::<usize>()
        // rightmost_leaf
        + mem::size_of::<[u8; 32]>()
        // filled_subtrees (metadata)
        + mem::size_of::<BoundedVecMetadata>()
        // changelog (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
        // roots (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
        // canopy (metadata)
        + mem::size_of::<BoundedVecMetadata>()
    }
    // TODO(vadorovsky): Make a macro for that.
    /// Total number of bytes the tree occupies inside an account buffer,
    /// including the contents of all dynamically sized fields.
    pub fn size_in_account(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
    ) -> usize {
        // non-dynamic fields
        Self::non_dyn_fields_size()
        // filled_subtrees
        + mem::size_of::<[u8; 32]>() * height
        // changelog
        + mem::size_of::<ChangelogEntry<HEIGHT>>() * changelog_size
        // roots
        + mem::size_of::<[u8; 32]>() * roots_size
        // canopy
        + mem::size_of::<[u8; 32]>() * Self::canopy_size(canopy_depth)
    }
    /// Validates the requested dimensions against the `HEIGHT` const generic
    /// and basic invariants. Used by `new` and (via `check_size_constraints`)
    /// by the byte-casting constructors.
    fn check_size_constraints_new(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        if height == 0 || HEIGHT == 0 {
            return Err(ConcurrentMerkleTreeError::HeightZero);
        }
        // The runtime height has to match the compile-time one.
        if height != HEIGHT {
            return Err(ConcurrentMerkleTreeError::InvalidHeight(HEIGHT));
        }
        if canopy_depth > height {
            return Err(ConcurrentMerkleTreeError::CanopyGeThanHeight);
        }
        // Changelog needs to be at least 1, because it's used for storing
        // Merkle paths in `append`/`append_batch`.
        if changelog_size == 0 {
            return Err(ConcurrentMerkleTreeError::RootsZero);
        }
        if roots_size == 0 {
            return Err(ConcurrentMerkleTreeError::RootsZero);
        }
        Ok(())
    }
    /// Validates `self`'s dimensions; see [`Self::check_size_constraints_new`].
    fn check_size_constraints(&self) -> Result<(), ConcurrentMerkleTreeError> {
        Self::check_size_constraints_new(
            self.height,
            self.changelog.capacity(),
            self.roots.capacity(),
            self.canopy_depth,
        )
    }
    /// Creates a tree with heap-allocated scalar fields and empty
    /// (capacity-only) vectors. Call [`Self::init`] afterwards to populate
    /// the initial root, changelog entry, subtrees and canopy.
    ///
    /// # Errors
    ///
    /// Propagates the dimension checks of `check_size_constraints_new`.
    pub fn new(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Self::check_size_constraints_new(height, changelog_size, roots_size, canopy_depth)?;
        // The scalar fields are allocated individually because the struct
        // stores them behind raw pointers (layout shared with the zero-copy
        // representation).
        let layout = Layout::new::<usize>();
        let next_index = unsafe { alloc::alloc(layout) as *mut usize };
        if next_index.is_null() {
            handle_alloc_error(layout);
        }
        // SAFETY: `next_index` was just checked to be non-null.
        unsafe { *next_index = 0 };
        let layout = Layout::new::<usize>();
        let sequence_number = unsafe { alloc::alloc(layout) as *mut usize };
        if sequence_number.is_null() {
            handle_alloc_error(layout);
        }
        // SAFETY: `sequence_number` was just checked to be non-null.
        unsafe { *sequence_number = 0 };
        let layout = Layout::new::<[u8; 32]>();
        let rightmost_leaf = unsafe { alloc::alloc(layout) as *mut [u8; 32] };
        if rightmost_leaf.is_null() {
            handle_alloc_error(layout);
        }
        // SAFETY: `rightmost_leaf` was just checked to be non-null.
        unsafe { *rightmost_leaf = [0u8; 32] };
        Ok(Self {
            height,
            canopy_depth,
            next_index,
            sequence_number,
            rightmost_leaf,
            filled_subtrees: BoundedVec::with_capacity(height),
            changelog: CyclicBoundedVec::with_capacity(changelog_size),
            roots: CyclicBoundedVec::with_capacity(roots_size),
            canopy: BoundedVec::with_capacity(Self::canopy_size(canopy_depth)),
            _hasher: PhantomData,
        })
    }
    /// Initializes the Merkle tree.
    ///
    /// Fills the root buffer, the first changelog entry, the filled subtrees
    /// and the canopy with the hasher's zero-subtree hashes, i.e. the state
    /// of a tree whose leaves are all zero.
    pub fn init(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
        self.check_size_constraints()?;
        // Initialize root.
        let root = H::zero_bytes()[self.height];
        self.roots.push(root);
        // Initialize changelog.
        let path = ChangelogPath::from_fn(|i| Some(H::zero_bytes()[i]));
        let changelog_entry = ChangelogEntry { path, index: 0 };
        self.changelog.push(changelog_entry);
        // Initialize filled subtrees.
        for i in 0..self.height {
            self.filled_subtrees.push(H::zero_bytes()[i]).unwrap();
        }
        // Initialize canopy.
        for level_i in 0..self.canopy_depth {
            let level_nodes = 1 << (level_i + 1);
            for _ in 0..level_nodes {
                // Canopy level `level_i` caches nodes at tree level
                // `height - level_i - 1` (top-down).
                let node = H::zero_bytes()[self.height - level_i - 1];
                self.canopy.push(node)?;
            }
        }
        Ok(())
    }
    /// Returns the index of the current changelog entry.
    pub fn changelog_index(&self) -> usize {
        self.changelog.last_index()
    }
    /// Returns the index of the current root in the tree's root buffer.
    pub fn root_index(&self) -> usize {
        self.roots.last_index()
    }
    /// Returns the current root.
    pub fn root(&self) -> [u8; 32] {
        // PANICS: This should never happen - there is always a root in the
        // tree (pushed by `init`) and `self.root_index()` should always
        // point to an existing index.
        self.roots[self.root_index()]
    }
pub fn current_index(&self) -> usize {
let next_index = self.next_index();
if next_index > 0 {
next_index - 1
} else {
next_index
}
}
    /// Index at which the next leaf will be appended.
    pub fn next_index(&self) -> usize {
        // SAFETY: assumes `self.next_index` points at valid, initialized
        // memory (set up by `new` or by the byte-casting constructors).
        unsafe { *self.next_index }
    }
    // Increments `next_index`, surfacing overflow as an error instead of
    // wrapping.
    fn inc_next_index(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
        unsafe {
            *self.next_index = self
                .next_index()
                .checked_add(1)
                .ok_or(ConcurrentMerkleTreeError::IntegerOverflow)?;
        }
        Ok(())
    }
    /// Current sequence number of the tree.
    pub fn sequence_number(&self) -> usize {
        // SAFETY: same pointer-validity assumption as `next_index`.
        unsafe { *self.sequence_number }
    }
    // Increments the sequence number, surfacing overflow as an error.
    fn inc_sequence_number(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
        unsafe {
            *self.sequence_number = self
                .sequence_number()
                .checked_add(1)
                .ok_or(ConcurrentMerkleTreeError::IntegerOverflow)?;
        }
        Ok(())
    }
    /// The most recently appended leaf.
    pub fn rightmost_leaf(&self) -> [u8; 32] {
        // SAFETY: same pointer-validity assumption as `next_index`.
        unsafe { *self.rightmost_leaf }
    }
    fn set_rightmost_leaf(&mut self, leaf: &[u8; 32]) {
        // SAFETY: same pointer-validity assumption as `next_index`.
        unsafe { *self.rightmost_leaf = *leaf };
    }
    /// Completes a truncated `proof` by appending the top `canopy_depth`
    /// sibling nodes from the cached canopy.
    pub fn update_proof_from_canopy(
        &self,
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        // Start from the node's index (1-based heap numbering of the whole
        // tree) at the level where the canopy begins.
        let mut node_index = ((1 << self.height) + leaf_index) >> (self.height - self.canopy_depth);
        while node_index > 1 {
            // `node_index - 2` maps to the canopy index.
            let canopy_index = node_index - 2;
            // Take the sibling of the node, not the node itself.
            let canopy_index = if canopy_index % 2 == 0 {
                canopy_index + 1
            } else {
                canopy_index - 1
            };
            proof.push(self.canopy[canopy_index])?;
            node_index >>= 1;
        }
        Ok(())
    }
    /// Returns an iterator with changelog entries newer than the requested
    /// `changelog_index`.
    pub fn changelog_entries(
        &self,
        changelog_index: usize,
    ) -> Result<Skip<CyclicBoundedVecIterator<'_, ChangelogEntry<HEIGHT>>>, ConcurrentMerkleTreeError>
    {
        // `CyclicBoundedVec::iter_from` returns an iterator which includes also
        // the element indicated by the provided index.
        //
        // However, we want to iterate only on changelog events **newer** than
        // the provided one.
        //
        // Calling `iter_from(changelog_index + 1)` wouldn't work. If
        // `changelog_index` points to the newest changelog entry,
        // `changelog_index + 1` would point to the **oldest** changelog entry.
        // That would result in iterating over the whole changelog - from the
        // oldest to the newest element.
        Ok(self.changelog.iter_from(changelog_index)?.skip(1))
    }
    /// Updates the given Merkle proof.
    ///
    /// The update is performed by checking whether there are any new changelog
    /// entries and whether they contain changes which affect the current
    /// proof. To be precise, for each changelog entry, it's done in the
    /// following steps:
    ///
    /// * Check if the changelog entry was directly updating the `leaf_index`
    ///   we are trying to update.
    /// * If no (we check that condition first, since it's more likely),
    ///   it means that there is a change affecting the proof, but not the
    ///   leaf. Check which element from our proof was affected by the change
    ///   (using the `critbit_index` method) and update it (copy the new
    ///   element from the changelog to our updated proof).
    /// * If yes, it means that the same leaf we want to update was already
    ///   updated. In such case, updating the proof is not possible.
    pub fn update_proof_from_changelog(
        &self,
        changelog_index: usize,
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        // Iterate over changelog entries starting from the requested
        // `changelog_index`.
        //
        // Since we are interested only in subsequent, new changelog entries,
        // skip the first result.
        for changelog_entry in self.changelog_entries(changelog_index)? {
            changelog_entry.update_proof(leaf_index, proof)?;
        }
        Ok(())
    }
/// Checks whether the given Merkle `proof` for the given `node` (with index
/// `i`) is valid. The proof is valid when computing parent node hashes using
/// the whole path of the proof gives the same result as the given `root`.
pub fn validate_proof(
&self,
leaf: &[u8; 32],
leaf_index: usize,
proof: &BoundedVec<[u8; 32]>,
) -> Result<(), ConcurrentMerkleTreeError> {
let expected_root = self.root();
let computed_root = compute_root::<H>(leaf, leaf_index, proof)?;
if computed_root == expected_root {
Ok(())
} else {
Err(ConcurrentMerkleTreeError::InvalidProof(
expected_root,
computed_root,
))
}
}
    /// Updates the leaf under `leaf_index` with the `new_leaf` value.
    ///
    /// 1. Computes the new path and root from `new_leaf` and Merkle proof
    /// (`proof`).
    /// 2. Stores the new path as the latest changelog entry and increments the
    /// latest changelog index.
    /// 3. Stores the latest root and increments the latest root index.
    /// 4. If new leaf is at the rightmost index, stores it as the new
    /// rightmost leaf and stores the Merkle proof as the new rightmost
    /// proof.
    ///
    /// Returns the new changelog index and sequence number.
    ///
    /// # Validation
    ///
    /// This method doesn't validate the proof. Caller is responsible for
    /// doing that before.
    fn update_leaf_in_tree(
        &mut self,
        new_leaf: &[u8; 32],
        leaf_index: usize,
        proof: &BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        // Recompute the path from the new leaf up to the root, recording
        // every intermediate node in a fresh changelog entry.
        let mut changelog_entry = ChangelogEntry::default_with_index(leaf_index);
        let mut current_node = *new_leaf;
        for (level, sibling) in proof.iter().enumerate() {
            changelog_entry.path[level] = Some(current_node);
            current_node = compute_parent_node::<H>(&current_node, sibling, leaf_index, level)?;
        }
        self.inc_sequence_number()?;
        // `current_node` is now the new root.
        self.roots.push(current_node);
        // Only while the tree is not yet full, keep the filled subtrees and
        // the rightmost leaf in sync with this update.
        if self.next_index() < (1 << self.height) {
            changelog_entry.update_proof(self.next_index(), &mut self.filled_subtrees)?;
            // Check if we updated the rightmost leaf.
            if leaf_index >= self.current_index() {
                self.set_rightmost_leaf(new_leaf);
            }
        }
        self.changelog.push(changelog_entry);
        if self.canopy_depth > 0 {
            self.update_canopy(self.changelog.last_index(), 1);
        }
        Ok((self.changelog.last_index(), self.sequence_number()))
    }
    /// Replaces the `old_leaf` under the `leaf_index` with a `new_leaf`, using
    /// the given `proof` and `changelog_index` (pointing to the changelog entry
    /// which was the newest at the time of preparing the proof).
    ///
    /// The proof is first completed from the canopy (if any), then patched
    /// with changelog entries newer than `changelog_index`, and finally
    /// validated against the current root before the tree is modified.
    ///
    /// Returns the new changelog index and sequence number.
    #[inline(never)]
    pub fn update(
        &mut self,
        changelog_index: usize,
        old_leaf: &[u8; 32],
        new_leaf: &[u8; 32],
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        // Callers provide a proof truncated by the canopy depth.
        let expected_proof_len = self.height - self.canopy_depth;
        if proof.len() != expected_proof_len {
            return Err(ConcurrentMerkleTreeError::InvalidProofLength(
                expected_proof_len,
                proof.len(),
            ));
        }
        // Only leaves which were actually appended can be updated.
        if leaf_index >= self.next_index() {
            return Err(ConcurrentMerkleTreeError::CannotUpdateEmpty);
        }
        if self.canopy_depth > 0 {
            self.update_proof_from_canopy(leaf_index, proof)?;
        }
        // Patch the proof only when it was prepared against an older state.
        if changelog_index != self.changelog_index() {
            self.update_proof_from_changelog(changelog_index, leaf_index, proof)?;
        }
        self.validate_proof(old_leaf, leaf_index, proof)?;
        self.update_leaf_in_tree(new_leaf, leaf_index, proof)
    }
    /// Appends a new leaf to the tree.
    ///
    /// Returns the changelog index and sequence number of the append.
    pub fn append(&mut self, leaf: &[u8; 32]) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch(&[leaf])
    }
    /// Appends a new leaf to the tree. Saves Merkle proof to the provided
    /// `proof` reference.
    ///
    /// Returns the changelog index and sequence number of the append.
    pub fn append_with_proof(
        &mut self,
        leaf: &[u8; 32],
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_with_proofs(&[leaf], &mut [proof])
    }
    /// Appends a batch of new leaves to the tree.
    ///
    /// Returns the changelog index and sequence number of the first
    /// appended leaf.
    pub fn append_batch(
        &mut self,
        leaves: &[&[u8; 32]],
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_common::<false>(leaves, None)
    }
    /// Appends a batch of new leaves to the tree. Saves Merkle proofs to the
    /// provided `proofs` slice (one proof per leaf).
    ///
    /// Returns the changelog index and sequence number of the first
    /// appended leaf.
    pub fn append_batch_with_proofs(
        &mut self,
        leaves: &[&[u8; 32]],
        proofs: &mut [&mut BoundedVec<[u8; 32]>],
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_common::<true>(leaves, Some(proofs))
    }
    /// Appends a batch of new leaves to the tree.
    ///
    /// This method contains the common logic and is not intended for external
    /// use. Callers should choose between [`append_batch`](ConcurrentMerkleTree::append_batch)
    /// and [`append_batch_with_proofs`](ConcurrentMerkleTree::append_batch_with_proofs).
    ///
    /// Returns the changelog index and sequence number of the **first**
    /// appended leaf.
    ///
    /// # Errors
    ///
    /// * [`ConcurrentMerkleTreeError::EmptyLeaves`] when `leaves` is empty.
    /// * [`ConcurrentMerkleTreeError::TreeFull`] when the batch does not fit.
    /// * [`ConcurrentMerkleTreeError::BatchGreaterThanChangelog`] when the
    ///   batch exceeds the changelog capacity.
    fn append_batch_common<
        // The only purpose of this const generic is to force compiler to
        // produce separate functions, with and without proof.
        //
        // Unfortunately, using `Option` is not enough:
        //
        // https://godbolt.org/z/fEMMfMdPc
        // https://godbolt.org/z/T3dxnjMzz
        //
        // Using the const generic helps and ends up generating two separate
        // functions:
        //
        // https://godbolt.org/z/zGnM7Ycn1
        const WITH_PROOFS: bool,
    >(
        &mut self,
        leaves: &[&[u8; 32]],
        // Slice for saving Merkle proofs.
        //
        // Currently it's used only for indexed Merkle trees.
        mut proofs: Option<&mut [&mut BoundedVec<[u8; 32]>]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        if leaves.is_empty() {
            return Err(ConcurrentMerkleTreeError::EmptyLeaves);
        }
        // Reject batches which would overflow the tree capacity.
        if (self.next_index() + leaves.len() - 1) >= 1 << self.height {
            return Err(ConcurrentMerkleTreeError::TreeFull);
        }
        // Each leaf needs its own changelog entry, so the batch cannot be
        // larger than the changelog itself.
        if leaves.len() > self.changelog.capacity() {
            return Err(ConcurrentMerkleTreeError::BatchGreaterThanChangelog(
                leaves.len(),
                self.changelog.capacity(),
            ));
        }
        let first_changelog_index = (self.changelog.last_index() + 1) % self.changelog.capacity();
        let first_sequence_number = self.sequence_number() + 1;
        for (leaf_i, leaf) in leaves.iter().enumerate() {
            let mut current_index = self.next_index();
            self.changelog
                .push(ChangelogEntry::<HEIGHT>::default_with_index(current_index));
            let changelog_index = self.changelog_index();
            let mut current_node = **leaf;
            // Level 0 of the changelog path is the leaf itself.
            self.changelog[changelog_index].path[0] = Some(**leaf);
            for i in 0..self.height {
                let is_left = current_index % 2 == 0;
                if is_left {
                    // If the current node is on the left side:
                    //
                    //      U
                    //     / \
                    //   CUR  SIB
                    //   / \
                    //  N   N
                    //
                    // * The sibling (on the right) is a "zero node".
                    // * That "zero node" becomes a part of Merkle proof.
                    // * The upper (next current) node is `H(cur, Ø)`.
                    let empty_node = H::zero_bytes()[i];
                    if WITH_PROOFS {
                        // PANICS: `proofs` should be always `Some` at this point.
                        proofs.as_mut().unwrap()[leaf_i].push(empty_node)?;
                    }
                    self.filled_subtrees[i] = current_node;
                    // For all non-terminal leaves, stop computing parents as
                    // soon as we are on the left side.
                    // Computation of the parent nodes is going to happen in
                    // the next iterations.
                    if leaf_i < leaves.len() - 1 {
                        break;
                    }
                    current_node = H::hashv(&[&current_node, &empty_node])?;
                } else {
                    // If the current node is on the right side:
                    //
                    //      U
                    //     / \
                    //   SIB  CUR
                    //        / \
                    //       N   N
                    // * The sibling on the left is a "filled subtree".
                    // * That "filled subtree" becomes a part of Merkle proof.
                    // * The upper (next current) node is `H(sib, cur)`.
                    if WITH_PROOFS {
                        // PANICS: `proofs` should be always `Some` at this point.
                        proofs.as_mut().unwrap()[leaf_i].push(self.filled_subtrees[i])?;
                    }
                    current_node = H::hashv(&[&self.filled_subtrees[i], &current_node])?;
                }
                if i < self.height - 1 {
                    self.changelog[changelog_index].path[i + 1] = Some(current_node);
                }
                current_index /= 2;
            }
            if leaf_i == leaves.len() - 1 {
                self.roots.push(current_node);
            } else {
                // Photon returns only the sequence number and we use it in the
                // JS client and forester to derive the root index. Therefore,
                // we need to emit a "zero root" to not break that property.
                self.roots.push([0u8; 32]);
            }
            self.inc_next_index()?;
            self.inc_sequence_number()?;
            self.set_rightmost_leaf(leaf);
        }
        if self.canopy_depth > 0 {
            self.update_canopy(first_changelog_index, leaves.len());
        }
        Ok((first_changelog_index, first_sequence_number))
    }
    /// Copies the topmost `canopy_depth` path nodes of the `num_leaves`
    /// changelog entries starting at `first_changelog_index` into the canopy.
    fn update_canopy(&mut self, first_changelog_index: usize, num_leaves: usize) {
        for i in 0..num_leaves {
            // The changelog is cyclic, hence the modulo.
            let changelog_index = (first_changelog_index + i) % self.changelog.capacity();
            // Walk the path from the root downwards; only the top
            // `canopy_depth` nodes belong to the canopy.
            for (i, path_node) in self.changelog[changelog_index]
                .path
                .iter()
                .rev()
                .take(self.canopy_depth)
                .enumerate()
            {
                if let Some(path_node) = path_node {
                    let level = self.height - i - 1;
                    // 1-based "heap" index of the node at `level`.
                    let index = (1 << (self.height - level))
                        + (self.changelog[changelog_index].index >> level);
                    // `index - 2` maps to the canopy index.
                    self.canopy[(index - 2) as usize] = *path_node;
                }
            }
        }
    }
}
impl<H, const HEIGHT: usize> Drop for ConcurrentMerkleTree<H, HEIGHT>
where
    H: Hasher,
{
    /// Deallocates the manually allocated `next_index`, `sequence_number`
    /// and `rightmost_leaf` fields.
    fn drop(&mut self) {
        // SAFETY: assumes each pointer was allocated via the global
        // allocator with exactly these layouts - TODO(review): confirm
        // against the constructor, which is outside this view.
        let layout = Layout::new::<usize>();
        unsafe { alloc::dealloc(self.next_index as *mut u8, layout) };
        let layout = Layout::new::<usize>();
        unsafe { alloc::dealloc(self.sequence_number as *mut u8, layout) };
        let layout = Layout::new::<[u8; 32]>();
        unsafe { alloc::dealloc(self.rightmost_leaf as *mut u8, layout) };
    }
}
impl<H, const HEIGHT: usize> PartialEq for ConcurrentMerkleTree<H, HEIGHT>
where
    H: Hasher,
{
    /// Two trees are equal when all of their observable state matches:
    /// configuration, counters, rightmost leaf, filled subtrees, changelog,
    /// root history and canopy.
    fn eq(&self, other: &Self) -> bool {
        // Cheap configuration checks first.
        if self.height != other.height || self.canopy_depth != other.canopy_depth {
            return false;
        }
        self.next_index() == other.next_index()
            && self.sequence_number() == other.sequence_number()
            && self.rightmost_leaf() == other.rightmost_leaf()
            && self.filled_subtrees.as_slice() == other.filled_subtrees.as_slice()
            && self.changelog.iter().eq(other.changelog.iter())
            && self.roots.iter().eq(other.roots.iter())
            && self.canopy.as_slice() == other.canopy.as_slice()
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/event.rs
|
use borsh::{BorshDeserialize, BorshSerialize};
/// Wrapper carrying a batch of Merkle tree events.
#[derive(BorshDeserialize, BorshSerialize, Debug)]
pub struct MerkleTreeEvents {
    pub events: Vec<MerkleTreeEvent>,
}
/// Event containing the Merkle path of the given
/// [`StateMerkleTree`](light_merkle_tree_program::state::StateMerkleTree)
/// change. Indexers can use this type of events to re-build a non-sparse
/// version of state Merkle tree.
#[derive(BorshDeserialize, BorshSerialize, Debug)]
#[repr(C)]
pub enum MerkleTreeEvent {
    /// Changelog event - Merkle paths of changed leaves.
    V1(ChangelogEvent),
    /// Nullifier event - leaves updated to `[0u8; 32]`.
    V2(NullifierEvent),
    /// Indexed Merkle tree update event.
    V3(IndexedMerkleTreeEvent),
}
/// Node of the Merkle path with an index representing the position in a
/// non-sparse Merkle tree.
#[derive(BorshDeserialize, BorshSerialize, Debug, Eq, PartialEq)]
pub struct PathNode {
    /// Node hash.
    pub node: [u8; 32],
    /// Position of the node in the non-sparse tree representation.
    pub index: u32,
}
/// Version 1 of the [`ChangelogEvent`](light_merkle_tree_program::state::ChangelogEvent).
#[derive(BorshDeserialize, BorshSerialize, Debug)]
pub struct ChangelogEvent {
    /// Public key of the tree.
    pub id: [u8; 32],
    /// Merkle paths.
    pub paths: Vec<Vec<PathNode>>,
    /// Number of successful operations on the on-chain tree.
    pub seq: u64,
    /// Changelog event index.
    pub index: u32,
}
/// Event emitted when leaves are nullified (updated to zero).
#[derive(BorshDeserialize, BorshSerialize, Debug)]
pub struct NullifierEvent {
    /// Public key of the tree.
    pub id: [u8; 32],
    /// Indices of leaves that were nullified.
    /// Nullified means updated with [0u8;32].
    pub nullified_leaves_indices: Vec<u64>,
    /// Number of successful operations on the on-chain tree.
    /// seq corresponds to leaves[0].
    /// seq + 1 corresponds to leaves[1].
    pub seq: u64,
}
/// Raw, borsh-serializable representation of an indexed Merkle tree element.
#[derive(Debug, Default, Clone, Copy, BorshSerialize, BorshDeserialize, Eq, PartialEq)]
pub struct RawIndexedElement<I>
where
    I: Clone,
{
    /// Value stored under this element.
    pub value: [u8; 32],
    /// Index of the next element.
    pub next_index: I,
    /// Value of the next element.
    pub next_value: [u8; 32],
    /// Index of this element.
    pub index: I,
}
/// A single indexed Merkle tree update: the rewritten low element and the
/// new high element, together with their leaf hashes.
#[derive(BorshDeserialize, BorshSerialize, Debug, Clone)]
pub struct IndexedMerkleTreeUpdate<I>
where
    I: Clone,
{
    pub new_low_element: RawIndexedElement<I>,
    /// Leaf hash in new_low_element.index.
    pub new_low_element_hash: [u8; 32],
    pub new_high_element: RawIndexedElement<I>,
    /// Leaf hash in new_high_element.index,
    /// is equivalent with next_index.
    pub new_high_element_hash: [u8; 32],
}
/// Event describing a batch of indexed Merkle tree updates.
#[derive(BorshDeserialize, BorshSerialize, Debug)]
pub struct IndexedMerkleTreeEvent {
    /// Public key of the tree.
    pub id: [u8; 32],
    pub updates: Vec<IndexedMerkleTreeUpdate<usize>>,
    /// Number of successful operations on the on-chain tree.
    /// seq corresponds to leaves[0].
    /// seq + 1 corresponds to leaves[1].
    pub seq: u64,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/errors.rs
|
use light_bounded_vec::BoundedVecError;
use light_hasher::errors::HasherError;
use thiserror::Error;
#[derive(Debug, Error)]
pub enum ConcurrentMerkleTreeError {
#[error("Integer overflow")]
IntegerOverflow,
#[error("Invalid height, it has to be greater than 0")]
HeightZero,
#[error("Invalud height, expected {0}")]
InvalidHeight(usize),
#[error("Invalid changelog size, it has to be greater than 0. Changelog is used for storing Merkle paths during appends.")]
ChangelogZero,
#[error("Invalid number of roots, it has to be greater than 0")]
RootsZero,
#[error("Canopy depth has to be lower than height")]
CanopyGeThanHeight,
#[error("Merkle tree is full, cannot append more leaves.")]
TreeFull,
#[error("Number of leaves ({0}) exceeds the changelog capacity ({1}).")]
BatchGreaterThanChangelog(usize, usize),
#[error("Invalid proof length, expected {0}, got {1}.")]
InvalidProofLength(usize, usize),
#[error("Invalid Merkle proof, expected root: {0:?}, the provided proof produces root: {1:?}")]
InvalidProof([u8; 32], [u8; 32]),
#[error("Attempting to update the leaf which was updated by an another newest change.")]
CannotUpdateLeaf,
#[error("Cannot update the empty leaf")]
CannotUpdateEmpty,
#[error("The batch of leaves is empty")]
EmptyLeaves,
#[error("Invalid buffer size, expected {0}, got {1}")]
BufferSize(usize, usize),
#[error("Hasher error: {0}")]
Hasher(#[from] HasherError),
#[error("Bounded vector error: {0}")]
BoundedVec(#[from] BoundedVecError),
}
// NOTE(vadorovsky): Unfortunately, we need to do it by hand. `num_derive::ToPrimitive`
// doesn't support data-carrying enums.
#[cfg(feature = "solana")]
impl From<ConcurrentMerkleTreeError> for u32 {
    /// Maps each error variant to a stable numeric error code (10001..10014);
    /// hasher and bounded-vec errors delegate to their own conversions.
    fn from(e: ConcurrentMerkleTreeError) -> u32 {
        match e {
            ConcurrentMerkleTreeError::IntegerOverflow => 10001,
            ConcurrentMerkleTreeError::HeightZero => 10002,
            ConcurrentMerkleTreeError::InvalidHeight(_) => 10003,
            ConcurrentMerkleTreeError::ChangelogZero => 10004,
            ConcurrentMerkleTreeError::RootsZero => 10005,
            ConcurrentMerkleTreeError::CanopyGeThanHeight => 10006,
            ConcurrentMerkleTreeError::TreeFull => 10007,
            ConcurrentMerkleTreeError::BatchGreaterThanChangelog(_, _) => 10008,
            ConcurrentMerkleTreeError::InvalidProofLength(_, _) => 10009,
            ConcurrentMerkleTreeError::InvalidProof(_, _) => 10010,
            ConcurrentMerkleTreeError::CannotUpdateLeaf => 10011,
            ConcurrentMerkleTreeError::CannotUpdateEmpty => 10012,
            ConcurrentMerkleTreeError::EmptyLeaves => 10013,
            ConcurrentMerkleTreeError::BufferSize(_, _) => 10014,
            ConcurrentMerkleTreeError::Hasher(e) => e.into(),
            ConcurrentMerkleTreeError::BoundedVec(e) => e.into(),
        }
    }
}
#[cfg(feature = "solana")]
impl From<ConcurrentMerkleTreeError> for solana_program::program_error::ProgramError {
    /// Wraps the numeric error code into a Solana custom program error.
    fn from(e: ConcurrentMerkleTreeError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/hash.rs
|
use light_bounded_vec::BoundedVec;
use light_hasher::Hasher;
use crate::errors::ConcurrentMerkleTreeError;
/// Returns the hash of the parent of `node` and `sibling`, where the bit of
/// `node_index` at `level` determines on which side `node` sits.
pub fn compute_parent_node<H>(
    node: &[u8; 32],
    sibling: &[u8; 32],
    node_index: usize,
    level: usize,
) -> Result<[u8; 32], ConcurrentMerkleTreeError>
where
    H: Hasher,
{
    // Bit 0 at `level` means `node` is the left child; otherwise the right.
    let (left, right) = match (node_index >> level) & 1 {
        0 => (node, sibling),
        _ => (sibling, node),
    };
    Ok(H::hashv(&[left, right])?)
}
/// Computes the root for the given `leaf` (under `leaf_index`) and `proof`
/// by folding the proof siblings into parent hashes, level by level. It does
/// not validate the provided `proof`.
pub fn compute_root<H>(
    leaf: &[u8; 32],
    leaf_index: usize,
    proof: &BoundedVec<[u8; 32]>,
) -> Result<[u8; 32], ConcurrentMerkleTreeError>
where
    H: Hasher,
{
    proof
        .iter()
        .enumerate()
        .try_fold(*leaf, |node, (level, sibling)| {
            compute_parent_node::<H>(&node, sibling, leaf_index, level)
        })
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/changelog.rs
|
use std::ops::{Deref, DerefMut};
use light_bounded_vec::BoundedVec;
use crate::errors::ConcurrentMerkleTreeError;
/// Merkle path recorded by a changelog entry; `None` nodes are "not defined
/// yet" and are expected to be filled by subsequent changelog entries.
#[derive(Clone, Debug, PartialEq, Eq)]
#[repr(transparent)]
pub struct ChangelogPath<const HEIGHT: usize>(pub [Option<[u8; 32]>; HEIGHT]);
impl<const HEIGHT: usize> ChangelogPath<HEIGHT> {
    /// Builds a path by invoking `cb` once per level.
    pub fn from_fn<F>(cb: F) -> Self
    where
        F: FnMut(usize) -> Option<[u8; 32]>,
    {
        Self(std::array::from_fn(cb))
    }
    /// Checks whether the path is equal to the provided [`BoundedVec`].
    ///
    /// [`ChangelogPath`] might contain `None` nodes at the end, which
    /// mean that it does not define them, but the following changelog
    /// paths are expected to overwrite them. The comparison therefore
    /// stops at the first `None` - only the defined (`Some`) prefix has
    /// to match. A vector whose length differs from `HEIGHT` is never
    /// equal.
    pub fn eq_to(&self, other: BoundedVec<[u8; 32]>) -> bool {
        if other.len() != HEIGHT {
            return false;
        }
        for (i, changelog_node) in self.0.iter().enumerate() {
            match changelog_node {
                // Mismatch within the defined prefix.
                Some(node) if *node != other[i] => return false,
                Some(_) => continue,
                // First undefined node - everything before it matched.
                None => return true,
            }
        }
        true
    }
}
impl<const HEIGHT: usize> Default for ChangelogPath<HEIGHT> {
    /// An entirely undefined path (all nodes `None`).
    fn default() -> Self {
        Self([None; HEIGHT])
    }
}
// Allow a `ChangelogPath` to be used directly as its inner array.
impl<const HEIGHT: usize> Deref for ChangelogPath<HEIGHT> {
    type Target = [Option<[u8; 32]>; HEIGHT];
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Mutable counterpart of the `Deref` impl above.
impl<const HEIGHT: usize> DerefMut for ChangelogPath<HEIGHT> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// Single changelog entry: the Merkle path produced by one tree operation
/// and the index of the leaf it affected.
#[derive(Clone, Debug, PartialEq, Eq)]
#[repr(C)]
pub struct ChangelogEntry<const HEIGHT: usize> {
    /// Path of the changelog.
    pub path: ChangelogPath<HEIGHT>,
    /// Index of the affected leaf.
    pub index: u64,
}
// Aliases for commonly used tree heights.
pub type ChangelogEntry22 = ChangelogEntry<22>;
pub type ChangelogEntry26 = ChangelogEntry<26>;
pub type ChangelogEntry32 = ChangelogEntry<32>;
pub type ChangelogEntry40 = ChangelogEntry<40>;
impl<const HEIGHT: usize> ChangelogEntry<HEIGHT> {
    /// Creates an entry from the given path and affected leaf index.
    pub fn new(path: ChangelogPath<HEIGHT>, index: usize) -> Self {
        let index = index as u64;
        Self { path, index }
    }
    /// Creates an entry with an empty (all-`None`) path for the given leaf
    /// index.
    pub fn default_with_index(index: usize) -> Self {
        Self {
            path: ChangelogPath::default(),
            index: index as u64,
        }
    }
    /// Returns the affected leaf index as `usize`.
    pub fn index(&self) -> usize {
        self.index as usize
    }
    /// Returns an intersection index in the changelog entry which affects the
    /// provided path.
    ///
    /// Determining it can be done by taking a XOR of the leaf index (which was
    /// directly updated in the changelog entry) and the leaf index we are
    /// trying to update.
    ///
    /// The number of bytes in the binary representations of the indexes is
    /// determined by the height of the tree. For example, for the tree with
    /// height 4, update attempt of leaf under index 2 and changelog affecting
    /// index 4, critbit would be:
    ///
    /// 2 ^ 4 = 0b_0010 ^ 0b_0100 = 0b_0110 = 6
    // NOTE(review): the `64 - HEIGHT` shift assumes `HEIGHT <= 64`.
    fn intersection_index(&self, leaf_index: usize) -> usize {
        let padding = 64 - HEIGHT;
        let common_path_len = ((leaf_index ^ self.index()) << padding).leading_zeros() as usize;
        (HEIGHT - 1) - common_path_len
    }
    /// Patches `proof` with the node from this entry which affects the path
    /// of `leaf_index`.
    ///
    /// # Errors
    ///
    /// Returns [`ConcurrentMerkleTreeError::CannotUpdateLeaf`] when this
    /// entry updated `leaf_index` itself - the proof cannot be repaired
    /// and the caller must re-sync first.
    pub fn update_proof(
        &self,
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        if leaf_index != self.index() {
            let intersection_index = self.intersection_index(leaf_index);
            if let Some(node) = self.path[intersection_index] {
                proof[intersection_index] = node;
            }
        } else {
            // This case means that the leaf we are trying to update was
            // already updated. Therefore, the right thing to do is to notify
            // the caller to sync the local Merkle tree and update the leaf,
            // if necessary.
            return Err(ConcurrentMerkleTreeError::CannotUpdateLeaf);
        }
        Ok(())
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference/Cargo.toml
|
# Manifest for the non-sparse reference Merkle tree implementation.
[package]
name = "light-merkle-tree-reference"
version = "1.1.0"
description = "Non-sparse reference Merkle tree implementation"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[dependencies]
light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" }
light-hasher = { path = "../hasher", version = "1.1.0" }
thiserror = "1.0"
log = "0.4.20"
num-bigint = "0.4"
[dev-dependencies]
hex = "0.4"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference/tests/tests.rs
|
use light_bounded_vec::BoundedVec;
use light_hasher::{Hasher, Keccak, Poseidon, Sha256};
use light_merkle_tree_reference::MerkleTree;
/// Appends four leaves to a fresh tree of height `HEIGHT` and, after each
/// append, checks the root, the Merkle path and the Merkle proof of the new
/// leaf - both the full variants and the ones truncated by `canopy_depth` -
/// against hashes computed by hand.
fn append<H>(canopy_depth: usize)
where
    H: Hasher,
{
    const HEIGHT: usize = 4;
    // Use the `HEIGHT` constant instead of a duplicated magic literal, so
    // the assertions below (`HEIGHT - canopy_depth`) stay consistent.
    let mut mt = MerkleTree::<H>::new(HEIGHT, canopy_depth);
    let leaf_1 = [1_u8; 32];
    mt.append(&leaf_1).unwrap();
    // After the 1st append, every sibling on the path is a zero subtree:
    //
    //             R
    //            / \
    //          H3   Z[3]
    //         /  \
    //       H2    Z[2]
    //      /  \
    //    H1    Z[1]
    //   /  \
    //  L1   Z[0]
    //
    // `Z[i]` represents the whole empty subtree at level `i`.
    let h1 = H::hashv(&[&leaf_1, &H::zero_bytes()[0]]).unwrap();
    let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();
    let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();
    assert_eq!(mt.root(), expected_root);
    // The Merkle path of L1 consists of the nodes from L1 up to the root:
    // [L1, H1, H2, H3].
    let expected_merkle_path = &[leaf_1, h1, h2, h3];
    let full_merkle_path = mt.get_path_of_leaf(0, true).unwrap();
    assert_eq!(full_merkle_path.as_slice(), expected_merkle_path);
    let partial_merkle_path = mt.get_path_of_leaf(0, false).unwrap();
    assert_eq!(
        partial_merkle_path.as_slice(),
        &expected_merkle_path[..HEIGHT - canopy_depth]
    );
    // The Merkle proof consists of the siblings of L1 and of all its parent
    // nodes - here just the zero nodes Z[0]..Z[3].
    let expected_merkle_proof = &H::zero_bytes()[..HEIGHT];
    let full_merkle_proof = mt.get_proof_of_leaf(0, true).unwrap();
    assert_eq!(full_merkle_proof.as_slice(), expected_merkle_proof);
    let partial_merkle_proof = mt.get_proof_of_leaf(0, false).unwrap();
    assert_eq!(
        partial_merkle_proof.as_slice(),
        &expected_merkle_proof[..HEIGHT - canopy_depth]
    );
    // Appending the 2nd leaf only changes `h1`, which becomes the hash of
    // the two non-zero leaves; everything above still hashes with zero
    // subtrees:
    //
    //             R
    //            / \
    //          H3   Z[3]
    //         /  \
    //       H2    Z[2]
    //      /  \
    //    H1    Z[1]
    //   /  \
    //  L1    L2
    let leaf_2 = H::hash(&[2u8; 32]).unwrap();
    mt.append(&leaf_2).unwrap();
    let h1 = H::hashv(&[&leaf_1, &leaf_2]).unwrap();
    let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();
    let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();
    assert_eq!(mt.root(), expected_root);
    // Path of L2: [L2, H1, H2, H3].
    let expected_merkle_path = &[leaf_2, h1, h2, h3];
    let full_merkle_path = mt.get_path_of_leaf(1, true).unwrap();
    assert_eq!(full_merkle_path.as_slice(), expected_merkle_path);
    let partial_merkle_path = mt.get_path_of_leaf(1, false).unwrap();
    assert_eq!(
        partial_merkle_path.as_slice(),
        &expected_merkle_path[..HEIGHT - canopy_depth]
    );
    // Proof of L2: [L1, Z[1], Z[2], Z[3]].
    let expected_merkle_proof = &[
        leaf_1,
        H::zero_bytes()[1],
        H::zero_bytes()[2],
        H::zero_bytes()[3],
    ];
    let full_merkle_proof = mt.get_proof_of_leaf(1, true).unwrap();
    assert_eq!(full_merkle_proof.as_slice(), expected_merkle_proof);
    let partial_merkle_proof = mt.get_proof_of_leaf(1, false).unwrap();
    assert_eq!(
        partial_merkle_proof.as_slice(),
        &expected_merkle_proof[..HEIGHT - canopy_depth]
    );
    // Appending the 3rd leaf alters the next subtree on the right: instead
    // of `Z[1]`, its parent now hashes the new leaf with `Z[0]`:
    //
    //             R
    //            / \
    //          H4   Z[3]
    //         /  \
    //       H3    Z[2]
    //      /  \
    //    H1    H2
    //   /  \   /  \
    //  L1  L2 L3  Z[0]
    let leaf_3 = H::hash(&[3u8; 32]).unwrap();
    mt.append(&leaf_3).unwrap();
    let h1 = H::hashv(&[&leaf_1, &leaf_2]).unwrap();
    let h2 = H::hashv(&[&leaf_3, &H::zero_bytes()[0]]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
    assert_eq!(mt.root(), expected_root);
    // Path of L3: [L3, H2, H3, H4].
    let expected_merkle_path = &[leaf_3, h2, h3, h4];
    let full_merkle_path = mt.get_path_of_leaf(2, true).unwrap();
    assert_eq!(full_merkle_path.as_slice(), expected_merkle_path);
    let partial_merkle_path = mt.get_path_of_leaf(2, false).unwrap();
    assert_eq!(
        partial_merkle_path.as_slice(),
        &expected_merkle_path[..HEIGHT - canopy_depth]
    );
    // Proof of L3: [Z[0], H1, Z[2], Z[3]].
    let expected_merkle_proof = &[
        H::zero_bytes()[0],
        h1,
        H::zero_bytes()[2],
        H::zero_bytes()[3],
    ];
    let full_merkle_proof = mt.get_proof_of_leaf(2, true).unwrap();
    assert_eq!(full_merkle_proof.as_slice(), expected_merkle_proof);
    let partial_merkle_proof = mt.get_proof_of_leaf(2, false).unwrap();
    assert_eq!(
        partial_merkle_proof.as_slice(),
        &expected_merkle_proof[..HEIGHT - canopy_depth]
    );
    // Appending the 4th leaf fills the left half of the tree:
    //
    //             R
    //            / \
    //          H4   Z[3]
    //         /  \
    //       H3    Z[2]
    //      /  \
    //    H1    H2
    //   /  \   /  \
    //  L1  L2 L3  L4
    let leaf_4 = H::hash(&[4u8; 32]).unwrap();
    mt.append(&leaf_4).unwrap();
    let h1 = H::hashv(&[&leaf_1, &leaf_2]).unwrap();
    let h2 = H::hashv(&[&leaf_3, &leaf_4]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
    assert_eq!(mt.root(), expected_root);
}
// Instantiations of `append` for every hasher (Keccak, Poseidon, Sha256)
// and canopy depth (0, 1, 2).
#[test]
fn test_append_keccak_4_0() {
    append::<Keccak>(0)
}
#[test]
fn test_append_poseidon_4_0() {
    append::<Poseidon>(0)
}
#[test]
fn test_append_sha256_4_0() {
    append::<Sha256>(0)
}
#[test]
fn test_append_keccak_4_1() {
    append::<Keccak>(1)
}
#[test]
fn test_append_poseidon_4_1() {
    append::<Poseidon>(1)
}
#[test]
fn test_append_sha256_4_1() {
    append::<Sha256>(1)
}
#[test]
fn test_append_keccak_4_2() {
    append::<Keccak>(2)
}
#[test]
fn test_append_poseidon_4_2() {
    append::<Poseidon>(2)
}
#[test]
fn test_append_sha256_4_2() {
    append::<Sha256>(2)
}
fn update<H>()
where
H: Hasher,
{
const HEIGHT: usize = 4;
const CANOPY: usize = 0;
let mut merkle_tree = MerkleTree::<H>::new(HEIGHT, CANOPY);
let leaf1 = H::hash(&[1u8; 32]).unwrap();
// The hash of our new leaf and its sibling (a zero value).
//
// H1
// / \
// L1 Z[0]
let h1 = H::hashv(&[&leaf1, &H::zero_bytes()[0]]).unwrap();
// The hash of `h1` and its sibling (a subtree represented by `Z[1]`).
//
// H2
// /-/ \-\
// H1 Z[1]
// / \ / \
// L1 Z[0] Z[0] Z[0]
//
// `Z[1]` represents the whole subtree on the right from `h2`. In the next
// examples, we are just going to show empty subtrees instead of the whole
// hierarchy.
let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();
// The hash of `h3` and its sibling (a subtree represented by `Z[2]`).
//
// H3
// / \
// H2 Z[2]
// / \
// H1 Z[1]
// / \
// L1 Z[0]
let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();
// The hash of `h4` and its sibling (a subtree represented by `Z[3]`),
// which is the root.
//
// R
// / \
// H3 Z[3]
// / \
// H2 Z[2]
// / \
// H1 Z[1]
// / \
// L1 Z[0]
let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();
// The Merkle path is:
// [L1, H1, H2, H3]
let expected_path = BoundedVec::from_array(&[leaf1, h1, h2, h3]);
let expected_proof = BoundedVec::from_array(&[
H::zero_bytes()[0],
H::zero_bytes()[1],
H::zero_bytes()[2],
H::zero_bytes()[3],
]);
merkle_tree.append(&leaf1).unwrap();
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(
merkle_tree.get_path_of_leaf(0, false).unwrap(),
expected_path
);
assert_eq!(
merkle_tree.get_proof_of_leaf(0, false).unwrap(),
expected_proof
);
// Appending the 2nd leaf should result in recomputing the root due to the
// change of the `h1`, which now is a hash of the two non-zero leafs. So
// when computing all hashes up to the root, we are still going to use
// zero bytes from 1 to 8.
//
// The other subtrees still remain the same.
//
// R
// / \
// H3 Z[3]
// / \
// H2 Z[2]
// / \
// H1 Z[1]
// / \
// L1 L2
let leaf2 = H::hash(&[2u8; 32]).unwrap();
let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();
let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();
// The Merkle path is:
// [L2, H1, H2, H3]
let expected_path = BoundedVec::from_array(&[leaf2, h1, h2, h3]);
let expected_proof = BoundedVec::from_array(&[
leaf1,
H::zero_bytes()[1],
H::zero_bytes()[2],
H::zero_bytes()[3],
]);
merkle_tree.append(&leaf2).unwrap();
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(
merkle_tree.get_path_of_leaf(1, false).unwrap(),
expected_path
);
assert_eq!(
merkle_tree.get_proof_of_leaf(1, false).unwrap(),
expected_proof
);
// Appending the 3rd leaf alters the next subtree on the right.
// Instead of using Z[1], we will end up with the hash of the new leaf and
// Z[0].
//
// The other subtrees still remain the same.
//
// R
// / \
// H4 Z[3]
// / \
// H3 Z[2]
// / \
// H1 H2
// / \ / \
// L1 L2 L3 Z[0]
let leaf3 = H::hash(&[3u8; 32]).unwrap();
let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
let h2 = H::hashv(&[&leaf3, &H::zero_bytes()[0]]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
// The Merkle path is:
// [L3, H2, H3, H4]
let expected_path = BoundedVec::from_array(&[leaf3, h2, h3, h4]);
let expected_proof = BoundedVec::from_array(&[
H::zero_bytes()[0],
h1,
H::zero_bytes()[2],
H::zero_bytes()[3],
]);
merkle_tree.append(&leaf3).unwrap();
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(
merkle_tree.get_path_of_leaf(2, false).unwrap(),
expected_path
);
assert_eq!(
merkle_tree.get_proof_of_leaf(2, false).unwrap(),
expected_proof
);
// Appending the 4th leaf alters the next subtree on the right.
// Instead of using Z[1], we will end up with the hash of the new leaf and
// Z[0].
//
// The other subtrees still remain the same.
//
// R
// / \
// H4 Z[3]
// / \
// H3 Z[2]
// / \
// H1 H2
// / \ / \
// L1 L2 L3 L4
let leaf4 = H::hash(&[4u8; 32]).unwrap();
let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
// The Merkle path is:
// [L4, H2, H3, H4]
let expected_path = BoundedVec::from_array(&[leaf4, h2, h3, h4]);
let expected_proof =
BoundedVec::from_array(&[leaf3, h1, H::zero_bytes()[2], H::zero_bytes()[3]]);
merkle_tree.append(&leaf4).unwrap();
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(
merkle_tree.get_path_of_leaf(3, false).unwrap(),
expected_path
);
assert_eq!(
merkle_tree.get_proof_of_leaf(3, false).unwrap(),
expected_proof
);
// Update `leaf1`.
let new_leaf1 = [9u8; 32];
// Updating L1 affects H1 and all parent hashes up to the root.
//
// R
// / \
// *H4* Z[3]
// / \
// *H3* Z[2]
// / \
// *H1* H2
// / \ / \
// *L1* L2 L3 L4
//
// Merkle proof for the replaced leaf L1 is:
// [L2, H2, Z[2], Z[3]]
//
// Our Merkle tree implementation should be smart enough to fill up the
// proof with zero bytes, so we can skip them and just define the proof as:
// [L2, H2]
merkle_tree.update(&new_leaf1, 0).unwrap();
let h1 = H::hashv(&[&new_leaf1, &leaf2]).unwrap();
let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
// The Merkle path is:
// [L1, H1, H3, H4]
let expected_path = BoundedVec::from_array(&[new_leaf1, h1, h3, h4]);
let expected_proof =
BoundedVec::from_array(&[leaf2, h2, H::zero_bytes()[2], H::zero_bytes()[3]]);
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(
merkle_tree.get_path_of_leaf(0, false).unwrap(),
expected_path
);
assert_eq!(
merkle_tree.get_proof_of_leaf(0, false).unwrap(),
expected_proof
);
// Update `leaf2`.
let new_leaf2 = H::hash(&[8u8; 32]).unwrap();
// Updating L2 affects H1 and all parent hashes up to the root.
//
// R
// / \
// *H4* Z[3]
// / \
// *H3* Z[2]
// / \
// *H1* H2
// / \ / \
// L1 *L2* L3 L4
//
// Merkle proof for the replaced leaf L2 is:
// [L1, H2]
merkle_tree.update(&new_leaf2, 1).unwrap();
let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
// The Merkle path is:
// [L2, H1, H3, H4]
let expected_path = BoundedVec::from_array(&[new_leaf2, h1, h3, h4]);
let expected_proof =
BoundedVec::from_array(&[new_leaf1, h2, H::zero_bytes()[2], H::zero_bytes()[3]]);
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(
merkle_tree.get_path_of_leaf(1, false).unwrap(),
expected_path
);
assert_eq!(
merkle_tree.get_proof_of_leaf(1, false).unwrap(),
expected_proof
);
// Update `leaf3`.
let new_leaf3 = H::hash(&[7u8; 32]).unwrap();
// Updating L3 affects H1 and all parent hashes up to the root.
//
// R
// / \
// *H4* Z[3]
// / \
// *H3* Z[2]
// / \
// H1 *H2*
// / \ / \
// L1 L2 *L3* L4
//
// Merkle proof for the replaced leaf L3 is:
// [L4, H1]
merkle_tree.update(&new_leaf3, 2).unwrap();
let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
let h2 = H::hashv(&[&new_leaf3, &leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
// The Merkle path is:
// [L3, H2, H3, H4]
let expected_path = BoundedVec::from_array(&[new_leaf3, h2, h3, h4]);
let expected_proof =
BoundedVec::from_array(&[leaf4, h1, H::zero_bytes()[2], H::zero_bytes()[3]]);
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(
merkle_tree.get_path_of_leaf(2, false).unwrap(),
expected_path
);
assert_eq!(
merkle_tree.get_proof_of_leaf(2, false).unwrap(),
expected_proof
);
// Update `leaf4`.
let new_leaf4 = H::hash(&[6u8; 32]).unwrap();
// Updating L4 affects H1 and all parent hashes up to the root.
//
// R
// / \
// *H4* Z[3]
// / \
// *H3* Z[2]
// / \
// H1 *H2*
// / \ / \
// L1 L2 L3 *L4*
//
// Merkle proof for the replaced leaf L4 is:
// [L3, H1]
merkle_tree.update(&new_leaf4, 3).unwrap();
let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
let h2 = H::hashv(&[&new_leaf3, &new_leaf4]).unwrap();
let h3 = H::hashv(&[&h1, &h2]).unwrap();
let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();
// The Merkle path is:
// [L4, H2, H3, H4]
let expected_path = BoundedVec::from_array(&[new_leaf4, h2, h3, h4]);
let expected_proof =
BoundedVec::from_array(&[new_leaf3, h1, H::zero_bytes()[2], H::zero_bytes()[3]]);
assert_eq!(merkle_tree.root(), expected_root);
assert_eq!(
merkle_tree.get_path_of_leaf(3, false).unwrap(),
expected_path
);
assert_eq!(
merkle_tree.get_proof_of_leaf(3, false).unwrap(),
expected_proof
);
}
/// Runs the shared `update` scenario (append + in-place leaf updates with
/// path/proof assertions) using the Keccak hasher.
#[test]
fn test_update_keccak() {
    update::<Keccak>()
}
/// Runs the shared `update` scenario (append + in-place leaf updates with
/// path/proof assertions) using the Poseidon hasher.
#[test]
fn test_update_poseidon() {
    update::<Poseidon>()
}
/// Runs the shared `update` scenario (append + in-place leaf updates with
/// path/proof assertions) using the SHA-256 hasher.
#[test]
fn test_update_sha256() {
    update::<Sha256>()
}
/// Checks that the reference tree's `sequence_number` starts at zero and is
/// incremented once by every state transition — both `append` and `update`.
#[test]
fn test_sequence_number() {
    let mut merkle_tree = MerkleTree::<Poseidon>::new(4, 0);

    // A freshly created tree has seen no state transitions yet.
    assert_eq!(merkle_tree.sequence_number, 0);

    // Appending a leaf bumps the sequence number.
    let first_leaf = Poseidon::hash(&[1u8; 32]).unwrap();
    merkle_tree.append(&first_leaf).unwrap();
    assert_eq!(merkle_tree.sequence_number, 1);

    // Updating an existing leaf bumps it as well.
    let replacement_leaf = Poseidon::hash(&[2u8; 32]).unwrap();
    merkle_tree.update(&replacement_leaf, 0).unwrap();
    assert_eq!(merkle_tree.sequence_number, 2);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference/src/lib.rs
|
pub mod sparse_merkle_tree;
use std::marker::PhantomData;
use light_bounded_vec::{BoundedVec, BoundedVecError};
use light_hasher::{errors::HasherError, Hasher};
use thiserror::Error;
/// Errors returned by the reference [`MerkleTree`] implementation.
#[derive(Debug, Error)]
pub enum ReferenceMerkleTreeError {
    /// The requested leaf index holds no leaf.
    #[error("Leaf {0} does not exist")]
    LeafDoesNotExist(usize),
    /// A hashing operation failed; propagated from the hasher backend.
    #[error("Hasher error: {0}")]
    Hasher(#[from] HasherError),
    /// A proof with the wrong number of nodes was supplied
    /// (provided length, required length).
    #[error("Invalid proof length provided: {0} required {1}")]
    InvalidProofLength(usize, usize),
}
/// In-memory reference Merkle tree.
///
/// Stores every node of every layer plus all historical roots, which makes it
/// convenient as a ground-truth implementation in tests, at the cost of
/// memory proportional to the number of appended leaves.
#[derive(Debug, Clone)]
pub struct MerkleTree<H>
where
    H: Hasher,
{
    /// Tree height (number of layers below the root).
    pub height: usize,
    /// Maximum number of leaves: `1 << height`.
    pub capacity: usize,
    /// Number of top levels treated as canopy (excluded from non-`full`
    /// paths and proofs).
    pub canopy_depth: usize,
    /// Node storage: `layers[0]` holds the leaves, `layers[height - 1]` the
    /// two children of the root.
    pub layers: Vec<Vec<[u8; 32]>>,
    /// Every root ever produced; the last entry is the current root.
    pub roots: Vec<[u8; 32]>,
    /// Index at which the next leaf will be appended.
    pub rightmost_index: usize,
    /// Incremented on every `append` and `update`.
    pub sequence_number: usize,
    _hasher: PhantomData<H>,
}
impl<H> MerkleTree<H>
where
    H: Hasher,
{
    /// Creates an empty tree of the given `height` with `canopy_depth` top
    /// levels reserved as canopy. The initial root is the zero hash for
    /// `height`, i.e. the root of a fully empty tree.
    pub fn new(height: usize, canopy_depth: usize) -> Self {
        Self {
            height,
            capacity: 1 << height,
            canopy_depth,
            layers: vec![Vec::new(); height],
            roots: vec![H::zero_bytes()[height]],
            rightmost_index: 0,
            sequence_number: 0,
            _hasher: PhantomData,
        }
    }
    /// Number of nodes to include in canopy, based on `canopy_depth`.
    // 2 + 4 + ... + 2^canopy_depth = 2^(canopy_depth + 1) - 2.
    pub fn canopy_size(&self) -> usize {
        (1 << (self.canopy_depth + 1)) - 2
    }
    /// Recomputes all ancestors of the node at leaf index `i` (levels
    /// `1..height`) and pushes the newly computed root onto `roots`.
    fn update_upper_layers(&mut self, mut i: usize) -> Result<(), HasherError> {
        for level in 1..self.height {
            // `i` becomes the parent index on `level`; its children live on
            // `level - 1`.
            i /= 2;
            let left_index = i * 2;
            let right_index = i * 2 + 1;
            // A missing child is treated as the zero hash of its level.
            let left_child = self.layers[level - 1]
                .get(left_index)
                .cloned()
                .unwrap_or(H::zero_bytes()[level - 1]);
            let right_child = self.layers[level - 1]
                .get(right_index)
                .cloned()
                .unwrap_or(H::zero_bytes()[level - 1]);
            let node = H::hashv(&[&left_child[..], &right_child[..]])?;
            if self.layers[level].len() > i {
                // A node already exists and we are overwriting it.
                self.layers[level][i] = node;
            } else {
                // A node didn't exist before.
                self.layers[level].push(node);
            }
        }
        // The root is the hash of the (at most) two nodes of the top layer.
        let left_child = &self.layers[self.height - 1]
            .first()
            .cloned()
            .unwrap_or(H::zero_bytes()[self.height - 1]);
        let right_child = &self.layers[self.height - 1]
            .get(1)
            .cloned()
            .unwrap_or(H::zero_bytes()[self.height - 1]);
        let root = H::hashv(&[&left_child[..], &right_child[..]])?;
        // Every historical root is retained; `root()` returns the latest.
        self.roots.push(root);
        Ok(())
    }
    /// Appends `leaf` at the rightmost position, recomputes the affected
    /// path to the root and bumps the sequence number.
    pub fn append(&mut self, leaf: &[u8; 32]) -> Result<(), HasherError> {
        self.layers[0].push(*leaf);
        let i = self.rightmost_index;
        self.rightmost_index += 1;
        self.update_upper_layers(i)?;
        self.sequence_number += 1;
        Ok(())
    }
    /// Appends `leaves` one by one; each append produces its own root and
    /// sequence-number increment.
    pub fn append_batch(&mut self, leaves: &[&[u8; 32]]) -> Result<(), HasherError> {
        for leaf in leaves {
            self.append(leaf)?;
        }
        Ok(())
    }
    /// Replaces the leaf at `leaf_index` with `leaf` and recomputes the
    /// affected path to the root.
    ///
    /// Returns [`ReferenceMerkleTreeError::LeafDoesNotExist`] if no leaf was
    /// ever appended at that index.
    pub fn update(
        &mut self,
        leaf: &[u8; 32],
        leaf_index: usize,
    ) -> Result<(), ReferenceMerkleTreeError> {
        *self.layers[0]
            .get_mut(leaf_index)
            .ok_or(ReferenceMerkleTreeError::LeafDoesNotExist(leaf_index))? = *leaf;
        self.update_upper_layers(leaf_index)?;
        self.sequence_number += 1;
        Ok(())
    }
    /// Returns the current (most recent) root.
    pub fn root(&self) -> [u8; 32] {
        // PANICS: We always initialize the Merkle tree with a
        // root (from zero bytes), so the following should never
        // panic.
        self.roots.last().cloned().unwrap()
    }
    /// Returns the Merkle path (the node itself plus one ancestor per level)
    /// of the leaf at `index`. With `full == false`, the top `canopy_depth`
    /// levels are omitted.
    pub fn get_path_of_leaf(
        &self,
        mut index: usize,
        full: bool,
    ) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        let mut path = BoundedVec::with_capacity(self.height);
        let limit = match full {
            true => self.height,
            false => self.height - self.canopy_depth,
        };
        for level in 0..limit {
            // Positions never written are empty subtrees (zero hashes).
            let node = self.layers[level]
                .get(index)
                .cloned()
                .unwrap_or(H::zero_bytes()[level]);
            path.push(node)?;
            index /= 2;
        }
        Ok(path)
    }
    /// Returns the Merkle proof (one sibling per level) of the leaf at
    /// `index`. With `full == false`, the top `canopy_depth` levels are
    /// omitted.
    pub fn get_proof_of_leaf(
        &self,
        mut index: usize,
        full: bool,
    ) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        let mut proof = BoundedVec::with_capacity(self.height);
        let limit = match full {
            true => self.height,
            false => self.height - self.canopy_depth,
        };
        for level in 0..limit {
            // The sibling of a left child is the node to its right and
            // vice versa; missing siblings are empty subtrees.
            let is_left = index % 2 == 0;
            let sibling_index = if is_left { index + 1 } else { index - 1 };
            let node = self.layers[level]
                .get(sibling_index)
                .cloned()
                .unwrap_or(H::zero_bytes()[level]);
            proof.push(node)?;
            index /= 2;
        }
        Ok(proof)
    }
    /// Returns the canopy: all nodes of the top `canopy_depth` levels,
    /// collected from the level directly below the root downwards
    /// (2, 4, 8, ... nodes per level).
    pub fn get_canopy(&self) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        if self.canopy_depth == 0 {
            return Ok(BoundedVec::with_capacity(0));
        }
        let mut canopy = BoundedVec::with_capacity(self.canopy_size());
        let mut num_nodes_in_level = 2;
        for i in 0..self.canopy_depth {
            let level = self.height - 1 - i;
            for j in 0..num_nodes_in_level {
                let node = self.layers[level]
                    .get(j)
                    .cloned()
                    .unwrap_or(H::zero_bytes()[level]);
                canopy.push(node)?;
            }
            num_nodes_in_level *= 2;
        }
        Ok(canopy)
    }
    /// Returns the leaf at `leaf_index`, or the zero leaf hash if no leaf
    /// was appended there.
    pub fn get_leaf(&self, leaf_index: usize) -> [u8; 32] {
        self.layers[0]
            .get(leaf_index)
            .cloned()
            .unwrap_or(H::zero_bytes()[0])
    }
    /// Returns the index of the first leaf equal to `leaf`, if any.
    pub fn get_leaf_index(&self, leaf: &[u8; 32]) -> Option<usize> {
        self.layers[0].iter().position(|node| node == leaf)
    }
    /// Returns all appended leaves.
    pub fn leaves(&self) -> &[[u8; 32]] {
        self.layers[0].as_slice()
    }
    /// Verifies that `proof` proves inclusion of `leaf` at `leaf_index`
    /// under the current root.
    ///
    /// The proof must be full-height (`self.height` nodes); non-`full`
    /// proofs from [`Self::get_proof_of_leaf`] are rejected when
    /// `canopy_depth > 0`.
    pub fn verify(
        &self,
        leaf: &[u8; 32],
        proof: &BoundedVec<[u8; 32]>,
        leaf_index: usize,
    ) -> Result<bool, ReferenceMerkleTreeError> {
        if leaf_index >= self.capacity {
            return Err(ReferenceMerkleTreeError::LeafDoesNotExist(leaf_index));
        }
        if proof.len() != self.height {
            return Err(ReferenceMerkleTreeError::InvalidProofLength(
                proof.len(),
                self.height,
            ));
        }
        let mut computed_hash = *leaf;
        let mut current_index = leaf_index;
        for sibling_hash in proof.iter() {
            // Hash order depends on whether the current node is a left or
            // right child.
            let is_left = current_index % 2 == 0;
            let hashes = if is_left {
                [&computed_hash[..], &sibling_hash[..]]
            } else {
                [&sibling_hash[..], &computed_hash[..]]
            };
            computed_hash = H::hashv(&hashes)?;
            // Move to the parent index for the next iteration
            current_index /= 2;
        }
        // Compare the computed hash to the last known root
        Ok(computed_hash == self.root())
    }
    /// Returns the filled subtrees of the Merkle tree.
    /// Subtrees are the rightmost left node of each level.
    /// Subtrees can be used for efficient append operations.
    pub fn get_subtrees(&self) -> Vec<[u8; 32]> {
        // Default every level to its empty-subtree (zero) hash.
        let mut subtrees = H::zero_bytes()[0..self.height].to_vec();
        // Only look for filled subtrees once the tree has at least one node
        // in its top layer (i.e. at least one leaf was appended).
        if self.layers.last().and_then(|layer| layer.first()).is_some() {
            for level in (0..self.height).rev() {
                if let Some(left_child) = self.layers.get(level).and_then(|layer| {
                    // The rightmost *left* node: with an even node count the
                    // last node is a right child, so take the one before it.
                    if layer.len() % 2 == 0 {
                        layer.get(layer.len() - 2)
                    } else {
                        layer.last()
                    }
                }) {
                    subtrees[level] = *left_child;
                }
            }
        }
        subtrees
    }
}
#[cfg(test)]
mod tests {
    use light_hasher::{zero_bytes::poseidon::ZERO_BYTES, Poseidon};
    use super::*;
    // Expected filled subtrees of a height-4 Poseidon tree after appending
    // the leaf `[0, .., 0, 1]` twice (see `test_subtrees`).
    const TREE_AFTER_1_UPDATE: [[u8; 32]; 4] = [
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1,
        ],
        [
            0, 122, 243, 70, 226, 211, 4, 39, 158, 121, 224, 169, 243, 2, 63, 119, 18, 148, 167,
            138, 203, 112, 231, 63, 144, 175, 226, 124, 173, 64, 30, 129,
        ],
        [
            4, 163, 62, 195, 162, 201, 237, 49, 131, 153, 66, 155, 106, 112, 192, 40, 76, 131, 230,
            239, 224, 130, 106, 36, 128, 57, 172, 107, 60, 247, 103, 194,
        ],
        [
            7, 118, 172, 114, 242, 52, 137, 62, 111, 106, 113, 139, 123, 161, 39, 255, 86, 13, 105,
            167, 223, 52, 15, 29, 137, 37, 106, 178, 49, 44, 226, 75,
        ],
    ];
    // Expected filled subtrees after two further appends of the leaf
    // `[0, .., 0, 2]`. Level 1 keeps its previous value because the new
    // leaves fill the right half of that subtree.
    const TREE_AFTER_2_UPDATES: [[u8; 32]; 4] = [
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 2,
        ],
        [
            0, 122, 243, 70, 226, 211, 4, 39, 158, 121, 224, 169, 243, 2, 63, 119, 18, 148, 167,
            138, 203, 112, 231, 63, 144, 175, 226, 124, 173, 64, 30, 129,
        ],
        [
            18, 102, 129, 25, 152, 42, 192, 218, 100, 215, 169, 202, 77, 24, 100, 133, 45, 152, 17,
            121, 103, 9, 187, 226, 182, 36, 35, 35, 126, 255, 244, 140,
        ],
        [
            11, 230, 92, 56, 65, 91, 231, 137, 40, 92, 11, 193, 90, 225, 123, 79, 82, 17, 212, 147,
            43, 41, 126, 223, 49, 2, 139, 211, 249, 138, 7, 12,
        ],
    ];
    // Checks `get_subtrees` against precomputed values: an empty tree must
    // yield the Poseidon zero hashes, and the subtrees must track appends.
    #[test]
    fn test_subtrees() {
        let tree_depth = 4;
        let mut tree = MerkleTree::<Poseidon>::new(tree_depth, 0); // Replace TestHasher with your specific hasher.
        let subtrees = tree.get_subtrees();
        // Empty tree: every level's subtree equals its zero hash.
        for (i, subtree) in subtrees.iter().enumerate() {
            assert_eq!(*subtree, ZERO_BYTES[i]);
        }
        let mut leaf_0: [u8; 32] = [0; 32];
        leaf_0[31] = 1;
        tree.append(&leaf_0).unwrap();
        tree.append(&leaf_0).unwrap();
        let subtrees = tree.get_subtrees();
        for (i, subtree) in subtrees.iter().enumerate() {
            assert_eq!(*subtree, TREE_AFTER_1_UPDATE[i]);
        }
        let mut leaf_1: [u8; 32] = [0; 32];
        leaf_1[31] = 2;
        tree.append(&leaf_1).unwrap();
        tree.append(&leaf_1).unwrap();
        let subtrees = tree.get_subtrees();
        for (i, subtree) in subtrees.iter().enumerate() {
            assert_eq!(*subtree, TREE_AFTER_2_UPDATES[i]);
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference/src/sparse_merkle_tree.rs
|
use light_hasher::Hasher;
use num_bigint::BigUint;
use std::marker::PhantomData;
/// Append-only Merkle tree that stores only O(HEIGHT) state: the filled
/// subtrees (rightmost left node per level), the latest root and the next
/// leaf index — instead of all nodes.
#[derive(Clone, Debug)]
pub struct SparseMerkleTree<H: Hasher, const HEIGHT: usize> {
    /// Rightmost left node per level; sufficient to compute appends.
    subtrees: [[u8; 32]; HEIGHT],
    /// Index at which the next leaf will be inserted.
    next_index: usize,
    /// Latest root (all zero until the first `append` when constructed via
    /// `new` — see the note there).
    root: [u8; 32],
    _hasher: PhantomData<H>,
}
impl<H, const HEIGHT: usize> SparseMerkleTree<H, HEIGHT>
where
    H: Hasher,
{
    /// Restores a tree from previously saved `subtrees` and `next_index`.
    ///
    /// NOTE(review): `root` is initialized to all zeroes rather than the
    /// root implied by `subtrees`; `root()` only returns a meaningful value
    /// after the next `append`. Confirm callers rely on this.
    pub fn new(subtrees: [[u8; 32]; HEIGHT], next_index: usize) -> Self {
        Self {
            subtrees,
            next_index,
            root: [0u8; 32],
            _hasher: PhantomData,
        }
    }
    /// Creates an empty tree: zero-hash subtrees and the zero root for
    /// `HEIGHT`.
    pub fn new_empty() -> Self {
        Self {
            subtrees: H::zero_bytes()[0..HEIGHT].try_into().unwrap(),
            next_index: 0,
            root: H::zero_bytes()[HEIGHT],
            _hasher: PhantomData,
        }
    }
    /// Appends `leaf`, updating the filled subtrees and the root, and
    /// returns the Merkle proof (one sibling per level, bottom-up) for the
    /// appended leaf.
    pub fn append(&mut self, leaf: [u8; 32]) -> [[u8; 32]; HEIGHT] {
        let mut current_index = self.next_index;
        let mut current_level_hash = leaf;
        let mut left;
        let mut right;
        let mut proof: [[u8; 32]; HEIGHT] = [[0u8; 32]; HEIGHT];
        for (i, (subtree, zero_byte)) in self
            .subtrees
            .iter_mut()
            .zip(H::zero_bytes().iter())
            .enumerate()
        {
            if current_index % 2 == 0 {
                // Left child: the sibling is an empty subtree, and this node
                // becomes the new filled subtree for this level.
                left = current_level_hash;
                right = *zero_byte;
                *subtree = current_level_hash;
                proof[i] = right;
            } else {
                // Right child: the sibling is the filled subtree recorded
                // for this level.
                left = *subtree;
                right = current_level_hash;
                proof[i] = left;
            }
            current_level_hash = H::hashv(&[&left, &right]).unwrap();
            current_index /= 2;
        }
        self.root = current_level_hash;
        self.next_index += 1;
        proof
    }
    /// Returns the latest root.
    pub fn root(&self) -> [u8; 32] {
        self.root
    }
    /// Returns the filled subtrees (one per level).
    pub fn get_subtrees(&self) -> [[u8; 32]; HEIGHT] {
        self.subtrees
    }
    /// Returns the tree height.
    pub fn get_height(&self) -> usize {
        HEIGHT
    }
    /// Returns the index at which the next leaf will be appended.
    pub fn get_next_index(&self) -> usize {
        self.next_index
    }
}
/// Formats a 32-byte big-endian array as a `0x`-prefixed lowercase hex
/// string. Leading zero bytes are not rendered, since the bytes are first
/// interpreted as a big integer.
pub fn arr_to_string(arr: [u8; 32]) -> String {
    let hex_digits = BigUint::from_bytes_be(&arr).to_str_radix(16);
    let mut out = String::with_capacity(2 + hex_digits.len());
    out.push_str("0x");
    out.push_str(&hex_digits);
    out
}
#[cfg(test)]
mod test {
    use crate::MerkleTree;
    use super::*;
    use light_hasher::Poseidon;
    // Cross-checks `SparseMerkleTree` against the full reference
    // `MerkleTree`: after appending the same leaf to both, the roots and
    // filled subtrees must agree.
    #[test]
    fn test_sparse_merkle_tree() {
        let height = 4;
        let mut merkle_tree = SparseMerkleTree::<Poseidon, 4>::new_empty();
        let mut reference_merkle_tree = MerkleTree::<Poseidon>::new(height, 0);
        let leaf = [1u8; 32];
        merkle_tree.append(leaf);
        reference_merkle_tree.append(&leaf).unwrap();
        assert_eq!(merkle_tree.root(), reference_merkle_tree.root());
        let subtrees = merkle_tree.get_subtrees();
        let reference_subtrees = reference_merkle_tree.get_subtrees();
        assert_eq!(subtrees.to_vec(), reference_subtrees);
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/rollup.config.js
|
import typescript from '@rollup/plugin-typescript';
import nodePolyfills from 'rollup-plugin-polyfill-node';
import dts from 'rollup-plugin-dts';
import resolve from '@rollup/plugin-node-resolve';
import commonjs from '@rollup/plugin-commonjs';
// Builds a Rollup config for one (module format, environment) pair,
// where `fmt` is 'cjs' or 'es' and `env` is 'browser' or 'node'.
const rolls = (fmt, env) => ({
    input: 'src/index.ts',
    output: {
        dir: `dist/${fmt}/${env}`,
        format: fmt,
        // CJS bundles get the `.cjs` extension so Node loads them as
        // CommonJS despite `"type": "module"` in package.json.
        entryFileNames: `[name].${fmt === 'cjs' ? 'cjs' : 'js'}`,
        sourcemap: true,
    },
    // Runtime dependencies stay external; consumers resolve them.
    external: [
        '@coral-xyz/anchor',
        '@solana/web3.js',
        '@noble/hashes',
        'buffer',
        'superstruct',
        'tweetnacl',
    ],
    plugins: [
        typescript({
            // ES output targets modern syntax; CJS stays at ES2017 for
            // broader compatibility.
            target: fmt === 'es' ? 'ES2022' : 'ES2017',
            outDir: `dist/${fmt}/${env}`,
            rootDir: 'src',
        }),
        commonjs(),
        resolve({
            browser: env === 'browser',
            preferBuiltins: env === 'node',
        }),
        // Node built-in polyfills are only needed in browser bundles;
        // `filter(Boolean)` drops the `undefined` entry otherwise.
        env === 'browser' ? nodePolyfills() : undefined,
    ].filter(Boolean),
    onwarn(warning, warn) {
        // Suppress circular-dependency warnings; forward everything else.
        if (warning.code !== 'CIRCULAR_DEPENDENCY') {
            warn(warning);
        }
    },
});
// Rolls all declaration files into a single dist/types/index.d.ts.
const typesConfig = {
    input: 'src/index.ts',
    output: [{ file: 'dist/types/index.d.ts', format: 'es' }],
    plugins: [dts()],
};
// Emitted bundles: cjs+es for browsers, cjs for Node, plus typings.
// NOTE(review): no es/node bundle is produced — Node consumers get CJS only.
export default [
    rolls('cjs', 'browser'),
    rolls('es', 'browser'),
    rolls('cjs', 'node'),
    typesConfig,
];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tsconfig.test.json
|
{
"compilerOptions": {
"esModuleInterop": true,
"rootDirs": ["src", "tests"]
},
"extends": "./tsconfig.json",
"include": ["./tests/**/*.ts", "vitest.config.ts"]
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/package.json
|
{
"name": "@lightprotocol/stateless.js",
"version": "0.16.0",
"description": "JavaScript API for Light and ZK Compression",
"sideEffects": false,
"main": "dist/cjs/node/index.cjs",
"type": "module",
"exports": {
".": {
"require": "./dist/cjs/node/index.cjs",
"types": "./dist/types/index.d.ts",
"default": "./dist/cjs/node/index.cjs"
},
"./browser": {
"import": "./dist/es/browser/index.js",
"require": "./dist/cjs/browser/index.cjs",
"types": "./dist/types/index.d.ts"
}
},
"types": "./dist/types/index.d.ts",
"files": [
"dist"
],
"keywords": [
"zk",
"compression",
"stateless",
"solana"
],
"maintainers": [
{
"name": "Light Protocol Maintainers",
"email": "friends@lightprotocol.com"
}
],
"license": "Apache-2.0",
"dependencies": {
"@coral-xyz/anchor": "0.29.0",
"@solana/web3.js": "1.95.3",
"@noble/hashes": "1.5.0",
"buffer": "6.0.3",
"superstruct": "2.0.2",
"tweetnacl": "1.0.3"
},
"devDependencies": {
"@lightprotocol/hasher.rs": "workspace:*",
"@esbuild-plugins/node-globals-polyfill": "^0.2.3",
"@lightprotocol/programs": "workspace:*",
"@playwright/test": "^1.47.1",
"@rollup/plugin-babel": "^6.0.4",
"@rollup/plugin-commonjs": "^26.0.1",
"@rollup/plugin-json": "^6.1.0",
"@rollup/plugin-node-resolve": "^15.2.3",
"@rollup/plugin-replace": "^5.0.7",
"@rollup/plugin-terser": "^0.4.4",
"@rollup/plugin-typescript": "^11.1.6",
"@types/bn.js": "^5.1.5",
"@types/node": "^22.5.5",
"@typescript-eslint/eslint-plugin": "^7.13.1",
"@typescript-eslint/parser": "^7.13.1",
"eslint": "^8.56.0",
"eslint-plugin-n": "^17.10.2",
"eslint-plugin-promise": "^7.1.0",
"eslint-plugin-vitest": "^0.5.4",
"http-server": "^14.1.1",
"playwright": "^1.47.1",
"prettier": "^3.3.3",
"rimraf": "^6.0.1",
"rollup": "^4.21.3",
"rollup-plugin-dts": "^6.1.1",
"rollup-plugin-polyfill-node": "^0.13.0",
"ts-node": "^10.9.2",
"tslib": "^2.7.0",
"typescript": "^5.6.2",
"vitest": "^2.1.1"
},
"scripts": {
"test": "pnpm test:unit:all && pnpm test:e2e:all",
"test-all": "vitest run",
"test:unit:all": "vitest run tests/unit",
"test-validator": "./../../cli/test_bin/run test-validator --prover-run-mode rpc",
"test:e2e:transfer": "pnpm test-validator && vitest run tests/e2e/transfer.test.ts --reporter=verbose",
"test:e2e:compress": "pnpm test-validator && vitest run tests/e2e/compress.test.ts --reporter=verbose",
"test:e2e:test-rpc": "pnpm test-validator && vitest run tests/e2e/test-rpc.test.ts",
"test:e2e:rpc-interop": "pnpm test-validator && vitest run tests/e2e/rpc-interop.test.ts",
"test:e2e:browser": "pnpm playwright test",
"test:e2e:all": "pnpm test-validator && vitest run tests/e2e/test-rpc.test.ts && vitest run tests/e2e/compress.test.ts && vitest run tests/e2e/transfer.test.ts && vitest run tests/e2e/rpc-interop.test.ts",
"test:index": "vitest run tests/e2e/program.test.ts",
"test:e2e:serde": "vitest run tests/e2e/serde.test.ts",
"test:verbose": "vitest run --reporter=verbose",
"test:testnet": "vitest run tests/e2e/testnet.test.ts --reporter=verbose",
"pull-idls": "../../scripts/push-stateless-js-idls.sh && ../../scripts/push-compressed-token-idl.sh",
"build": "rimraf dist && pnpm pull-idls && pnpm build:bundle",
"build:bundle": "rollup -c",
"format": "prettier --write .",
"lint": "eslint ."
},
"nx": {
"targets": {
"build": {
"inputs": [
"{workspaceRoot}/cli",
"{workspaceRoot}/target/idl",
"{workspaceRoot}/target/types"
]
}
}
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/.prettierrc
|
{
"semi": true,
"trailingComma": "all",
"singleQuote": true,
"printWidth": 80,
"useTabs": false,
"tabWidth": 4,
"bracketSpacing": true,
"arrowParens": "avoid"
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tsconfig.json
|
{
"$schema": "https://json.schemastore.org/tsconfig",
"compilerOptions": {
"importHelpers": true,
"outDir": "./dist",
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"strict": true,
"declaration": true,
"target": "ESNext",
"module": "ESNext",
"moduleResolution": "Node",
"lib": ["ESNext", "DOM"],
"types": ["node"],
"skipLibCheck": false
},
"include": ["./src/**/*.ts", "playwright.config.ts", "rollup.config.js"]
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/.eslintignore
|
node_modules
lib
dist
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/playwright.config.ts
|
import { PlaywrightTestConfig } from '@playwright/test';
// Playwright config for the browser e2e suite: serves the package root over
// a local static server so tests can load the built bundles.
const config: PlaywrightTestConfig = {
    webServer: {
        // `-c-1` disables http-server's response caching.
        command: 'npx http-server -p 4004 -c-1',
        port: 4004,
        cwd: '.',
        // Fail if the server is not reachable within 15 seconds.
        timeout: 15 * 1000,
    },
    testDir: './tests/e2e/browser',
};
export default config;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/vitest.config.ts
|
import { defineConfig } from 'vitest/config';
export default defineConfig({
    test: {
        include: ['tests/**/*.test.ts'],
        // Setting EXCLUDE_E2E skips the e2e suite (useful where no local
        // test validator is available).
        exclude: process.env.EXCLUDE_E2E ? ['tests/e2e/**'] : [],
        // E2e tests talk to a local validator; allow up to 30s per test.
        testTimeout: 30000,
    },
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/.eslintrc.json
|
{
"root": true,
"ignorePatterns": ["node_modules", "lib"],
"parser": "@typescript-eslint/parser",
"parserOptions": {
"project": ["./tsconfig.json", "./tsconfig.test.json"]
},
"plugins": ["@typescript-eslint", "vitest"],
"extends": [
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended"
],
"rules": {
"@typescript-eslint/ban-ts-comment": 0,
"@typescript-eslint/no-explicit-any": 0,
"@typescript-eslint/no-var-requires": 0,
"@typescript-eslint/no-unused-vars": 0,
"no-prototype-builtins": 0
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/merkle-tree/merkle-tree.test.ts
|
import { IndexedArray } from '../../../src/test-helpers/merkle-tree/indexed-array';
import { beforeAll, describe, expect, it } from 'vitest';
import { IndexedElement } from '../../../src/test-helpers/merkle-tree/indexed-array';
import { HIGHEST_ADDRESS_PLUS_ONE } from '../../../src/constants';
import { bn } from '../../../src/state';
import { MerkleTree } from '../../../src/test-helpers/merkle-tree';
describe('MerkleTree', () => {
    let WasmFactory: any;
    // Precomputed reference roots of a height-26 indexed Merkle tree,
    // asserted against after 0, 1, 2 and 3 appends respectively.
    const refIndexedMerkleTreeInitedRoot = [
        33, 133, 56, 184, 142, 166, 110, 161, 4, 140, 169, 247, 115, 33, 15,
        181, 76, 89, 48, 126, 58, 86, 204, 81, 16, 121, 185, 77, 75, 152, 43,
        15,
    ];
    const refIndexedMerkleTreeRootWithOneAppend = [
        31, 159, 196, 171, 68, 16, 213, 28, 158, 200, 223, 91, 244, 193, 188,
        162, 50, 68, 54, 244, 116, 44, 153, 65, 209, 9, 47, 98, 126, 89, 131,
        158,
    ];
    const refIndexedMerkleTreeRootWithTwoAppends = [
        1, 185, 99, 233, 59, 202, 51, 222, 224, 31, 119, 180, 76, 104, 72, 27,
        152, 12, 236, 78, 81, 60, 87, 158, 237, 1, 176, 9, 155, 166, 108, 89,
    ];
    const refIndexedMerkleTreeRootWithThreeAppends = [
        41, 143, 181, 2, 66, 117, 37, 226, 134, 212, 45, 95, 114, 60, 189, 18,
        44, 155, 132, 148, 41, 54, 131, 106, 61, 120, 237, 168, 118, 198, 63,
        116,
    ];
    // Expected indexed-array state after appending value 30:
    // element 0 (value 0) points to element 2, element 2 (value 30) points
    // to element 1 (the highest-address sentinel).
    const refIndexedArrayElem0 = new IndexedElement(0, bn(0), 2);
    const refIndexedArrayElem1 = new IndexedElement(
        1,
        HIGHEST_ADDRESS_PLUS_ONE,
        0,
    );
    const refIndexedArrayElem2 = new IndexedElement(2, bn(30), 1);
    describe('IndexedArray', () => {
        beforeAll(async () => {
            // Loaded lazily so the wasm module is only pulled in when this
            // suite actually runs.
            WasmFactory = (await import('@lightprotocol/hasher.rs'))
                .WasmFactory;
        });
        it('should findLowElementIndex', () => {
            const indexedArray = new IndexedArray(
                [
                    refIndexedArrayElem0,
                    refIndexedArrayElem1,
                    refIndexedArrayElem2,
                ],
                2,
                1,
            );
            // The low element is the one with the greatest value below the
            // queried value; querying an existing value (30) throws.
            expect(indexedArray.findLowElementIndex(bn(29))).toEqual(0);
            expect(() => indexedArray.findLowElementIndex(bn(30))).toThrow();
            expect(indexedArray.findLowElementIndex(bn(31))).toEqual(2);
        });
        it('should findLowElement', () => {
            const indexedArray = new IndexedArray(
                [
                    refIndexedArrayElem0,
                    refIndexedArrayElem1,
                    refIndexedArrayElem2,
                ],
                2,
                1,
            );
            const [lowElement, nextValue] = indexedArray.findLowElement(bn(29));
            expect(lowElement).toEqual(refIndexedArrayElem0);
            expect(nextValue).toEqual(bn(30));
            // Querying an existing value throws.
            expect(() => indexedArray.findLowElement(bn(30))).toThrow();
            const [lowElement2, nextValue2] = indexedArray.findLowElement(
                bn(31),
            );
            expect(lowElement2).toEqual(refIndexedArrayElem2);
            expect(nextValue2).toEqual(HIGHEST_ADDRESS_PLUS_ONE);
        });
        it('should appendWithLowElementIndex', () => {
            const indexedArray = new IndexedArray(
                [
                    new IndexedElement(0, bn(0), 1),
                    new IndexedElement(1, HIGHEST_ADDRESS_PLUS_ONE, 0),
                ],
                1,
                1,
            );
            const newElement = indexedArray.appendWithLowElementIndex(
                0,
                bn(30),
            );
            expect(newElement.newElement).toEqual(refIndexedArrayElem2);
            expect(newElement.newLowElement).toEqual(refIndexedArrayElem0);
            expect(newElement.newElementNextValue).toEqual(
                HIGHEST_ADDRESS_PLUS_ONE,
            );
        });
        it('should append', () => {
            const indexedArray = new IndexedArray(
                [
                    new IndexedElement(0, bn(0), 1),
                    new IndexedElement(1, HIGHEST_ADDRESS_PLUS_ONE, 0),
                ],
                1,
                1,
            );
            // `append` finds the low element itself; expected result matches
            // appendWithLowElementIndex(0, 30) above.
            const newElement = indexedArray.append(bn(30));
            expect(newElement.newElement).toEqual(refIndexedArrayElem2);
            expect(newElement.newLowElement).toEqual(refIndexedArrayElem0);
            expect(newElement.newElementNextValue).toEqual(
                HIGHEST_ADDRESS_PLUS_ONE,
            );
        });
        it('should append 3 times and match merkle trees', async () => {
            const lightWasm = await WasmFactory.getInstance();
            const indexedArray = IndexedArray.default();
            indexedArray.init();
            // After init, the tree built from the element hashes must match
            // the precomputed initial root.
            let hash0 = indexedArray.hashElement(lightWasm, 0);
            let hash1 = indexedArray.hashElement(lightWasm, 1);
            let leaves = [hash0, hash1].map(leaf => bn(leaf!).toString());
            let tree = new MerkleTree(26, lightWasm, leaves);
            expect(tree.root()).toEqual(
                bn(refIndexedMerkleTreeInitedRoot).toString(),
            );
            // 1st
            const newElement = indexedArray.append(bn(30));
            expect(newElement.newElement).toEqual(refIndexedArrayElem2);
            expect(newElement.newLowElement).toEqual(refIndexedArrayElem0);
            expect(newElement.newElementNextValue).toEqual(
                HIGHEST_ADDRESS_PLUS_ONE,
            );
            hash0 = indexedArray.hashElement(lightWasm, 0);
            hash1 = indexedArray.hashElement(lightWasm, 1);
            let hash2 = indexedArray.hashElement(lightWasm, 2);
            leaves = [hash0, hash1, hash2].map(leaf => bn(leaf!).toString());
            tree = new MerkleTree(26, lightWasm, leaves);
            expect(tree.root()).toEqual(
                bn(refIndexedMerkleTreeRootWithOneAppend).toString(),
            );
            // 2nd
            // Appending 42 (> 30): element 2 now points to the new element 3.
            let refItems0 = new IndexedElement(0, bn(0), 2);
            let refItems1 = new IndexedElement(1, HIGHEST_ADDRESS_PLUS_ONE, 0);
            let refItems2 = new IndexedElement(2, bn(30), 3);
            let refItems3 = new IndexedElement(3, bn(42), 1);
            const newElement2 = indexedArray.append(bn(42));
            expect(newElement2.newElement).toEqual(refItems3);
            expect(newElement2.newLowElement).toEqual(refItems2);
            expect(newElement2.newElementNextValue).toEqual(
                HIGHEST_ADDRESS_PLUS_ONE,
            );
            expect(indexedArray.elements[0].equals(refItems0)).toBeTruthy();
            expect(indexedArray.elements[1].equals(refItems1)).toBeTruthy();
            expect(indexedArray.elements[2].equals(refItems2)).toBeTruthy();
            expect(indexedArray.elements[3].equals(refItems3)).toBeTruthy();
            hash0 = indexedArray.hashElement(lightWasm, 0);
            hash1 = indexedArray.hashElement(lightWasm, 1);
            hash2 = indexedArray.hashElement(lightWasm, 2);
            let hash3 = indexedArray.hashElement(lightWasm, 3);
            leaves = [hash0, hash1, hash2, hash3].map(leaf =>
                bn(leaf!).toString(),
            );
            tree = new MerkleTree(26, lightWasm, leaves);
            expect(tree.root()).toEqual(
                bn(refIndexedMerkleTreeRootWithTwoAppends).toString(),
            );
            // 3rd
            // Appending 12 (between 0 and 30): element 0 becomes the low
            // element and points to the new element 4.
            refItems0 = new IndexedElement(0, bn(0), 4);
            refItems1 = new IndexedElement(1, HIGHEST_ADDRESS_PLUS_ONE, 0);
            refItems2 = new IndexedElement(2, bn(30), 3);
            refItems3 = new IndexedElement(3, bn(42), 1);
            const refItems4 = new IndexedElement(4, bn(12), 2);
            const newElement3 = indexedArray.append(bn(12));
            expect(newElement3.newElement).toEqual(refItems4);
            expect(newElement3.newLowElement).toEqual(refItems0);
            expect(newElement3.newElementNextValue).toEqual(bn(30));
            expect(indexedArray.elements[0].equals(refItems0)).toBeTruthy();
            expect(indexedArray.elements[1].equals(refItems1)).toBeTruthy();
            expect(indexedArray.elements[2].equals(refItems2)).toBeTruthy();
            expect(indexedArray.elements[3].equals(refItems3)).toBeTruthy();
            expect(indexedArray.elements[4].equals(refItems4)).toBeTruthy();
            hash0 = indexedArray.hashElement(lightWasm, 0);
            hash1 = indexedArray.hashElement(lightWasm, 1);
            hash2 = indexedArray.hashElement(lightWasm, 2);
            hash3 = indexedArray.hashElement(lightWasm, 3);
            const hash4 = indexedArray.hashElement(lightWasm, 4);
            leaves = [hash0, hash1, hash2, hash3, hash4].map(leaf =>
                bn(leaf!).toString(),
            );
            tree = new MerkleTree(26, lightWasm, leaves);
            expect(tree.root()).toEqual(
                bn(refIndexedMerkleTreeRootWithThreeAppends).toString(),
            );
        });
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/utils/conversion.test.ts
|
import { describe, it, expect } from 'vitest';
import { toArray } from '../../../src/utils/conversion';
import { calculateComputeUnitPrice } from '../../../src/utils';
/**
 * Unit tests for `toArray`: normalizes a single value or an array into an
 * array, preserving identity for array inputs.
 */
describe('toArray', () => {
    it('should return same array if array is passed', () => {
        const arr = [1, 2, 3];
        // Identity check on purpose: array inputs must pass through uncopied.
        expect(toArray(arr)).toBe(arr);
    });
    it('should wrap non-array in array', () => {
        const value = 42;
        expect(toArray(value)).toEqual([42]);
    });
});
/**
 * Unit tests for `calculateComputeUnitPrice`.
 *
 * This suite was previously nested inside the `toArray` suite by mistake;
 * it is hoisted to a top-level suite so test reports group it correctly.
 * All three expectations are consistent with:
 * price (microlamports/CU) = lamports * 1e6 / computeUnits.
 */
describe('calculateComputeUnitPrice', () => {
    it('calculates correct price', () => {
        expect(calculateComputeUnitPrice(1000, 200000)).toBe(5000); // 1000 lamports / 200k CU = 5000 microlamports/CU
        expect(calculateComputeUnitPrice(100, 50000)).toBe(2000); // 100 lamports / 50k CU = 2000 microlamports/CU
        expect(calculateComputeUnitPrice(1, 1000000)).toBe(1); // 1 lamport / 1M CU = 1 microlamport/CU
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/utils/validation.test.ts
|
import { describe, it, expect } from 'vitest';
import { validateSufficientBalance } from '../../../src/utils/validation';
import { bn } from '../../../src/state';
/**
 * Unit tests for `validateSufficientBalance`: only a negative balance is
 * rejected; zero and positive balances are accepted without throwing.
 */
describe('validateSufficientBalance', () => {
    it('should not throw error for positive balance', () => {
        const positive = bn(100);
        expect(() => validateSufficientBalance(positive)).not.toThrow();
    });
    it('should not throw error for zero balance', () => {
        const zero = bn(0);
        expect(() => validateSufficientBalance(zero)).not.toThrow();
    });
    it('should throw error for negative balance', () => {
        const negative = bn(-1);
        expect(() => validateSufficientBalance(negative)).toThrow();
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/state/compressed-account.test.ts
|
import { describe, it, expect } from 'vitest';
import {
createCompressedAccount,
createCompressedAccountWithMerkleContext,
createMerkleContext,
} from '../../../src/state/compressed-account';
import { PublicKey } from '@solana/web3.js';
import { bn } from '../../../src/state';
/**
 * Unit tests for `createCompressedAccount`: builds a compressed-account
 * object, defaulting lamports to 0 and both address and data to null.
 */
describe('createCompressedAccount function', () => {
    it('should create a compressed account with default values', () => {
        const owner = PublicKey.unique();
        expect(createCompressedAccount(owner)).toEqual({
            owner,
            lamports: bn(0),
            address: null,
            data: null,
        });
    });
    it('should create a compressed account with provided values', () => {
        const owner = PublicKey.unique();
        const lamports = bn(100);
        const data = {
            discriminator: [0],
            data: Buffer.from(new Uint8Array([1, 2, 3])),
            dataHash: [0],
        };
        const address = Array.from(PublicKey.unique().toBytes());
        const account = createCompressedAccount(owner, lamports, data, address);
        // Explicit values must be carried through unchanged.
        expect(account).toEqual({ owner, lamports, address, data });
    });
});
describe('createCompressedAccountWithMerkleContext function', () => {
it('should create a compressed account with merkle context', () => {
const owner = PublicKey.unique();
const merkleTree = PublicKey.unique();
const nullifierQueue = PublicKey.unique();
const hash = new Array(32).fill(1);
const leafIndex = 0;
const merkleContext = createMerkleContext(
merkleTree,
nullifierQueue,
hash,
leafIndex,
);
const accountWithMerkleContext =
createCompressedAccountWithMerkleContext(merkleContext, owner);
expect(accountWithMerkleContext).toEqual({
owner,
lamports: bn(0),
address: null,
data: null,
merkleTree,
nullifierQueue,
hash,
leafIndex,
readOnly: false,
});
});
});
describe('createMerkleContext function', () => {
it('should create a merkle context', () => {
const merkleTree = PublicKey.unique();
const nullifierQueue = PublicKey.unique();
const hash = new Array(32).fill(1);
const leafIndex = 0;
const merkleContext = createMerkleContext(
merkleTree,
nullifierQueue,
hash,
leafIndex,
);
expect(merkleContext).toEqual({
merkleTree,
nullifierQueue,
hash,
leafIndex,
});
});
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/state/bn254.test.ts
|
import { describe, it, expect } from 'vitest';
import { createBN254, encodeBN254toBase58 } from '../../../src/state/BN254';
import { bn } from '../../../src/state';
import { PublicKey } from '@solana/web3.js';
import { FIELD_SIZE } from '../../../src/constants';
/**
 * Unit tests for `createBN254`: every supported input representation
 * (decimal string, number, BN, Buffer, Uint8Array, number[], base58 string)
 * of the value 100 must decode to the same field element.
 */
describe('createBN254 function', () => {
    it('should create a BN254 from a string', () => {
        expect(createBN254('100').toNumber()).toBe(100);
    });
    it('should create a BN254 from a number', () => {
        expect(createBN254(100).toNumber()).toBe(100);
    });
    it('should create a BN254 from a bigint', () => {
        expect(createBN254(bn(100)).toNumber()).toBe(100);
    });
    it('should create a BN254 from a Buffer', () => {
        expect(createBN254(Buffer.from([100])).toNumber()).toBe(100);
    });
    it('should create a BN254 from a Uint8Array', () => {
        expect(createBN254(new Uint8Array([100])).toNumber()).toBe(100);
    });
    it('should create a BN254 from a number[]', () => {
        expect(createBN254([100]).toNumber()).toBe(100);
    });
    it('should create a BN254 from a base58 string', () => {
        // '2j' is the base58 encoding of the single byte 100.
        expect(createBN254('2j', 'base58').toNumber()).toBe(100);
    });
});
/**
 * Unit tests for `encodeBN254toBase58`: base58 encoding of BN254 values,
 * implicitly left-padding to 32 bytes so the encoding matches Solana's
 * `PublicKey.toBase58()` for the same bytes.
 */
describe('encodeBN254toBase58 function', () => {
    it('should convert a BN254 to a base58 string, pad to 32 implicitly', () => {
        const bigint = createBN254('100');
        const base58 = encodeBN254toBase58(bigint);
        expect(base58).toBe('11111111111111111111111111111112j');
    });
    it('should match transformation via pubkey', () => {
        // 32-byte reference hash: encoding via BN must equal encoding via
        // a PublicKey constructed from the same bytes.
        const refHash = [
            13, 225, 248, 105, 237, 121, 108, 70, 70, 197, 240, 130, 226, 236,
            129, 58, 213, 50, 236, 99, 216, 99, 91, 201, 141, 76, 196, 33, 41,
            181, 236, 187,
        ];
        const base58 = encodeBN254toBase58(bn(refHash));
        const pubkeyConv = new PublicKey(refHash).toBase58();
        expect(base58).toBe(pubkeyConv);
    });
    it('should pad to 32 bytes converting BN to Pubkey', () => {
        // 31-byte input: round-tripping must yield a leading zero byte.
        const refHash31 = [
            13, 225, 248, 105, 237, 121, 108, 70, 70, 197, 240, 130, 226, 236,
            129, 58, 213, 50, 236, 99, 216, 99, 91, 201, 141, 76, 196, 33, 41,
            181, 236,
        ];
        const base58 = encodeBN254toBase58(bn(refHash31));
        // Use the vitest matcher instead of the chai `.to.be.deep.equal`
        // chain, for consistency with the rest of this suite.
        expect(createBN254(base58, 'base58').toArray('be', 32)).toEqual(
            [0].concat(refHash31),
        );
    });
    it('should throw an error for a value that is too large', () => {
        // FIELD_SIZE itself is out of range: valid values are < FIELD_SIZE.
        expect(() => createBN254(FIELD_SIZE)).toThrow(
            'Value is too large. Max <254 bits',
        );
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/instruction/pack-compressed-accounts.test.ts
|
import { describe, expect, it } from 'vitest';
import { PublicKey } from '@solana/web3.js';
import { padOutputStateMerkleTrees } from '../../../src/instruction/pack-compressed-accounts';
/**
 * Unit tests for `padOutputStateMerkleTrees`, which resolves the list of
 * output state trees to exactly the requested number of output compressed
 * accounts — defaulting, padding, or truncating as needed.
 */
describe('padOutputStateMerkleTrees', () => {
    const treeA: any = PublicKey.unique();
    const treeB: any = PublicKey.unique();
    const treeC: any = PublicKey.unique();
    const accA: any = { merkleTree: treeA };
    const accB: any = { merkleTree: treeB };
    // NOTE: an unused `accC` fixture was removed; only accA/accB are needed.
    it('should use the 0th state tree of input state if no output state trees are provided', () => {
        const result = padOutputStateMerkleTrees(undefined, 3, [accA, accB]);
        expect(result).toEqual([treeA, treeA, treeA]);
    });
    it('should fill up with the first state tree if provided trees are less than required', () => {
        const result = padOutputStateMerkleTrees([treeA, treeB], 5, []);
        expect(result).toEqual([treeA, treeB, treeA, treeA, treeA]);
    });
    it('should remove extra trees if the number of output state trees is greater than the number of output accounts', () => {
        const result = padOutputStateMerkleTrees([treeA, treeB, treeC], 2, []);
        expect(result).toEqual([treeA, treeB]);
    });
    it('should return the same outputStateMerkleTrees if its length equals the number of output compressed accounts', () => {
        const result = padOutputStateMerkleTrees([treeA, treeB, treeC], 3, []);
        expect(result).toEqual([treeA, treeB, treeC]);
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/transfer.test.ts
|
import { describe, it, assert, beforeAll } from 'vitest';
import { Signer } from '@solana/web3.js';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { Rpc } from '../../src/rpc';
import { bn, compress } from '../../src';
import { transfer } from '../../src/actions/transfer';
import { getTestRpc } from '../../src/test-helpers/test-rpc';
import { WasmFactory } from '@lightprotocol/hasher.rs';
/**
 * End-to-end test: repeated compressed-lamport transfers between two local
 * test accounts, asserting exact balance deltas after every transfer.
 * Requires a running local test validator (via `getTestRpc`).
 */
describe('transfer', () => {
    let rpc: Rpc;
    let payer: Signer;
    let bob: Signer;
    beforeAll(async () => {
        const lightWasm = await WasmFactory.getInstance();
        rpc = await getTestRpc(lightWasm);
        // Fund both parties with 2 SOL; compress 1 SOL for the payer so
        // there is compressed balance to transfer from.
        payer = await newAccountWithLamports(rpc, 2e9, 256);
        bob = await newAccountWithLamports(rpc, 2e9, 256);
        await compress(rpc, payer, 1e9, payer.publicKey);
    });
    const numberOfTransfers = 10;
    it(`should send compressed lamports alice -> bob for ${numberOfTransfers} transfers in a loop`, async () => {
        const transferAmount = 1000;
        for (let i = 0; i < numberOfTransfers; i++) {
            // Compressed balance = sum of lamports over all compressed
            // accounts owned by the key.
            const preSenderBalance = (
                await rpc.getCompressedAccountsByOwner(payer.publicKey)
            ).items.reduce((acc, account) => acc.add(account.lamports), bn(0));
            const preReceiverBalance = (
                await rpc.getCompressedAccountsByOwner(bob.publicKey)
            ).items.reduce((acc, account) => acc.add(account.lamports), bn(0));
            await transfer(rpc, payer, transferAmount, payer, bob.publicKey);
            const postSenderAccs = await rpc.getCompressedAccountsByOwner(
                payer.publicKey,
            );
            const postReceiverAccs = await rpc.getCompressedAccountsByOwner(
                bob.publicKey,
            );
            const postSenderBalance = postSenderAccs.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            const postReceiverBalance = postReceiverAccs.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            // Deltas must be exact: compressed transfers pay fees from the
            // on-chain (uncompressed) fee payer, not the compressed balance.
            assert(
                postSenderBalance.sub(preSenderBalance).eq(bn(-transferAmount)),
                `Iteration ${i + 1}: Sender balance should decrease by ${transferAmount}`,
            );
            assert(
                postReceiverBalance
                    .sub(preReceiverBalance)
                    .eq(bn(transferAmount)),
                `Iteration ${i + 1}: Receiver balance should increase by ${transferAmount}`,
            );
        }
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/serde.test.ts
|
import { describe, it, expect } from 'vitest';
import { LightSystemProgram } from '../../src/programs';
import {
CompressedAccount,
PublicTransactionEvent,
bn,
useWallet,
} from '../../src';
import { Connection, Keypair, PublicKey } from '@solana/web3.js';
import { AnchorProvider, Program, setProvider } from '@coral-xyz/anchor';
import { IDL } from '../../src/idls/account_compression';
/**
 * Smoke test: the account-compression Anchor program can be instantiated
 * from its bundled IDL against a local validator endpoint. No RPC calls are
 * made; only object construction is exercised.
 */
describe('account compression program', () => {
    it('instantiate using IDL', async () => {
        const mockKeypair = Keypair.generate();
        const mockConnection = new Connection(
            'http://127.0.0.1:8899',
            'confirmed',
        );
        const mockProvider = new AnchorProvider(
            mockConnection,
            useWallet(mockKeypair),
            {
                commitment: 'confirmed',
                preflightCommitment: 'confirmed',
            },
        );
        setProvider(mockProvider);
        // Hard-coded program id of the account-compression program.
        const program = new Program(
            IDL,
            new PublicKey('5QPEJ5zDsVou9FQS3KCauKswM3VwBEBu4dpL9xTqkWwN'),
            mockProvider,
        );
        expect(program).toBeDefined();
    });
});
/**
 * Serde tests: decode fixed byte fixtures with the light-system program's
 * Anchor coder and assert selected fields. The fixtures are captured
 * on-chain byte dumps; do not edit them.
 */
describe('serde', () => {
    it('decode output compressed account ', async () => {
        // Borsh-serialized CompressedAccount fixture (owner + zeroed tail).
        const compressedAccount = [
            88, 8, 48, 185, 124, 227, 14, 195, 230, 152, 61, 39, 56, 191, 13,
            126, 54, 43, 47, 131, 175, 16, 52, 167, 129, 174, 200, 118, 174, 9,
            254, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ];
        const deserializedCompressedAccount: CompressedAccount =
            LightSystemProgram.program.coder.types.decode(
                'CompressedAccount',
                Buffer.from(compressedAccount),
            );
        // Optional fields decode to null; lamports to zero.
        expect(deserializedCompressedAccount.data).toBe(null);
        expect(deserializedCompressedAccount.address).toBe(null);
        expect(deserializedCompressedAccount.lamports.eq(bn(0))).toBe(true);
    });
    it('decode event ', async () => {
        // Serialized PublicTransactionEvent fixture.
        const data = [
            0, 0, 0, 0, 1, 0, 0, 0, 33, 32, 204, 221, 5, 83, 170, 139, 228, 191,
            81, 173, 10, 116, 229, 191, 155, 209, 23, 164, 28, 64, 188, 34, 248,
            127, 110, 97, 26, 188, 139, 164, 0, 0, 0, 0, 1, 0, 0, 0, 22, 143,
            135, 215, 254, 121, 58, 95, 241, 202, 91, 53, 255, 47, 224, 255, 67,
            218, 48, 172, 51, 208, 29, 102, 177, 187, 207, 73, 108, 18, 59, 255,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1, 0, 0, 0, 68, 77, 125, 32, 76, 128, 61, 180, 1, 207, 69,
            44, 121, 118, 153, 17, 179, 183, 115, 34, 163, 127, 102, 214, 1, 87,
            175, 177, 95, 49, 65, 69, 0,
        ];
        const event: PublicTransactionEvent =
            LightSystemProgram.program.coder.types.decode(
                'PublicTransactionEvent',
                Buffer.from(data),
            );
        // Expected first output-account hash (32 bytes, big-endian).
        const refOutputCompressedAccountHash = [
            33, 32, 204, 221, 5, 83, 170, 139, 228, 191, 81, 173, 10, 116, 229,
            191, 155, 209, 23, 164, 28, 64, 188, 34, 248, 127, 110, 97, 26, 188,
            139, 164,
        ];
        expect(
            bn(event.outputCompressedAccountHashes[0]).eq(
                bn(refOutputCompressedAccountHash),
            ),
        ).toBe(true);
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/test-rpc.test.ts
|
import { describe, it, assert, beforeAll, expect } from 'vitest';
import { Signer } from '@solana/web3.js';
import {
STATE_MERKLE_TREE_NETWORK_FEE,
STATE_MERKLE_TREE_ROLLOVER_FEE,
defaultTestStateTreeAccounts,
} from '../../src/constants';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { compress, decompress, transfer } from '../../src/actions';
import { bn, CompressedAccountWithMerkleContext } from '../../src/state';
import { getTestRpc, TestRpc } from '../../src/test-helpers/test-rpc';
import { WasmFactory } from '@lightprotocol/hasher.rs';
/// TODO: add test case for payer != address
/**
 * End-to-end tests for the TestRpc implementation against a local validator.
 *
 * NOTE: the `it` blocks share mutable state (pre/post balances, payer) and
 * depend on running in declaration order — do not parallelize or reorder.
 */
describe('test-rpc', () => {
    const { merkleTree } = defaultTestStateTreeAccounts();
    let rpc: TestRpc;
    let payer: Signer;
    let preCompressBalance: number;
    let postCompressBalance: number;
    let compressLamportsAmount: number;
    let compressedTestAccount: CompressedAccountWithMerkleContext;
    let refPayer: Signer;
    // Amount compressed for refPayer; checked again in 'getCompressedBalance'.
    const refCompressLamports = 1e7;
    beforeAll(async () => {
        const lightWasm = await WasmFactory.getInstance();
        rpc = await getTestRpc(lightWasm);
        refPayer = await newAccountWithLamports(rpc, 1e9, 200);
        payer = await newAccountWithLamports(rpc, 1e9, 148);
        /// compress refPayer
        await compress(
            rpc,
            refPayer,
            refCompressLamports,
            refPayer.publicKey,
            merkleTree,
        );
        /// compress
        compressLamportsAmount = 1e7;
        preCompressBalance = await rpc.getBalance(payer.publicKey);
        await compress(
            rpc,
            payer,
            compressLamportsAmount,
            payer.publicKey,
            merkleTree,
        );
    });
    it('getCompressedAccountsByOwner', async () => {
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        compressedTestAccount = compressedAccounts.items[0];
        assert.equal(compressedAccounts.items.length, 1);
        assert.equal(
            Number(compressedTestAccount.lamports),
            compressLamportsAmount,
        );
        assert.equal(
            compressedTestAccount.owner.toBase58(),
            payer.publicKey.toBase58(),
        );
        assert.equal(compressedTestAccount.data?.data, null);
        postCompressBalance = await rpc.getBalance(payer.publicKey);
        // 5000 is presumably the Solana base transaction fee — TODO confirm.
        assert.equal(
            postCompressBalance,
            preCompressBalance -
                compressLamportsAmount -
                5000 -
                STATE_MERKLE_TREE_ROLLOVER_FEE.toNumber(),
        );
    });
    it('getCompressedAccountProof for refPayer', async () => {
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const refHash = compressedAccounts.items[0].hash;
        const compressedAccountProof = await rpc.getCompressedAccountProof(
            bn(refHash),
        );
        const proof = compressedAccountProof.merkleProof.map(x => x.toString());
        // Proof length 26 matches the state tree height used by the test setup.
        expect(proof.length).toStrictEqual(26);
        expect(compressedAccountProof.hash).toStrictEqual(refHash);
        expect(compressedAccountProof.leafIndex).toStrictEqual(
            compressedAccounts.items[0].leafIndex,
        );
        expect(compressedAccountProof.rootIndex).toStrictEqual(2);
        preCompressBalance = await rpc.getBalance(payer.publicKey);
        // Self-transfer: nullifies the input account, produces a new output.
        await transfer(
            rpc,
            payer,
            compressLamportsAmount,
            payer,
            payer.publicKey,
            merkleTree,
        );
        const compressedAccounts1 = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        expect(compressedAccounts1.items.length).toStrictEqual(1);
        postCompressBalance = await rpc.getBalance(payer.publicKey);
        // Transfer pays base fee + rollover fee + network fee from the payer.
        assert.equal(
            postCompressBalance,
            preCompressBalance -
                5000 -
                STATE_MERKLE_TREE_ROLLOVER_FEE.toNumber() -
                STATE_MERKLE_TREE_NETWORK_FEE.toNumber(),
        );
        await compress(rpc, payer, compressLamportsAmount, payer.publicKey);
        const compressedAccounts2 = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        expect(compressedAccounts2.items.length).toStrictEqual(2);
    });
    it('getCompressedAccountProof: get many valid proofs (10)', async () => {
        // Each decompress fetches and consumes a fresh validity proof.
        for (let lamports = 1; lamports <= 10; lamports++) {
            await decompress(rpc, payer, lamports, payer.publicKey);
        }
    });
    it('getIndexerHealth', async () => {
        /// getHealth
        const health = await rpc.getIndexerHealth();
        assert.strictEqual(health, 'ok');
    });
    it('getIndexerSlot / getSlot', async () => {
        const slot = await rpc.getIndexerSlot();
        const slotWeb3 = await rpc.getSlot();
        assert(slot > 0);
        assert(slotWeb3 > 0);
    });
    it('getCompressedAccount', async () => {
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const refHash = compressedAccounts.items[0].hash;
        /// getCompressedAccount
        // Lookup by hash only; address lookup is passed as undefined.
        const compressedAccount = await rpc.getCompressedAccount(
            undefined,
            bn(refHash),
        );
        assert(compressedAccount !== null);
        assert.equal(
            compressedAccount.owner.toBase58(),
            payer.publicKey.toBase58(),
        );
        assert.equal(compressedAccount.data, null);
    });
    it('getCompressedBalance', async () => {
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            refPayer.publicKey,
        );
        const refHash = compressedAccounts.items[0].hash;
        /// getCompressedBalance
        // Address-based lookup is unsupported in test-rpc and must reject.
        await expect(rpc.getCompressedBalance(bn(refHash))).rejects.toThrow(
            'address is not supported in test-rpc',
        );
        const compressedBalance = await rpc.getCompressedBalance(
            undefined,
            bn(refHash),
        );
        expect(compressedBalance?.eq(bn(refCompressLamports))).toBeTruthy();
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/compress.test.ts
|
import { describe, it, assert, beforeAll, expect } from 'vitest';
import { Signer } from '@solana/web3.js';
import {
STATE_MERKLE_TREE_NETWORK_FEE,
ADDRESS_QUEUE_ROLLOVER_FEE,
STATE_MERKLE_TREE_ROLLOVER_FEE,
defaultTestStateTreeAccounts,
ADDRESS_TREE_NETWORK_FEE,
} from '../../src/constants';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { Rpc } from '../../src/rpc';
import {
LightSystemProgram,
bn,
compress,
createAccount,
createAccountWithLamports,
decompress,
} from '../../src';
import { TestRpc, getTestRpc } from '../../src/test-helpers/test-rpc';
import { WasmFactory } from '@lightprotocol/hasher.rs';
/// TODO: make available to developers via utils
/**
 * Computes the total fee, in lamports, for a batch of transactions.
 *
 * Per transaction, the total is the sum of:
 * - the Solana base fee (defaults to 5000; pass `base: 0` to omit it),
 * - a state-tree rollover fee per output compressed account (`out`),
 * - an address-queue rollover fee per new address (`addr`),
 * - a network fee when at least one input account is nullified (`in`),
 * - a network fee when at least one new address is created (`addr`).
 */
function txFees(
    txs: {
        in: number;
        out: number;
        addr?: number;
        base?: number;
    }[],
): number {
    let totalFee = bn(0);
    txs.forEach(tx => {
        // `??` (not `||`) so an explicit `base: 0` is respected instead of
        // falling back to the 5000-lamport default.
        const solanaBaseFee = bn(tx.base ?? 5000);
        /// Fee per output
        const stateOutFee = STATE_MERKLE_TREE_ROLLOVER_FEE.mul(bn(tx.out));
        /// Fee per new address created
        const addrFee = tx.addr
            ? ADDRESS_QUEUE_ROLLOVER_FEE.mul(bn(tx.addr))
            : bn(0);
        /// Fee if the tx nullifies at least one input account
        const networkInFee = tx.in ? STATE_MERKLE_TREE_NETWORK_FEE : bn(0);
        /// Fee if the tx creates at least one address
        const networkAddressFee = tx.addr ? ADDRESS_TREE_NETWORK_FEE : bn(0);
        totalFee = totalFee.add(
            solanaBaseFee
                .add(stateOutFee)
                .add(addrFee)
                .add(networkInFee)
                .add(networkAddressFee),
        );
    });
    return totalFee.toNumber();
}
/// TODO: add test case for payer != address
/**
 * End-to-end tests for compress / decompress / createAccount flows against
 * a local test validator, with exact fee accounting via `txFees`.
 */
describe('compress', () => {
    const { merkleTree } = defaultTestStateTreeAccounts();
    let rpc: Rpc;
    let payer: Signer;
    beforeAll(async () => {
        const lightWasm = await WasmFactory.getInstance();
        rpc = await getTestRpc(lightWasm);
        payer = await newAccountWithLamports(rpc, 1e9, 256);
    });
    it('should create account with address', async () => {
        const preCreateAccountsBalance = await rpc.getBalance(payer.publicKey);
        // Four account creations with distinct 32-byte address seeds.
        await createAccount(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                ]),
            ],
            LightSystemProgram.programId,
        );
        await createAccountWithLamports(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 255, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                ]),
            ],
            0,
            LightSystemProgram.programId,
        );
        await createAccount(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 1,
                ]),
            ],
            LightSystemProgram.programId,
        );
        await createAccount(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 2,
                ]),
            ],
            LightSystemProgram.programId,
        );
        // Re-using an already-created address seed must fail.
        await expect(
            createAccount(
                rpc as TestRpc,
                payer,
                [
                    new Uint8Array([
                        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                        17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
                        31, 2,
                    ]),
                ],
                LightSystemProgram.programId,
            ),
        ).rejects.toThrow();
        const postCreateAccountsBalance = await rpc.getBalance(payer.publicKey);
        // Only the four successful creations are charged; the rejected one
        // does not appear in the fee accounting.
        assert.equal(
            postCreateAccountsBalance,
            preCreateAccountsBalance -
                txFees([
                    { in: 0, out: 1, addr: 1 },
                    { in: 0, out: 1, addr: 1 },
                    { in: 0, out: 1, addr: 1 },
                    { in: 0, out: 1, addr: 1 },
                ]),
        );
    });
    it('should compress lamports and create an account with address and lamports', async () => {
        // Fresh payer so balance assertions start from exactly 1 SOL.
        payer = await newAccountWithLamports(rpc, 1e9, 256);
        const compressLamportsAmount = 1e7;
        const preCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(preCompressBalance, 1e9);
        await compress(rpc, payer, compressLamportsAmount, payer.publicKey);
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        assert.equal(compressedAccounts.items.length, 1);
        assert.equal(
            Number(compressedAccounts.items[0].lamports),
            compressLamportsAmount,
        );
        assert.equal(compressedAccounts.items[0].data, null);
        const postCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(
            postCompressBalance,
            preCompressBalance -
                compressLamportsAmount -
                txFees([{ in: 0, out: 1 }]),
        );
        await createAccountWithLamports(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 255, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                ]),
            ],
            100,
            LightSystemProgram.programId,
        );
        const postCreateAccountBalance = await rpc.getBalance(payer.publicKey);
        // One input nullified, two outputs, one new address created.
        assert.equal(
            postCreateAccountBalance,
            postCompressBalance - txFees([{ in: 1, out: 2, addr: 1 }]),
        );
    });
    // NOTE(review): same title as the previous test, but this one exercises
    // the decompress path — consider renaming for clearer reports.
    it('should compress lamports and create an account with address and lamports', async () => {
        payer = await newAccountWithLamports(rpc, 1e9, 256);
        const compressLamportsAmount = 1e7;
        const preCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(preCompressBalance, 1e9);
        await compress(
            rpc,
            payer,
            compressLamportsAmount,
            payer.publicKey,
            merkleTree,
        );
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        assert.equal(compressedAccounts.items.length, 1);
        assert.equal(
            Number(compressedAccounts.items[0].lamports),
            compressLamportsAmount,
        );
        assert.equal(compressedAccounts.items[0].data, null);
        const postCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(
            postCompressBalance,
            preCompressBalance -
                compressLamportsAmount -
                txFees([{ in: 0, out: 1 }]),
        );
        /// Decompress
        const decompressLamportsAmount = 1e6;
        const decompressRecipient = payer.publicKey;
        await decompress(
            rpc,
            payer,
            decompressLamportsAmount,
            decompressRecipient,
        );
        const compressedAccounts2 = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        // Decompress consumes the account and writes back the remainder.
        assert.equal(compressedAccounts2.items.length, 1);
        assert.equal(
            Number(compressedAccounts2.items[0].lamports),
            compressLamportsAmount - decompressLamportsAmount,
        );
        await decompress(rpc, payer, 1, decompressRecipient, merkleTree);
        const postDecompressBalance = await rpc.getBalance(decompressRecipient);
        // Recipient regains the decompressed lamports minus fees for the
        // two decompress transactions (each: one input, one output).
        assert.equal(
            postDecompressBalance,
            postCompressBalance +
                decompressLamportsAmount +
                1 -
                txFees([
                    { in: 1, out: 1 },
                    { in: 1, out: 1 },
                ]),
        );
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/testnet.test.ts
|
import { describe, it, assert, beforeAll } from 'vitest';
import { Signer } from '@solana/web3.js';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { createRpc, Rpc } from '../../src/rpc';
import { bn, compress } from '../../src';
import { transfer } from '../../src/actions/transfer';
import { getTestRpc } from '../../src/test-helpers/test-rpc';
import { WasmFactory } from '@lightprotocol/hasher.rs';
/**
 * End-to-end test against the hosted zk-testnet endpoints (validator,
 * Photon indexer, prover). Mirrors the local `transfer` e2e test; requires
 * network access and live testnet services to pass.
 */
describe('testnet transfer', () => {
    let rpc: Rpc;
    let payer: Signer;
    let bob: Signer;
    beforeAll(async () => {
        // Hosted testnet service endpoints.
        const validatorUrl = 'https://zk-testnet.helius.dev:8899';
        const photonUrl = 'https://zk-testnet.helius.dev:8784';
        const proverUrl = 'https://zk-testnet.helius.dev:3001';
        rpc = createRpc(validatorUrl, photonUrl, proverUrl);
        payer = await newAccountWithLamports(rpc, 2e9, 256);
        bob = await newAccountWithLamports(rpc, 2e9, 256);
        await compress(rpc, payer, 1e9, payer.publicKey);
    });
    const numberOfTransfers = 10;
    it(`should send compressed lamports alice -> bob for ${numberOfTransfers} transfers in a loop`, async () => {
        const transferAmount = 1000;
        for (let i = 0; i < numberOfTransfers; i++) {
            // Compressed balance = sum over all owned compressed accounts.
            const preSenderBalance = (
                await rpc.getCompressedAccountsByOwner(payer.publicKey)
            ).items.reduce((acc, account) => acc.add(account.lamports), bn(0));
            const preReceiverBalance = (
                await rpc.getCompressedAccountsByOwner(bob.publicKey)
            ).items.reduce((acc, account) => acc.add(account.lamports), bn(0));
            await transfer(rpc, payer, transferAmount, payer, bob.publicKey);
            const postSenderAccs = await rpc.getCompressedAccountsByOwner(
                payer.publicKey,
            );
            const postReceiverAccs = await rpc.getCompressedAccountsByOwner(
                bob.publicKey,
            );
            const postSenderBalance = postSenderAccs.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            const postReceiverBalance = postReceiverAccs.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            assert(
                postSenderBalance.sub(preSenderBalance).eq(bn(-transferAmount)),
                `Iteration ${i + 1}: Sender balance should decrease by ${transferAmount}`,
            );
            assert(
                postReceiverBalance
                    .sub(preReceiverBalance)
                    .eq(bn(transferAmount)),
                `Iteration ${i + 1}: Receiver balance should increase by ${transferAmount}`,
            );
        }
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/rpc-interop.test.ts
|
import { describe, it, assert, beforeAll, expect } from 'vitest';
import { PublicKey, Signer } from '@solana/web3.js';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { Rpc, createRpc } from '../../src/rpc';
import {
LightSystemProgram,
bn,
compress,
createAccount,
createAccountWithLamports,
defaultTestStateTreeAccounts,
deriveAddress,
deriveAddressSeed,
sleep,
} from '../../src';
import { getTestRpc, TestRpc } from '../../src/test-helpers/test-rpc';
import { transfer } from '../../src/actions/transfer';
import { WasmFactory } from '@lightprotocol/hasher.rs';
import { randomBytes } from 'tweetnacl';
describe('rpc-interop', () => {
let payer: Signer;
let bob: Signer;
let rpc: Rpc;
let testRpc: TestRpc;
let executedTxs = 0;
beforeAll(async () => {
const lightWasm = await WasmFactory.getInstance();
rpc = createRpc();
testRpc = await getTestRpc(lightWasm);
/// These are constant test accounts in between test runs
payer = await newAccountWithLamports(rpc, 10e9, 256);
bob = await newAccountWithLamports(rpc, 10e9, 256);
await compress(rpc, payer, 1e9, payer.publicKey);
executedTxs++;
});
const transferAmount = 1e4;
const numberOfTransfers = 15;
it('getCompressedAccountsByOwner [noforester] filter should work', async () => {
let accs = await rpc.getCompressedAccountsByOwner(payer.publicKey, {
filters: [
{
memcmp: {
offset: 1,
bytes: '5Vf',
},
},
],
});
assert.equal(accs.items.length, 0);
accs = await rpc.getCompressedAccountsByOwner(payer.publicKey, {
dataSlice: { offset: 1, length: 2 },
});
assert.equal(accs.items.length, 1);
});
it('getValidityProof [noforester] (inclusion) should match', async () => {
const senderAccounts = await rpc.getCompressedAccountsByOwner(
payer.publicKey,
);
const senderAccountsTest = await testRpc.getCompressedAccountsByOwner(
payer.publicKey,
);
const hash = bn(senderAccounts.items[0].hash);
const hashTest = bn(senderAccountsTest.items[0].hash);
// accounts are the same
assert.isTrue(hash.eq(hashTest));
const validityProof = await rpc.getValidityProof([hash]);
const validityProofTest = await testRpc.getValidityProof([hashTest]);
validityProof.leafIndices.forEach((leafIndex, index) => {
assert.equal(leafIndex, validityProofTest.leafIndices[index]);
});
validityProof.leaves.forEach((leaf, index) => {
assert.isTrue(leaf.eq(validityProofTest.leaves[index]));
});
validityProof.roots.forEach((elem, index) => {
assert.isTrue(elem.eq(validityProofTest.roots[index]));
});
validityProof.rootIndices.forEach((elem, index) => {
assert.equal(elem, validityProofTest.rootIndices[index]);
});
validityProof.merkleTrees.forEach((elem, index) => {
assert.isTrue(elem.equals(validityProofTest.merkleTrees[index]));
});
validityProof.nullifierQueues.forEach((elem, index) => {
assert.isTrue(
elem.equals(validityProofTest.nullifierQueues[index]),
);
});
/// Executes a transfer using a 'validityProof' from Photon
await transfer(rpc, payer, 1e5, payer, bob.publicKey);
executedTxs++;
/// Executes a transfer using a 'validityProof' directly from a prover.
await transfer(testRpc, payer, 1e5, payer, bob.publicKey);
executedTxs++;
});
    // Compares non-inclusion (new-address) validity proofs from Photon against
    // the local test-rpc prover for the same derived address, then creates one
    // account via each provider (with distinct seeds, since an address can
    // only be created once).
    it('getValidityProof [noforester] (new-addresses) should match', async () => {
        const newAddressSeeds = [
            new Uint8Array([
                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 42, 42, 42, 14, 15, 16, 11, 18,
                19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
            ]),
        ];
        const newAddressSeed = deriveAddressSeed(
            newAddressSeeds,
            LightSystemProgram.programId,
        );
        const newAddress = bn(deriveAddress(newAddressSeed).toBuffer());
        /// consistent proof metadata for same address
        const validityProof = await rpc.getValidityProof([], [newAddress]);
        const validityProofTest = await testRpc.getValidityProof(
            [],
            [newAddress],
        );
        validityProof.leafIndices.forEach((leafIndex, index) => {
            assert.equal(leafIndex, validityProofTest.leafIndices[index]);
        });
        validityProof.leaves.forEach((leaf, index) => {
            assert.isTrue(leaf.eq(validityProofTest.leaves[index]));
        });
        validityProof.roots.forEach((elem, index) => {
            assert.isTrue(elem.eq(validityProofTest.roots[index]));
        });
        validityProof.rootIndices.forEach((elem, index) => {
            assert.equal(elem, validityProofTest.rootIndices[index]);
        });
        validityProof.merkleTrees.forEach((elem, index) => {
            assert.isTrue(elem.equals(validityProofTest.merkleTrees[index]));
        });
        validityProof.nullifierQueues.forEach((elem, index) => {
            assert.isTrue(
                elem.equals(validityProofTest.nullifierQueues[index]),
            );
        });
        /// Need a new unique address because the previous one has been created.
        const newAddressSeedsTest = [
            new Uint8Array([
                2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 42, 42, 42, 14, 15, 16, 17, 18,
                19, 20, 21, 22, 23, 24, 25, 26, 27, 32, 29, 30, 31, 32,
            ]),
        ];
        /// Creates a compressed account with address using a (non-inclusion)
        /// 'validityProof' from Photon
        await createAccount(
            rpc,
            payer,
            newAddressSeedsTest,
            LightSystemProgram.programId,
        );
        executedTxs++;
        /// Creates a compressed account with address using a (non-inclusion)
        /// 'validityProof' directly from a prover.
        await createAccount(
            testRpc,
            payer,
            newAddressSeeds,
            LightSystemProgram.programId,
        );
        executedTxs++;
    });
    // Compares combined (inclusion + non-inclusion) validity proofs from
    // Photon against the local test-rpc prover: an existing account hash plus
    // a not-yet-created address in the same proof request. Also cross-checks
    // the underlying account proofs and new-address proofs field by field,
    // then spends the combined proof via createAccountWithLamports.
    it('getValidityProof [noforester] (combined) should match', async () => {
        const senderAccountsTest = await testRpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        // wait for photon to be in sync
        await sleep(3000);
        const senderAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const hashTest = bn(senderAccountsTest.items[0].hash);
        const hash = bn(senderAccounts.items[0].hash);
        // accounts are the same
        assert.isTrue(hash.eq(hashTest));
        const newAddressSeeds = [
            new Uint8Array([
                1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 42, 32, 42, 14, 15, 16, 17, 18,
                19, 20, 21, 22, 23, 24, 32, 32, 27, 28, 29, 30, 31, 32,
            ]),
        ];
        const newAddressSeed = deriveAddressSeed(
            newAddressSeeds,
            LightSystemProgram.programId,
        );
        const newAddress = bn(deriveAddress(newAddressSeed).toBytes());
        const validityProof = await rpc.getValidityProof([hash], [newAddress]);
        const validityProofTest = await testRpc.getValidityProof(
            [hashTest],
            [newAddress],
        );
        // compressedAccountProofs should match
        const compressedAccountProof = (
            await rpc.getMultipleCompressedAccountProofs([hash])
        )[0];
        const compressedAccountProofTest = (
            await testRpc.getMultipleCompressedAccountProofs([hashTest])
        )[0];
        compressedAccountProof.merkleProof.forEach((proof, index) => {
            assert.isTrue(
                proof.eq(compressedAccountProofTest.merkleProof[index]),
            );
        });
        // newAddressProofs should match
        const newAddressProof = (
            await rpc.getMultipleNewAddressProofs([newAddress])
        )[0];
        const newAddressProofTest = (
            await testRpc.getMultipleNewAddressProofs([newAddress])
        )[0];
        assert.isTrue(
            newAddressProof.indexHashedIndexedElementLeaf.eq(
                newAddressProofTest.indexHashedIndexedElementLeaf,
            ),
        );
        assert.isTrue(
            newAddressProof.leafHigherRangeValue.eq(
                newAddressProofTest.leafHigherRangeValue,
            ),
        );
        assert.isTrue(
            newAddressProof.nextIndex.eq(newAddressProofTest.nextIndex),
        );
        assert.isTrue(
            newAddressProof.leafLowerRangeValue.eq(
                newAddressProofTest.leafLowerRangeValue,
            ),
        );
        assert.isTrue(
            newAddressProof.merkleTree.equals(newAddressProofTest.merkleTree),
        );
        assert.isTrue(
            newAddressProof.nullifierQueue.equals(
                newAddressProofTest.nullifierQueue,
            ),
        );
        assert.isTrue(newAddressProof.root.eq(newAddressProofTest.root));
        assert.isTrue(newAddressProof.value.eq(newAddressProofTest.value));
        // validity proof metadata should match
        validityProof.leafIndices.forEach((leafIndex, index) => {
            assert.equal(leafIndex, validityProofTest.leafIndices[index]);
        });
        validityProof.leaves.forEach((leaf, index) => {
            assert.isTrue(leaf.eq(validityProofTest.leaves[index]));
        });
        validityProof.roots.forEach((elem, index) => {
            assert.isTrue(elem.eq(validityProofTest.roots[index]));
        });
        validityProof.rootIndices.forEach((elem, index) => {
            assert.equal(elem, validityProofTest.rootIndices[index]);
        });
        validityProof.merkleTrees.forEach((elem, index) => {
            assert.isTrue(elem.equals(validityProofTest.merkleTrees[index]));
        });
        validityProof.nullifierQueues.forEach((elem, index) => {
            assert.isTrue(
                elem.equals(validityProofTest.nullifierQueues[index]),
                'Mismatch in nullifierQueues expected: ' +
                    elem +
                    ' got: ' +
                    validityProofTest.nullifierQueues[index],
            );
        });
        /// Creates a compressed account with address and lamports using a
        /// (combined) 'validityProof' from Photon
        await createAccountWithLamports(
            rpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 255, 4, 5, 6, 7, 8, 9, 10, 11, 111, 13, 14, 15, 16,
                    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 32, 29, 30, 31,
                    32,
                ]),
            ],
            0,
            LightSystemProgram.programId,
        );
        executedTxs++;
    });
/// This assumes support for getMultipleNewAddressProofs in Photon.
it('getMultipleNewAddressProofs [noforester] should match', async () => {
const newAddress = bn(
new Uint8Array([
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 42, 42, 42, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
]),
);
const newAddressProof = (
await rpc.getMultipleNewAddressProofs([newAddress])
)[0];
const newAddressProofTest = (
await testRpc.getMultipleNewAddressProofs([newAddress])
)[0];
assert.isTrue(
newAddressProof.indexHashedIndexedElementLeaf.eq(
newAddressProofTest.indexHashedIndexedElementLeaf,
),
);
assert.isTrue(
newAddressProof.leafHigherRangeValue.eq(
newAddressProofTest.leafHigherRangeValue,
),
`Mismatch in leafHigherRangeValue expected: ${newAddressProofTest.leafHigherRangeValue} got: ${newAddressProof.leafHigherRangeValue}`,
);
assert.isTrue(
newAddressProof.nextIndex.eq(newAddressProofTest.nextIndex),
`Mismatch in leafHigherRangeValue expected: ${newAddressProofTest.nextIndex} got: ${newAddressProof.nextIndex}`,
);
assert.isTrue(
newAddressProof.leafLowerRangeValue.eq(
newAddressProofTest.leafLowerRangeValue,
),
);
assert.isTrue(
newAddressProof.merkleTree.equals(newAddressProofTest.merkleTree),
);
assert.isTrue(
newAddressProof.nullifierQueue.equals(
newAddressProofTest.nullifierQueue,
),
`Mismatch in nullifierQueue expected: ${newAddressProofTest.nullifierQueue} got: ${newAddressProof.nullifierQueue}`,
);
assert.isTrue(newAddressProof.root.eq(newAddressProofTest.root));
assert.isTrue(newAddressProof.value.eq(newAddressProofTest.value));
newAddressProof.merkleProofHashedIndexedElementLeaf.forEach(
(elem, index) => {
const expected =
newAddressProofTest.merkleProofHashedIndexedElementLeaf[
index
];
assert.isTrue(
elem.eq(expected),
`Mismatch in merkleProofHashedIndexedElementLeaf expected: ${expected.toString()} got: ${elem.toString()}`,
);
},
);
});
    // Runs `numberOfTransfers` rounds: each round checks that Photon's account
    // proofs match the test-rpc reference, performs a transfer, and asserts
    // that the compressed sender/receiver balances change by exactly
    // `transferAmount`.
    it('getMultipleCompressedAccountProofs in transfer loop should match', async () => {
        for (let round = 0; round < numberOfTransfers; round++) {
            const prePayerAccounts = await rpc.getCompressedAccountsByOwner(
                payer.publicKey,
            );
            const preSenderBalance = prePayerAccounts.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            const preReceiverAccounts = await rpc.getCompressedAccountsByOwner(
                bob.publicKey,
            );
            const preReceiverBalance = preReceiverAccounts.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            /// get reference proofs for sender
            const testProofs = await testRpc.getMultipleCompressedAccountProofs(
                prePayerAccounts.items.map(account => bn(account.hash)),
            );
            /// get photon proofs for sender
            const proofs = await rpc.getMultipleCompressedAccountProofs(
                prePayerAccounts.items.map(account => bn(account.hash)),
            );
            /// compare each proof by node and root
            assert.equal(testProofs.length, proofs.length);
            proofs.forEach((proof, index) => {
                proof.merkleProof.forEach((elem, elemIndex) => {
                    assert.isTrue(
                        bn(elem).eq(
                            bn(testProofs[index].merkleProof[elemIndex]),
                        ),
                    );
                });
            });
            assert.isTrue(bn(proofs[0].root).eq(bn(testProofs[0].root)));
            await transfer(rpc, payer, transferAmount, payer, bob.publicKey);
            executedTxs++;
            const postSenderAccs = await rpc.getCompressedAccountsByOwner(
                payer.publicKey,
            );
            const postReceiverAccs = await rpc.getCompressedAccountsByOwner(
                bob.publicKey,
            );
            const postSenderBalance = postSenderAccs.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            const postReceiverBalance = postReceiverAccs.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            assert(
                postSenderBalance.sub(preSenderBalance).eq(bn(-transferAmount)),
                `Iteration ${round + 1}: Sender balance should decrease by ${transferAmount}`,
            );
            assert(
                postReceiverBalance
                    .sub(preReceiverBalance)
                    .eq(bn(transferAmount)),
                `Iteration ${round + 1}: Receiver balance should increase by ${transferAmount}`,
            );
        }
    });
    // Asserts that Photon and the test-rpc return the same compressed account
    // set for both payer and bob. Receiver lists are sorted by lamports first
    // because the two providers do not guarantee the same ordering.
    it('getCompressedAccountsByOwner should match', async () => {
        const senderAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const senderAccountsTest = await testRpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        assert.equal(
            senderAccounts.items.length,
            senderAccountsTest.items.length,
        );
        senderAccounts.items.forEach((account, index) => {
            assert.equal(
                account.owner.toBase58(),
                senderAccountsTest.items[index].owner.toBase58(),
            );
            assert.isTrue(
                account.lamports.eq(senderAccountsTest.items[index].lamports),
            );
        });
        const receiverAccounts = await rpc.getCompressedAccountsByOwner(
            bob.publicKey,
        );
        const receiverAccountsTest = await testRpc.getCompressedAccountsByOwner(
            bob.publicKey,
        );
        assert.equal(
            receiverAccounts.items.length,
            receiverAccountsTest.items.length,
        );
        receiverAccounts.items.sort((a, b) =>
            a.lamports.sub(b.lamports).toNumber(),
        );
        receiverAccountsTest.items.sort((a, b) =>
            a.lamports.sub(b.lamports).toNumber(),
        );
        receiverAccounts.items.forEach((account, index) => {
            assert.equal(
                account.owner.toBase58(),
                receiverAccountsTest.items[index].owner.toBase58(),
            );
            assert.isTrue(
                account.lamports.eq(receiverAccountsTest.items[index].lamports),
            );
        });
    });
it('getCompressedAccount should match ', async () => {
const senderAccounts = await rpc.getCompressedAccountsByOwner(
payer.publicKey,
);
const compressedAccount = await rpc.getCompressedAccount(
undefined,
bn(senderAccounts.items[0].hash),
);
const compressedAccountTest = await testRpc.getCompressedAccount(
undefined,
bn(senderAccounts.items[0].hash),
);
assert.isTrue(
compressedAccount!.lamports.eq(compressedAccountTest!.lamports),
);
assert.isTrue(
compressedAccount!.owner.equals(compressedAccountTest!.owner),
);
assert.isNull(compressedAccount!.data);
assert.isNull(compressedAccountTest!.data);
});
    // Compresses additional lamports, then asserts that batched account
    // lookups by hash agree between Photon and the test-rpc.
    it('getMultipleCompressedAccounts should match', async () => {
        await compress(rpc, payer, 1e9, payer.publicKey);
        executedTxs++;
        const senderAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const compressedAccounts = await rpc.getMultipleCompressedAccounts(
            senderAccounts.items.map(account => bn(account.hash)),
        );
        const compressedAccountsTest =
            await testRpc.getMultipleCompressedAccounts(
                senderAccounts.items.map(account => bn(account.hash)),
            );
        assert.equal(compressedAccounts.length, compressedAccountsTest.length);
        compressedAccounts.forEach((account, index) => {
            assert.isTrue(
                account.lamports.eq(compressedAccountsTest[index].lamports),
            );
            assert.equal(
                account.owner.toBase58(),
                compressedAccountsTest[index].owner.toBase58(),
            );
            assert.isNull(account.data);
            assert.isNull(compressedAccountsTest[index].data);
        });
    });
    // Checks signature counts for an account before and after it is spent:
    // 1 signature while unspent (creation), 2 once nullified by a transfer.
    it('[test-rpc missing] getCompressionSignaturesForAccount should match', async () => {
        const senderAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const signaturesUnspent = await rpc.getCompressionSignaturesForAccount(
            bn(senderAccounts.items[0].hash),
        );
        /// most recent therefore unspent account
        assert.equal(signaturesUnspent.length, 1);
        /// Note: assumes largest-first selection mechanism
        const largestAccount = senderAccounts.items.reduce((acc, account) =>
            account.lamports.gt(acc.lamports) ? account : acc,
        );
        await transfer(rpc, payer, 1, payer, bob.publicKey);
        executedTxs++;
        const signaturesSpent = await rpc.getCompressionSignaturesForAccount(
            bn(largestAccount.hash),
        );
        /// 1 spent account, so always 2 signatures.
        assert.equal(signaturesSpent.length, 2);
    });
    // The owner's signature count must equal the number of transactions this
    // suite has executed so far (tracked in `executedTxs`).
    it('[test-rpc missing] getSignaturesForOwner should match', async () => {
        const signatures = await rpc.getCompressionSignaturesForOwner(
            payer.publicKey,
        );
        assert.equal(signatures.items.length, executedTxs);
    });
    // Non-voting signatures include the suite's transactions plus the test-env
    // setup transactions; the optional limit parameter caps the result.
    it('[test-rpc missing] getLatestNonVotingSignatures should match', async () => {
        const testEnvSetupTxs = 2;
        let signatures = (await rpc.getLatestNonVotingSignatures()).value.items;
        assert.isAtLeast(signatures.length, executedTxs + testEnvSetupTxs);
        signatures = (await rpc.getLatestNonVotingSignatures(2)).value.items;
        assert.equal(signatures.length, 2);
    });
    // Verifies pagination of compression signatures: a full fetch, a
    // limit-1 fetch, and a cursor-based follow-up that must return a
    // different signature.
    it('[test-rpc missing] getLatestCompressionSignatures should match', async () => {
        const { items: signatures } = (
            await rpc.getLatestCompressionSignatures()
        ).value;
        assert.isAtLeast(signatures.length, executedTxs);
        /// Should return 1 using limit param
        const { items: signatures2, cursor } = (
            await rpc.getLatestCompressionSignatures(undefined, 1)
        ).value;
        assert.equal(signatures2.length, 1);
        // wait for photon to be in sync
        await sleep(3000);
        const signatures3 = (
            await rpc.getLatestCompressionSignatures(cursor!, 1)
        ).value.items;
        /// cursor should work
        assert.notEqual(signatures2[0].signature, signatures3[0].signature);
    });
    // Fetches compression info for the owner's most recent transaction and
    // asserts the account counts of a transfer (1 closed input, 2 outputs).
    it('[test-rpc missing] getCompressedTransaction should match', async () => {
        const signatures = await rpc.getCompressionSignaturesForOwner(
            payer.publicKey,
        );
        const compressedTx = await rpc.getTransactionWithCompressionInfo(
            signatures.items[0].signature,
        );
        /// is transfer
        assert.equal(compressedTx?.compressionInfo.closedAccounts.length, 1);
        assert.equal(compressedTx?.compressionInfo.openedAccounts.length, 2);
    });
    // Creates an account at a random derived address and asserts that Photon
    // indexed the address and returns exactly one (creation) signature for it.
    it('[test-rpc missing] getCompressionSignaturesForAddress should work', async () => {
        const seeds = [new Uint8Array(randomBytes(32))];
        const seed = deriveAddressSeed(seeds, LightSystemProgram.programId);
        const addressTree = defaultTestStateTreeAccounts().addressTree;
        const address = deriveAddress(seed, addressTree);
        await createAccount(rpc, payer, seeds, LightSystemProgram.programId);
        // fetch the owners latest account
        const accounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const latestAccount = accounts.items[0];
        // assert the address was indexed
        assert.isTrue(new PublicKey(latestAccount.address!).equals(address));
        const signaturesUnspent = await rpc.getCompressionSignaturesForAddress(
            new PublicKey(latestAccount.address!),
        );
        /// most recent therefore unspent account
        assert.equal(signaturesUnspent.items.length, 1);
    });
    // Looks up the same account by hash and by address via Photon and asserts
    // both lookups resolve to the same address. The test-rpc does not support
    // address lookup, so it is expected to reject.
    it('getCompressedAccount with address param should work ', async () => {
        const seeds = [new Uint8Array(randomBytes(32))];
        const seed = deriveAddressSeed(seeds, LightSystemProgram.programId);
        const addressTree = defaultTestStateTreeAccounts().addressTree;
        const addressQueue = defaultTestStateTreeAccounts().addressQueue;
        const address = deriveAddress(seed, addressTree);
        await createAccount(
            rpc,
            payer,
            seeds,
            LightSystemProgram.programId,
            addressTree,
            addressQueue,
        );
        // fetch the owners latest account
        const accounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const latestAccount = accounts.items[0];
        assert.isTrue(new PublicKey(latestAccount.address!).equals(address));
        const compressedAccountByHash = await rpc.getCompressedAccount(
            undefined,
            bn(latestAccount.hash),
        );
        const compressedAccountByAddress = await rpc.getCompressedAccount(
            bn(latestAccount.address!),
            undefined,
        );
        // test-rpc has no address index; lookup by address must throw.
        await expect(
            testRpc.getCompressedAccount(bn(latestAccount.address!), undefined),
        ).rejects.toThrow();
        assert.isTrue(
            bn(compressedAccountByHash!.address!).eq(
                bn(compressedAccountByAddress!.address!),
            ),
        );
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/browser/rpc.browser.spec.ts
|
import { test, expect } from '@playwright/test';
import {
Rpc,
bn,
compress,
createRpc,
defaultTestStateTreeAccounts,
newAccountWithLamports,
} from '../../../src';
// Playwright suite that exercises the browser bundle of stateless.js against
// a local test validator. The test page exposes the SDK on `window.stateless`
// (see test-page.html); each test runs SDK calls inside page.evaluate.
test.describe('RPC in browser', () => {
    const { merkleTree } = defaultTestStateTreeAccounts();
    // Funds a payer and compresses 1 SOL so the page-side tests have an
    // account to query.
    // NOTE(review): `page` is a test-scoped fixture; Playwright does not
    // provide it to beforeAll (worker scope) — confirm this hook actually
    // runs; errors here are swallowed by the catch below.
    test.beforeAll(async ({ page }) => {
        try {
            const rpc = createRpc();
            const payer = await newAccountWithLamports(rpc, 1000005000, 100);
            await page.goto(
                'http://localhost:4004/tests/e2e/browser/test-page.html',
            );
            await page.waitForFunction(
                () => (window as any).stateless !== undefined,
            );
            await compress(rpc, payer, 1e9, payer.publicKey, merkleTree);
        } catch (error) {
            console.log('error: ', error);
        }
    });
    // Fix: was `test.only`, which silently skipped every other test in this
    // describe block.
    test('getCompressedAccountsByOwner', async ({ page }) => {
        const result = await page.evaluate(async () => {
            // @ts-ignore
            const sdk = window.stateless;
            const rpc: Rpc = sdk.createRpc();
            const payer = sdk.getTestKeypair(100);
            const compressedAccounts = await rpc.getCompressedAccountsByOwner(
                payer.publicKey,
            );
            if (!compressedAccounts)
                throw new Error('No compressed accounts found');
            return compressedAccounts;
        });
        expect(result.length).toEqual(1);
    });
    test('getCompressedAccount', async ({ page }) => {
        const result = await page.evaluate(async () => {
            //@ts-ignore
            const sdk = window.stateless;
            const rpc: Rpc = sdk.createRpc();
            const payer = sdk.getTestKeypair(100);
            const compressedAccounts = await rpc.getCompressedAccountsByOwner(
                payer.publicKey,
            );
            const hash = compressedAccounts[0].hash;
            //@ts-ignore
            const sdk2 = window.stateless;
            const rpc2: Rpc = sdk2.createRpc();
            let account: any;
            try {
                account = await rpc2.getCompressedAccount(bn(hash));
            } catch (error) {
                console.log('error: ', error);
                throw error;
            }
            if (!account) throw new Error('No compressed account found');
            return { account, owner: payer.publicKey };
        });
        expect(result.account.owner.equals(result.owner)).toBeTruthy();
    });
    test('getMultipleCompressedAccounts', async ({ page }) => {
        const result = await page.evaluate(async () => {
            //@ts-ignore
            const sdk = window.stateless;
            const rpc: Rpc = sdk.createRpc();
            const payer = sdk.getTestKeypair(100);
            const compressedAccounts = await rpc.getCompressedAccountsByOwner(
                payer.publicKey,
            );
            const hashes = compressedAccounts.map(account => bn(account.hash));
            const accounts = await rpc.getMultipleCompressedAccounts(hashes);
            if (!accounts || accounts.length === 0)
                throw new Error('No compressed accounts found');
            return accounts;
        });
        expect(result.length).toBeGreaterThan(0);
    });
    // TODO: enable
    // test('getCompressedTokenAccountsByOwner', async ({ page }) => {
    //     const result = await page.evaluate(async () => {
    //         //@ts-ignore
    //         const sdk = window.stateless;
    //         const rpc = sdk.createRpc();
    //         const payer = sdk.getTestKeypair(100);
    //         const compressedAccounts = await rpc.getCompressedAccountsByOwner(
    //             payer.publicKey,
    //         );
    //         const hash = compressedAccounts[0].hash;
    //         const accounts = await rpc.getCompressedTokenAccountsByOwner(owner);
    //         if (!accounts || accounts.length === 0)
    //             throw new Error('No token accounts found');
    //         return accounts;
    //     });
    //     assert.isTrue(result.length > 0);
    // });
    test('getHealth', async ({ page }) => {
        const result = await page.evaluate(async () => {
            //@ts-ignore
            const sdk = window.stateless;
            const rpc: Rpc = sdk.createRpc();
            const health = await rpc.getHealth();
            if (!health) throw new Error('Health check failed');
            return health;
        });
        expect(result).toEqual('ok');
    });
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/browser/test-page.html
|
<!doctype html>
<html lang="en">
    <head>
        <meta charset="UTF-8" />
        <title>Test Page</title>
    </head>
    <body>
        <!-- Harness page for the Playwright browser specs: loads the browser
             bundle and exposes it as window.stateless so tests can call the
             SDK from page.evaluate. -->
        <!-- <script type="module" src="/dist/es/index.js"></script> -->
        <script type="module">
            import * as stateless from '/dist/browser/index.js';
            window.stateless = stateless;
            console.log('HTML stateless: ', stateless);
        </script>
    </body>
</html>
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/errors.ts
|
// TODO: Clean up
// Machine-readable error codes, grouped by subsystem. Each enum member maps
// to an identical string so codes survive serialization unchanged.

/** Errors raised while validating or constructing UTXOs. */
export enum UtxoErrorCode {
    NEGATIVE_LAMPORTS = 'NEGATIVE_LAMPORTS',
    NOT_U64 = 'NOT_U64',
    BLINDING_EXCEEDS_FIELD_SIZE = 'BLINDING_EXCEEDS_FIELD_SIZE',
}
/** Errors raised while selecting input UTXOs for a transaction. */
export enum SelectInUtxosErrorCode {
    FAILED_TO_FIND_UTXO_COMBINATION = 'FAILED_TO_FIND_UTXO_COMBINATION',
    INVALID_NUMBER_OF_IN_UTXOS = 'INVALID_NUMBER_OF_IN_UTXOS',
}
/** Errors raised while creating output UTXOs. */
export enum CreateUtxoErrorCode {
    OWNER_UNDEFINED = 'OWNER_UNDEFINED',
    INVALID_OUTPUT_UTXO_LENGTH = 'INVALID_OUTPUT_UTXO_LENGTH',
    UTXO_DATA_UNDEFINED = 'UTXO_DATA_UNDEFINED',
}
/** Errors related to RPC configuration and method availability. */
export enum RpcErrorCode {
    CONNECTION_UNDEFINED = 'CONNECTION_UNDEFINED',
    RPC_PUBKEY_UNDEFINED = 'RPC_PUBKEY_UNDEFINED',
    RPC_METHOD_NOT_IMPLEMENTED = 'RPC_METHOD_NOT_IMPLEMENTED',
    RPC_INVALID = 'RPC_INVALID',
}
/** Errors related to address lookup tables. */
export enum LookupTableErrorCode {
    LOOK_UP_TABLE_UNDEFINED = 'LOOK_UP_TABLE_UNDEFINED',
    LOOK_UP_TABLE_NOT_INITIALIZED = 'LOOK_UP_TABLE_NOT_INITIALIZED',
}
/** Errors related to hashing setup. */
export enum HashErrorCode {
    NO_POSEIDON_HASHER_PROVIDED = 'NO_POSEIDON_HASHER_PROVIDED',
}
/** Errors raised during zero-knowledge proof generation/verification. */
export enum ProofErrorCode {
    INVALID_PROOF = 'INVALID_PROOF',
    PROOF_INPUT_UNDEFINED = 'PROOF_INPUT_UNDEFINED',
    PROOF_GENERATION_FAILED = 'PROOF_GENERATION_FAILED',
}
/** Errors related to Merkle tree state and lookups. */
export enum MerkleTreeErrorCode {
    MERKLE_TREE_NOT_INITIALIZED = 'MERKLE_TREE_NOT_INITIALIZED',
    SOL_MERKLE_TREE_UNDEFINED = 'SOL_MERKLE_TREE_UNDEFINED',
    MERKLE_TREE_UNDEFINED = 'MERKLE_TREE_UNDEFINED',
    INPUT_UTXO_NOT_INSERTED_IN_MERKLE_TREE = 'INPUT_UTXO_NOT_INSERTED_IN_MERKLE_TREE',
    MERKLE_TREE_INDEX_UNDEFINED = 'MERKLE_TREE_INDEX_UNDEFINED',
    MERKLE_TREE_SET_SPACE_UNDEFINED = 'MERKLE_TREE_SET_SPACE_UNDEFINED',
}
/** Miscellaneous utility-layer errors. */
export enum UtilsErrorCode {
    ACCOUNT_NAME_UNDEFINED_IN_IDL = 'ACCOUNT_NAME_UNDEFINED_IN_IDL',
    PROPERTY_UNDEFINED = 'PROPERTY_UNDEFINED',
    LOOK_UP_TABLE_CREATION_FAILED = 'LOOK_UP_TABLE_CREATION_FAILED',
    UNSUPPORTED_ARCHITECTURE = 'UNSUPPORTED_ARCHITECTURE',
    UNSUPPORTED_PLATFORM = 'UNSUPPORTED_PLATFORM',
    ACCOUNTS_UNDEFINED = 'ACCOUNTS_UNDEFINED',
    INVALID_NUMBER = 'INVALID_NUMBER',
}
/**
 * Base error for all subsystem errors below. Carries a machine-readable
 * `code`, the name of the function that raised it, and an optional
 * human-readable `codeMessage`. The Error message is "<code>: <codeMessage>".
 */
class MetaError extends Error {
    code: string;
    functionName: string;
    codeMessage?: string;
    constructor(code: string, functionName: string, codeMessage?: string) {
        super(`${code}: ${codeMessage}`);
        this.code = code;
        this.functionName = functionName;
        this.codeMessage = codeMessage;
    }
}
// One concrete subclass per subsystem so callers can discriminate errors
// with `instanceof`; all behavior lives in MetaError.
export class UtxoError extends MetaError {}
export class SelectInUtxosError extends MetaError {}
export class CreateUtxoError extends MetaError {}
export class RpcError extends MetaError {}
export class LookupTableError extends MetaError {}
export class HashError extends MetaError {}
export class ProofError extends MetaError {}
export class MerkleTreeError extends MetaError {}
export class UtilsError extends MetaError {}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/rpc.ts
|
import {
Connection,
ConnectionConfig,
SolanaJSONRPCError,
PublicKey,
} from '@solana/web3.js';
import { Buffer } from 'buffer';
import {
BalanceResult,
CompressedAccountResult,
CompressedAccountsByOwnerResult,
CompressedProofWithContext,
CompressedTokenAccountsByOwnerOrDelegateResult,
CompressedTransaction,
CompressedTransactionResult,
CompressionApiInterface,
GetCompressedTokenAccountsByOwnerOrDelegateOptions,
HealthResult,
HexInputsForProver,
MerkeProofResult,
MultipleCompressedAccountsResult,
NativeBalanceResult,
ParsedTokenAccount,
SignatureListResult,
SignatureListWithCursorResult,
SignatureWithMetadata,
SlotResult,
TokenBalanceListResult,
jsonRpcResult,
jsonRpcResultAndContext,
ValidityProofResult,
NewAddressProofResult,
LatestNonVotingSignaturesResult,
LatestNonVotingSignatures,
LatestNonVotingSignaturesResultPaginated,
LatestNonVotingSignaturesPaginated,
WithContext,
GetCompressedAccountsByOwnerConfig,
WithCursor,
AddressWithTree,
HashWithTree,
CompressedMintTokenHoldersResult,
CompressedMintTokenHolders,
TokenBalance,
TokenBalanceListResultV2,
PaginatedOptions,
} from './rpc-interface';
import {
MerkleContextWithMerkleProof,
BN254,
bn,
CompressedAccountWithMerkleContext,
encodeBN254toBase58,
createCompressedAccountWithMerkleContext,
createMerkleContext,
TokenData,
CompressedProof,
} from './state';
import { array, create, nullable } from 'superstruct';
import { defaultTestStateTreeAccounts } from './constants';
import { BN } from '@coral-xyz/anchor';
import { toCamelCase, toHex } from './utils/conversion';
import {
proofFromJsonStruct,
negateAndCompressProof,
} from './utils/parse-validity-proof';
/** @internal
 * Decodes the raw account-data triple returned by the indexer into wire
 * format: little-endian byte arrays for the 8-byte discriminator and the
 * 32-byte data hash, and the base64 payload as a Buffer.
 */
export function parseAccountData(raw: {
    discriminator: BN;
    data: string;
    dataHash: BN;
}) {
    const payload = Buffer.from(raw.data, 'base64');
    return {
        discriminator: raw.discriminator.toArray('le', 8),
        data: payload,
        dataHash: raw.dataHash.toArray('le', 32),
    };
}
/** @internal
 * Fetches compressed token accounts from the compression API, filtered either
 * by owner (default) or by delegate, validates each returned account against
 * the requested filter, and returns them newest-first (by leafIndex) with a
 * pagination cursor.
 */
async function getCompressedTokenAccountsByOwnerOrDelegate(
    rpc: Rpc,
    ownerOrDelegate: PublicKey,
    options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    filterByDelegate: boolean = false,
): Promise<WithCursor<ParsedTokenAccount[]>> {
    // Endpoint and response property both depend on the filter mode.
    const endpoint = filterByDelegate
        ? 'getCompressedTokenAccountsByDelegate'
        : 'getCompressedTokenAccountsByOwner';
    const propertyToCheck = filterByDelegate ? 'delegate' : 'owner';
    const unsafeRes = await rpcRequest(rpc.compressionApiEndpoint, endpoint, {
        [propertyToCheck]: ownerOrDelegate.toBase58(),
        mint: options.mint?.toBase58(),
        limit: options.limit?.toNumber(),
        cursor: options.cursor,
    });
    // Validate the untyped JSON response against the superstruct schema.
    const res = create(
        unsafeRes,
        jsonRpcResultAndContext(CompressedTokenAccountsByOwnerOrDelegateResult),
    );
    if ('error' in res) {
        throw new SolanaJSONRPCError(
            res.error,
            `failed to get info for compressed accounts by ${propertyToCheck} ${ownerOrDelegate.toBase58()}`,
        );
    }
    if (res.result.value === null) {
        throw new Error('not implemented: NULL result');
    }
    const accounts: ParsedTokenAccount[] = [];
    // NOTE: .map is used for side effects (pushing into `accounts`).
    res.result.value.items.map(item => {
        const _account = item.account;
        const _tokenData = item.tokenData;
        const compressedAccount: CompressedAccountWithMerkleContext =
            createCompressedAccountWithMerkleContext(
                createMerkleContext(
                    _account.tree!,
                    // TODO-tracked above: nullifier queue is mocked, not
                    // taken from the response.
                    mockNullifierQueue,
                    _account.hash.toArray('be', 32),
                    _account.leafIndex,
                ),
                _account.owner,
                bn(_account.lamports),
                _account.data ? parseAccountData(_account.data) : undefined,
                _account.address || undefined,
            );
        const parsed: TokenData = {
            mint: _tokenData.mint,
            owner: _tokenData.owner,
            amount: _tokenData.amount,
            delegate: _tokenData.delegate,
            // Map the state string onto its numeric enum index.
            state: ['uninitialized', 'initialized', 'frozen'].indexOf(
                _tokenData.state,
            ),
            tlv: null,
        };
        // Defensive check: the indexer must only return accounts matching
        // the requested owner/delegate.
        if (
            parsed[propertyToCheck]?.toBase58() !== ownerOrDelegate.toBase58()
        ) {
            throw new Error(
                `RPC returned token account with ${propertyToCheck} different from requested ${propertyToCheck}`,
            );
        }
        accounts.push({
            compressedAccount,
            parsed,
        });
    });
    /// TODO: consider custom or different sort. Most recent here.
    return {
        items: accounts.sort(
            (a, b) =>
                b.compressedAccount.leafIndex - a.compressedAccount.leafIndex,
        ),
        cursor: res.result.value.cursor,
    };
}
/** @internal
 * Converts a parsed RPC item (account plus optional token-layout data) into a
 * CompressedAccountWithMerkleContext, returning the decoded TokenData when
 * present and null otherwise.
 */
function buildCompressedAccountWithMaybeTokenData(
    accountStructWithOptionalTokenData: any,
): {
    account: CompressedAccountWithMerkleContext;
    maybeTokenData: TokenData | null;
} {
    const compressedAccountResult = accountStructWithOptionalTokenData.account;
    const tokenDataResult =
        accountStructWithOptionalTokenData.optionalTokenData;
    const compressedAccount: CompressedAccountWithMerkleContext =
        createCompressedAccountWithMerkleContext(
            createMerkleContext(
                compressedAccountResult.merkleTree,
                // Nullifier queue is mocked (see mockNullifierQueue TODO).
                mockNullifierQueue,
                compressedAccountResult.hash.toArray('be', 32),
                compressedAccountResult.leafIndex,
            ),
            compressedAccountResult.owner,
            bn(compressedAccountResult.lamports),
            compressedAccountResult.data
                ? parseAccountData(compressedAccountResult.data)
                : undefined,
            compressedAccountResult.address || undefined,
        );
    if (tokenDataResult === null) {
        return { account: compressedAccount, maybeTokenData: null };
    }
    const parsed: TokenData = {
        mint: tokenDataResult.mint,
        owner: tokenDataResult.owner,
        amount: tokenDataResult.amount,
        delegate: tokenDataResult.delegate,
        // Map the state string onto its numeric enum index.
        state: ['uninitialized', 'initialized', 'frozen'].indexOf(
            tokenDataResult.state,
        ),
        tlv: null,
    };
    return { account: compressedAccount, maybeTokenData: parsed };
}
/**
 * Establish a Compression-compatible JSON RPC connection
 *
 * @param endpointOrWeb3JsConnection endpoint to the solana cluster or
 *                                   Connection object
 * @param compressionApiEndpoint     Endpoint to the compression server
 * @param proverEndpoint             Endpoint to the prover server. defaults
 *                                   to endpoint
 * @param connectionConfig           Optional connection config
 */
export function createRpc(
    endpointOrWeb3JsConnection: string | Connection = 'http://127.0.0.1:8899',
    compressionApiEndpoint: string = 'http://127.0.0.1:8784',
    proverEndpoint: string = 'http://127.0.0.1:3001',
    config?: ConnectionConfig,
): Rpc {
    // Accept either a raw URL or an existing web3.js Connection; in the
    // latter case reuse its configured endpoint.
    let endpoint: string;
    if (typeof endpointOrWeb3JsConnection === 'string') {
        endpoint = endpointOrWeb3JsConnection;
    } else {
        endpoint = endpointOrWeb3JsConnection.rpcEndpoint;
    }
    return new Rpc(endpoint, compressionApiEndpoint, proverEndpoint, config);
}
/** @internal
 * Sends a JSON-RPC 2.0 POST to `rpcEndpoint` and returns the parsed body.
 * When `convertToCamelCase` is true (default), response keys are converted
 * from snake_case. When `debug` is true, a reproducible curl command and a
 * stack trace are logged before the request is sent.
 */
export const rpcRequest = async (
    rpcEndpoint: string,
    method: string,
    params: any = [],
    convertToCamelCase = true,
    debug = false,
): Promise<any> => {
    const body = JSON.stringify({
        jsonrpc: '2.0',
        id: 'test-account',
        method: method,
        params: params,
    });
    if (debug) {
        const generateCurlSnippet = () => {
            // Escape quotes so the body survives inside double-quoted -d.
            const escapedBody = body.replace(/"/g, '\\"');
            return `curl -X POST ${rpcEndpoint} \\
                -H "Content-Type: application/json" \\
                -d "${escapedBody}"`;
        };
        console.log('Debug: Stack trace:');
        console.log(new Error().stack);
        console.log('\nDebug: curl:');
        console.log(generateCurlSnippet());
        console.log('\n');
    }
    const response = await fetch(rpcEndpoint, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: body,
    });
    if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
    }
    if (convertToCamelCase) {
        const res = await response.json();
        return toCamelCase(res);
    }
    return await response.json();
};
/** @internal
 * Requests a zero-knowledge proof from the prover's /prove endpoint and
 * returns it negated and compressed for on-chain verification. The request
 * body shape depends on `method`: inclusion proofs take compressed-account
 * inputs, new-address proofs take addresses, and combined proofs take both
 * (as params[0] / params[1]). When `log` is set, proof generation is timed
 * via console.time.
 */
export const proverRequest = async (
    proverEndpoint: string,
    method: 'inclusion' | 'new-address' | 'combined',
    params: any = [],
    log = false,
): Promise<CompressedProof> => {
    let logMsg: string = '';
    if (log) {
        logMsg = `Proof generation for method:${method}`;
        console.time(logMsg);
    }
    // Build the method-specific request payload.
    let payload;
    switch (method) {
        case 'inclusion':
            payload = { 'input-compressed-accounts': params };
            break;
        case 'new-address':
            payload = { 'new-addresses': params };
            break;
        case 'combined':
            payload = {
                'input-compressed-accounts': params[0],
                'new-addresses': params[1],
            };
            break;
    }
    const response = await fetch(`${proverEndpoint}/prove`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload),
    });
    if (!response.ok) {
        throw new Error(`Error fetching proof: ${response.statusText}`);
    }
    const data: any = await response.json();
    const parsed = proofFromJsonStruct(data);
    const compressedProof = negateAndCompressProof(parsed);
    if (log) console.timeEnd(logMsg);
    return compressedProof;
};
/**
 * Inputs of a non-inclusion (new-address) proof in circuit form: the indexed
 * Merkle tree leaf whose [lower, higher) range brackets `value`, plus its
 * Merkle path and position.
 */
export type NonInclusionMerkleProofInputs = {
    root: BN;
    value: BN;
    leaf_lower_range_value: BN;
    leaf_higher_range_value: BN;
    nextIndex: BN;
    merkle_proof_hashed_indexed_element_leaf: BN[];
    index_hashed_indexed_element_leaf: BN;
};
/**
 * A new-address proof together with its on-chain context (address tree,
 * nullifier queue, root index), camelCased as returned by the RPC layer.
 */
export type MerkleContextWithNewAddressProof = {
    root: BN;
    rootIndex: number;
    value: BN;
    leafLowerRangeValue: BN;
    leafHigherRangeValue: BN;
    nextIndex: BN;
    merkleProofHashedIndexedElementLeaf: BN[];
    indexHashedIndexedElementLeaf: BN;
    merkleTree: PublicKey;
    nullifierQueue: PublicKey;
};
/**
 * JSON shape of a non-inclusion proof as sent to the prover (hex-encoded
 * field elements).
 */
export type NonInclusionJsonStruct = {
    root: string;
    value: string;
    pathIndex: number;
    pathElements: string[];
    leafLowerRangeValue: string;
    leafHigherRangeValue: string;
    nextIndex: number;
};
/**
 * Converts inclusion-proof contexts into the hex-encoded JSON shape the
 * prover expects: root, leaf index, path elements, and the leaf hash.
 */
export function convertMerkleProofsWithContextToHex(
    merkleProofsWithContext: MerkleContextWithMerkleProof[],
): HexInputsForProver[] {
    return merkleProofsWithContext.map(ctx => ({
        root: toHex(ctx.root),
        pathIndex: ctx.leafIndex,
        pathElements: ctx.merkleProof.map(node => toHex(node)),
        leaf: toHex(bn(ctx.hash)),
    }));
}
/**
 * Convert non-inclusion (new-address) proofs into the hex-string JSON shape
 * expected by the prover server.
 *
 * @param nonInclusionMerkleProofInputs Proofs returned by
 *        `getMultipleNewAddressProofs`.
 * @returns One `NonInclusionJsonStruct` entry per input proof, in order.
 */
export function convertNonInclusionMerkleProofInputsToHex(
    nonInclusionMerkleProofInputs: MerkleContextWithNewAddressProof[],
): NonInclusionJsonStruct[] {
    return nonInclusionMerkleProofInputs.map(proof => ({
        root: toHex(proof.root),
        value: toHex(proof.value),
        pathIndex: proof.indexHashedIndexedElementLeaf.toNumber(),
        pathElements: proof.merkleProofHashedIndexedElementLeaf.map(node =>
            toHex(node),
        ),
        nextIndex: proof.nextIndex.toNumber(),
        leafLowerRangeValue: toHex(proof.leafLowerRangeValue),
        leafHigherRangeValue: toHex(proof.leafHigherRangeValue),
    }));
}
/// TODO: replace with dynamic nullifierQueue
// Placeholder queue accounts taken from the default test state trees. Used
// wherever the indexer response does not (yet) include the queue account for
// a tree — see the TODO(photon) notes in the Rpc methods.
const mockNullifierQueue = defaultTestStateTreeAccounts().nullifierQueue;
const mockAddressQueue = defaultTestStateTreeAccounts().addressQueue;
/**
 * Rpc extends the Solana web3.js `Connection` with the ZK Compression API:
 * compressed-account queries against a compression indexer endpoint and
 * validity-proof generation against a prover endpoint.
 */
export class Rpc extends Connection implements CompressionApiInterface {
    compressionApiEndpoint: string;
    proverEndpoint: string;

    // Modulo applied to `rootSeq` to derive the root-buffer index.
    // NOTE(review): 2400 presumably equals the on-chain root history buffer
    // size of the default trees — confirm against the on-chain account
    // layout before changing.
    private static readonly ROOT_SEQ_MOD = 2400;

    /**
     * Establish a Compression-compatible JSON RPC connection
     *
     * @param endpoint              Endpoint to the solana cluster
     * @param compressionApiEndpoint Endpoint to the compression server
     * @param proverEndpoint        Endpoint to the prover server.
     * @param config                Optional connection config
     */
    constructor(
        endpoint: string,
        compressionApiEndpoint: string,
        proverEndpoint: string,
        config?: ConnectionConfig,
    ) {
        super(endpoint, config || 'confirmed');
        this.compressionApiEndpoint = compressionApiEndpoint;
        this.proverEndpoint = proverEndpoint;
    }

    /**
     * Fetch the compressed account for the specified account address or hash
     */
    async getCompressedAccount(
        address?: BN254,
        hash?: BN254,
    ): Promise<CompressedAccountWithMerkleContext | null> {
        if (!hash && !address) {
            throw new Error('Either hash or address must be provided');
        }
        if (hash && address) {
            throw new Error('Only one of hash or address must be provided');
        }
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedAccount',
            {
                hash: hash ? encodeBN254toBase58(hash) : undefined,
                address: address ? encodeBN254toBase58(address) : undefined,
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(nullable(CompressedAccountResult)),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get info for compressed account ${hash ? hash.toString() : address ? address.toString() : ''}`,
            );
        }
        if (res.result.value === null) {
            return null;
        }
        const item = res.result.value;
        const account = createCompressedAccountWithMerkleContext(
            createMerkleContext(
                item.tree!,
                mockNullifierQueue,
                item.hash.toArray('be', 32),
                item.leafIndex,
            ),
            item.owner,
            bn(item.lamports),
            item.data ? parseAccountData(item.data) : undefined,
            item.address || undefined,
        );
        return account;
    }

    /**
     * Fetch the compressed balance for the specified account address or hash
     */
    async getCompressedBalance(address?: BN254, hash?: BN254): Promise<BN> {
        if (!hash && !address) {
            throw new Error('Either hash or address must be provided');
        }
        if (hash && address) {
            throw new Error('Only one of hash or address must be provided');
        }
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedBalance',
            {
                hash: hash ? encodeBN254toBase58(hash) : undefined,
                address: address ? encodeBN254toBase58(address) : undefined,
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(NativeBalanceResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get balance for compressed account ${hash ? hash.toString() : address ? address.toString() : ''}`,
            );
        }
        if (res.result.value === null) {
            return bn(0);
        }
        return bn(res.result.value);
    }

    /// TODO: validate that this is just for sol accounts
    /**
     * Fetch the total compressed balance for the specified owner public key
     */
    async getCompressedBalanceByOwner(owner: PublicKey): Promise<BN> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedBalanceByOwner',
            { owner: owner.toBase58() },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(NativeBalanceResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get balance for compressed account ${owner.toBase58()}`,
            );
        }
        if (res.result.value === null) {
            return bn(0);
        }
        return bn(res.result.value);
    }

    /**
     * Fetch the latest merkle proof for the specified account hash from the
     * cluster
     */
    async getCompressedAccountProof(
        hash: BN254,
    ): Promise<MerkleContextWithMerkleProof> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedAccountProof',
            { hash: encodeBN254toBase58(hash) },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(MerkeProofResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get proof for compressed account ${hash.toString()}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get proof for compressed account ${hash.toString()}`,
            );
        }
        const value: MerkleContextWithMerkleProof = {
            hash: res.result.value.hash.toArray('be', 32),
            merkleTree: res.result.value.merkleTree,
            leafIndex: res.result.value.leafIndex,
            merkleProof: res.result.value.proof,
            nullifierQueue: mockNullifierQueue, // TODO(photon): support nullifierQueue in response.
            rootIndex: res.result.value.rootSeq % Rpc.ROOT_SEQ_MOD,
            root: res.result.value.root,
        };
        return value;
    }

    /**
     * Fetch all the account info for multiple compressed accounts specified by
     * an array of account hashes
     */
    async getMultipleCompressedAccounts(
        hashes: BN254[],
    ): Promise<CompressedAccountWithMerkleContext[]> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getMultipleCompressedAccounts',
            { hashes: hashes.map(hash => encodeBN254toBase58(hash)) },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(MultipleCompressedAccountsResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get info for compressed accounts ${hashes.map(hash => encodeBN254toBase58(hash)).join(', ')}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get info for compressed accounts ${hashes.map(hash => encodeBN254toBase58(hash)).join(', ')}`,
            );
        }
        const accounts: CompressedAccountWithMerkleContext[] = [];
        res.result.value.items.map(item => {
            const account = createCompressedAccountWithMerkleContext(
                createMerkleContext(
                    item.tree!,
                    mockNullifierQueue,
                    item.hash.toArray('be', 32),
                    item.leafIndex,
                ),
                item.owner,
                bn(item.lamports),
                item.data ? parseAccountData(item.data) : undefined,
                item.address || undefined,
            );
            accounts.push(account);
        });
        return accounts.sort((a, b) => b.leafIndex - a.leafIndex);
    }

    /**
     * Fetch the latest merkle proofs for multiple compressed accounts specified
     * by an array account hashes
     */
    async getMultipleCompressedAccountProofs(
        hashes: BN254[],
    ): Promise<MerkleContextWithMerkleProof[]> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getMultipleCompressedAccountProofs',
            hashes.map(hash => encodeBN254toBase58(hash)),
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(array(MerkeProofResult)),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get proofs for compressed accounts ${hashes.map(hash => encodeBN254toBase58(hash)).join(', ')}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get proofs for compressed accounts ${hashes.map(hash => encodeBN254toBase58(hash)).join(', ')}`,
            );
        }
        const merkleProofs: MerkleContextWithMerkleProof[] = [];
        for (const proof of res.result.value) {
            const value: MerkleContextWithMerkleProof = {
                hash: proof.hash.toArray('be', 32),
                merkleTree: proof.merkleTree,
                leafIndex: proof.leafIndex,
                merkleProof: proof.proof,
                // FIX: these are state-tree proofs, so use the nullifier
                // queue placeholder (the singular getCompressedAccountProof
                // does); previously this wrongly used mockAddressQueue.
                nullifierQueue: mockNullifierQueue, // TODO(photon): support nullifierQueue in response.
                rootIndex: proof.rootSeq % Rpc.ROOT_SEQ_MOD,
                root: proof.root,
            };
            merkleProofs.push(value);
        }
        return merkleProofs;
    }

    /**
     * Fetch all the compressed accounts owned by the specified public key.
     * Owner can be a program or user account
     */
    async getCompressedAccountsByOwner(
        owner: PublicKey,
        config?: GetCompressedAccountsByOwnerConfig | undefined,
    ): Promise<WithCursor<CompressedAccountWithMerkleContext[]>> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedAccountsByOwner',
            {
                owner: owner.toBase58(),
                filters: config?.filters || [],
                dataSlice: config?.dataSlice,
                cursor: config?.cursor,
                limit: config?.limit?.toNumber(),
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(CompressedAccountsByOwnerResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get info for compressed accounts owned by ${owner.toBase58()}`,
            );
        }
        if (res.result.value === null) {
            return {
                items: [],
                cursor: null,
            };
        }
        const accounts: CompressedAccountWithMerkleContext[] = [];
        res.result.value.items.map(item => {
            const account = createCompressedAccountWithMerkleContext(
                createMerkleContext(
                    item.tree!,
                    mockNullifierQueue,
                    item.hash.toArray('be', 32),
                    item.leafIndex,
                ),
                item.owner,
                bn(item.lamports),
                item.data ? parseAccountData(item.data) : undefined,
                item.address || undefined,
            );
            accounts.push(account);
        });
        return {
            items: accounts.sort((a, b) => b.leafIndex - a.leafIndex),
            cursor: res.result.value.cursor,
        };
    }

    /**
     * Fetch all the compressed token accounts owned by the specified public
     * key. Owner can be a program or user account
     */
    async getCompressedTokenAccountsByOwner(
        owner: PublicKey,
        options?: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    ): Promise<WithCursor<ParsedTokenAccount[]>> {
        if (!options) options = {};
        return await getCompressedTokenAccountsByOwnerOrDelegate(
            this,
            owner,
            options,
            false,
        );
    }

    /**
     * Fetch all the compressed accounts delegated to the specified public key.
     */
    async getCompressedTokenAccountsByDelegate(
        delegate: PublicKey,
        options?: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    ): Promise<WithCursor<ParsedTokenAccount[]>> {
        if (!options) options = {};
        return getCompressedTokenAccountsByOwnerOrDelegate(
            this,
            delegate,
            options,
            true,
        );
    }

    /**
     * Fetch the compressed token balance for the specified account hash
     */
    async getCompressedTokenAccountBalance(
        hash: BN254,
    ): Promise<{ amount: BN }> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedTokenAccountBalance',
            { hash: encodeBN254toBase58(hash) },
        );
        const res = create(unsafeRes, jsonRpcResultAndContext(BalanceResult));
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get balance for compressed token account ${hash.toString()}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get balance for compressed token account ${hash.toString()}`,
            );
        }
        return { amount: bn(res.result.value.amount) };
    }

    /**
     * @deprecated use {@link getCompressedTokenBalancesByOwnerV2} instead.
     *
     * Fetch all the compressed token balances owned by the specified public
     * key. Can filter by mint. Returns without context.
     */
    async getCompressedTokenBalancesByOwner(
        owner: PublicKey,
        options?: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    ): Promise<WithCursor<TokenBalance[]>> {
        if (!options) options = {};
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedTokenBalancesByOwner',
            {
                owner: owner.toBase58(),
                mint: options.mint?.toBase58(),
                limit: options.limit?.toNumber(),
                cursor: options.cursor,
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(TokenBalanceListResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get compressed token balances for owner ${owner.toBase58()}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get compressed token balances for owner ${owner.toBase58()}`,
            );
        }
        // Defensive client-side mint filter on top of the server-side one.
        const maybeFiltered = options.mint
            ? res.result.value.tokenBalances.filter(
                  tokenBalance =>
                      tokenBalance.mint.toBase58() === options.mint!.toBase58(),
              )
            : res.result.value.tokenBalances;
        return {
            items: maybeFiltered,
            cursor: res.result.value.cursor,
        };
    }

    /**
     * Fetch the compressed token balances owned by the specified public
     * key. Paginated. Can filter by mint. Returns with context.
     */
    async getCompressedTokenBalancesByOwnerV2(
        owner: PublicKey,
        options?: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    ): Promise<WithContext<WithCursor<TokenBalance[]>>> {
        if (!options) options = {};
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedTokenBalancesByOwnerV2',
            {
                owner: owner.toBase58(),
                mint: options.mint?.toBase58(),
                limit: options.limit?.toNumber(),
                cursor: options.cursor,
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(TokenBalanceListResultV2),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get compressed token balances for owner ${owner.toBase58()}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get compressed token balances for owner ${owner.toBase58()}`,
            );
        }
        const maybeFiltered = options.mint
            ? res.result.value.items.filter(
                  tokenBalance =>
                      tokenBalance.mint.toBase58() === options.mint!.toBase58(),
              )
            : res.result.value.items;
        return {
            context: res.result.context,
            value: {
                items: maybeFiltered,
                cursor: res.result.value.cursor,
            },
        };
    }

    /**
     * Returns confirmed compression signatures for transactions involving the specified
     * account hash forward in time from genesis to the most recent confirmed
     * block
     *
     * @param hash queried account hash
     */
    async getCompressionSignaturesForAccount(
        hash: BN254,
    ): Promise<SignatureWithMetadata[]> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressionSignaturesForAccount',
            { hash: encodeBN254toBase58(hash) },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(SignatureListResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get signatures for compressed account ${hash.toString()}`,
            );
        }
        return res.result.value.items;
    }

    /**
     * Fetch a confirmed or finalized transaction from the cluster. Return with
     * CompressionInfo
     */
    async getTransactionWithCompressionInfo(
        signature: string,
    ): Promise<CompressedTransaction | null> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getTransactionWithCompressionInfo',
            { signature },
        );
        const res = create(
            unsafeRes,
            jsonRpcResult(CompressedTransactionResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(res.error, 'failed to get slot');
        }
        if (res.result.transaction === null) return null;
        const closedAccounts: {
            account: CompressedAccountWithMerkleContext;
            maybeTokenData: TokenData | null;
        }[] = [];
        const openedAccounts: {
            account: CompressedAccountWithMerkleContext;
            maybeTokenData: TokenData | null;
        }[] = [];
        res.result.compressionInfo.closedAccounts.map(item => {
            closedAccounts.push(buildCompressedAccountWithMaybeTokenData(item));
        });
        res.result.compressionInfo.openedAccounts.map(item => {
            openedAccounts.push(buildCompressedAccountWithMaybeTokenData(item));
        });
        // Aggregate token amounts per (owner, mint) pair; undefined when no
        // token accounts are present.
        const calculateTokenBalances = (
            accounts: Array<{
                account: CompressedAccountWithMerkleContext;
                maybeTokenData: TokenData | null;
            }>,
        ):
            | Array<{
                  owner: PublicKey;
                  mint: PublicKey;
                  amount: BN;
              }>
            | undefined => {
            const balances = Object.values(
                accounts.reduce(
                    (acc, { maybeTokenData }) => {
                        if (maybeTokenData) {
                            const { owner, mint, amount } = maybeTokenData;
                            const key = `${owner.toBase58()}_${mint.toBase58()}`;
                            if (key in acc) {
                                acc[key].amount = acc[key].amount.add(amount);
                            } else {
                                acc[key] = { owner, mint, amount };
                            }
                        }
                        return acc;
                    },
                    {} as {
                        [key: string]: {
                            owner: PublicKey;
                            mint: PublicKey;
                            amount: BN;
                        };
                    },
                ),
            );
            return balances.length > 0 ? balances : undefined;
        };
        const preTokenBalances = calculateTokenBalances(closedAccounts);
        const postTokenBalances = calculateTokenBalances(openedAccounts);
        return {
            compressionInfo: {
                closedAccounts,
                openedAccounts,
                preTokenBalances,
                postTokenBalances,
            },
            transaction: res.result.transaction,
        };
    }

    /**
     * Returns confirmed signatures for transactions involving the specified
     * address forward in time from genesis to the most recent confirmed block
     *
     * @param address queried compressed account address
     */
    async getCompressionSignaturesForAddress(
        address: PublicKey,
        options?: PaginatedOptions,
    ): Promise<WithCursor<SignatureWithMetadata[]>> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressionSignaturesForAddress',
            {
                address: address.toBase58(),
                cursor: options?.cursor,
                limit: options?.limit?.toNumber(),
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(SignatureListWithCursorResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get signatures for address ${address.toBase58()}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get signatures for address ${address.toBase58()}`,
            );
        }
        return res.result.value;
    }

    /**
     * Returns confirmed signatures for compression transactions involving the
     * specified account owner forward in time from genesis to the
     * most recent confirmed block
     *
     * @param owner queried owner public key
     */
    async getCompressionSignaturesForOwner(
        owner: PublicKey,
        options?: PaginatedOptions,
    ): Promise<WithCursor<SignatureWithMetadata[]>> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressionSignaturesForOwner',
            {
                owner: owner.toBase58(),
                cursor: options?.cursor,
                limit: options?.limit?.toNumber(),
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(SignatureListWithCursorResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get signatures for owner ${owner.toBase58()}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get signatures for owner ${owner.toBase58()}`,
            );
        }
        return res.result.value;
    }

    /**
     * Returns confirmed signatures for compression transactions involving the
     * specified token account owner forward in time from genesis to the most
     * recent confirmed block
     */
    async getCompressionSignaturesForTokenOwner(
        owner: PublicKey,
        options?: PaginatedOptions,
    ): Promise<WithCursor<SignatureWithMetadata[]>> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressionSignaturesForTokenOwner',
            {
                owner: owner.toBase58(),
                cursor: options?.cursor,
                limit: options?.limit?.toNumber(),
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(SignatureListWithCursorResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get signatures for owner ${owner.toBase58()}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get signatures for owner ${owner.toBase58()}`,
            );
        }
        return res.result.value;
    }

    /**
     * Fetch the current indexer health status
     */
    async getIndexerHealth(): Promise<string> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getIndexerHealth',
        );
        const res = create(unsafeRes, jsonRpcResult(HealthResult));
        if ('error' in res) {
            throw new SolanaJSONRPCError(res.error, 'failed to get health');
        }
        return res.result;
    }

    /**
     * Ensure that the Compression Indexer has already indexed the transaction
     */
    async confirmTransactionIndexed(slot: number): Promise<boolean> {
        const startTime = Date.now();
        // Poll every 200ms until the indexer reaches `slot` or 20s elapse.
        // eslint-disable-next-line no-constant-condition
        while (true) {
            const indexerSlot = await this.getIndexerSlot();
            if (indexerSlot >= slot) {
                return true;
            }
            if (Date.now() - startTime > 20000) {
                // 20 seconds
                throw new Error(
                    'Timeout: Indexer slot did not reach the required slot within 20 seconds',
                );
            }
            await new Promise(resolve => setTimeout(resolve, 200));
        }
    }

    /**
     * Fetch the current slot that the node is processing
     */
    async getIndexerSlot(): Promise<number> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getIndexerSlot',
        );
        const res = create(unsafeRes, jsonRpcResult(SlotResult));
        if ('error' in res) {
            throw new SolanaJSONRPCError(res.error, 'failed to get slot');
        }
        return res.result;
    }

    /**
     * Fetch all the compressed token holders for a given mint. Paginated.
     */
    async getCompressedMintTokenHolders(
        mint: PublicKey,
        options?: PaginatedOptions,
    ): Promise<WithContext<WithCursor<CompressedMintTokenHolders[]>>> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getCompressedMintTokenHolders',
            {
                mint: mint.toBase58(),
                cursor: options?.cursor,
                limit: options?.limit?.toNumber(),
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(CompressedMintTokenHoldersResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                'failed to get mint token holders',
            );
        }
        return res.result;
    }

    /**
     * Fetch the latest compression signatures on the cluster. Results are
     * paginated.
     */
    async getLatestCompressionSignatures(
        cursor?: string,
        limit?: number,
    ): Promise<LatestNonVotingSignaturesPaginated> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getLatestCompressionSignatures',
            { limit, cursor },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(LatestNonVotingSignaturesResultPaginated),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                'failed to get latest non-voting signatures',
            );
        }
        return res.result;
    }

    /**
     * Fetch all non-voting signatures
     */
    async getLatestNonVotingSignatures(
        limit?: number,
        cursor?: string,
    ): Promise<LatestNonVotingSignatures> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getLatestNonVotingSignatures',
            { limit, cursor },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(LatestNonVotingSignaturesResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                'failed to get latest non-voting signatures',
            );
        }
        return res.result;
    }

    /**
     * Fetch the latest address proofs for new unique addresses specified by an
     * array of addresses.
     *
     * the proof states that said address have not yet been created in
     * respective address tree.
     * @param addresses Array of BN254 new addresses
     * @returns Array of validity proofs for new addresses
     */
    async getMultipleNewAddressProofs(addresses: BN254[]) {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getMultipleNewAddressProofs',
            addresses.map(address => encodeBN254toBase58(address)),
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(array(NewAddressProofResult)),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get proofs for new addresses ${addresses.map(address => encodeBN254toBase58(address)).join(', ')}`,
            );
        }
        if (res.result.value === null) {
            throw new Error(
                `failed to get proofs for new addresses ${addresses.map(address => encodeBN254toBase58(address)).join(', ')}`,
            );
        }
        /// Creates proof for each address
        const newAddressProofs: MerkleContextWithNewAddressProof[] = [];
        for (const proof of res.result.value) {
            const _proof: MerkleContextWithNewAddressProof = {
                root: proof.root,
                rootIndex: proof.rootSeq % Rpc.ROOT_SEQ_MOD,
                value: proof.address,
                leafLowerRangeValue: proof.lowerRangeAddress,
                leafHigherRangeValue: proof.higherRangeAddress,
                nextIndex: bn(proof.nextIndex),
                merkleProofHashedIndexedElementLeaf: proof.proof,
                indexHashedIndexedElementLeaf: bn(proof.lowElementLeafIndex),
                merkleTree: proof.merkleTree,
                nullifierQueue: mockAddressQueue,
            };
            newAddressProofs.push(_proof);
        }
        return newAddressProofs;
    }

    /**
     * Advanced usage of getValidityProof: fetches ZKP directly from a custom
     * non-rpcprover. Note: This uses the proverEndpoint specified in the
     * constructor. For normal usage, please use {@link getValidityProof}
     * instead.
     *
     * Fetch the latest validity proof for (1) compressed accounts specified by
     * an array of account hashes. (2) new unique addresses specified by an
     * array of addresses.
     *
     * Validity proofs prove the presence of compressed accounts in state trees
     * and the non-existence of addresses in address trees, respectively. They
     * enable verification without recomputing the merkle proof path, thus
     * lowering verification and data costs.
     *
     * @param hashes       Array of BN254 hashes.
     * @param newAddresses Array of BN254 new addresses.
     * @returns validity proof with context
     */
    async getValidityProofDirect(
        hashes: BN254[] = [],
        newAddresses: BN254[] = [],
    ): Promise<CompressedProofWithContext> {
        let validityProof: CompressedProofWithContext;
        if (hashes.length === 0 && newAddresses.length === 0) {
            throw new Error(
                'Empty input. Provide hashes and/or new addresses.',
            );
        } else if (hashes.length > 0 && newAddresses.length === 0) {
            /// inclusion
            const merkleProofsWithContext =
                await this.getMultipleCompressedAccountProofs(hashes);
            const inputs = convertMerkleProofsWithContextToHex(
                merkleProofsWithContext,
            );
            const compressedProof = await proverRequest(
                this.proverEndpoint,
                'inclusion',
                inputs,
                false,
            );
            validityProof = {
                compressedProof,
                roots: merkleProofsWithContext.map(proof => proof.root),
                rootIndices: merkleProofsWithContext.map(
                    proof => proof.rootIndex,
                ),
                leafIndices: merkleProofsWithContext.map(
                    proof => proof.leafIndex,
                ),
                leaves: merkleProofsWithContext.map(proof => bn(proof.hash)),
                merkleTrees: merkleProofsWithContext.map(
                    proof => proof.merkleTree,
                ),
                nullifierQueues: merkleProofsWithContext.map(
                    proof => proof.nullifierQueue,
                ),
            };
        } else if (hashes.length === 0 && newAddresses.length > 0) {
            /// new-address
            const newAddressProofs: MerkleContextWithNewAddressProof[] =
                await this.getMultipleNewAddressProofs(newAddresses);
            const inputs =
                convertNonInclusionMerkleProofInputsToHex(newAddressProofs);
            const compressedProof = await proverRequest(
                this.proverEndpoint,
                'new-address',
                inputs,
                false,
            );
            validityProof = {
                compressedProof,
                roots: newAddressProofs.map(proof => proof.root),
                rootIndices: newAddressProofs.map(proof => proof.rootIndex),
                leafIndices: newAddressProofs.map(proof =>
                    proof.nextIndex.toNumber(),
                ),
                leaves: newAddressProofs.map(proof => bn(proof.value)),
                merkleTrees: newAddressProofs.map(proof => proof.merkleTree),
                nullifierQueues: newAddressProofs.map(
                    proof => proof.nullifierQueue,
                ),
            };
        } else if (hashes.length > 0 && newAddresses.length > 0) {
            /// combined: inclusion entries first, then new-address entries.
            const merkleProofsWithContext =
                await this.getMultipleCompressedAccountProofs(hashes);
            const inputs = convertMerkleProofsWithContextToHex(
                merkleProofsWithContext,
            );
            const newAddressProofs: MerkleContextWithNewAddressProof[] =
                await this.getMultipleNewAddressProofs(newAddresses);
            const newAddressInputs =
                convertNonInclusionMerkleProofInputsToHex(newAddressProofs);
            const compressedProof = await proverRequest(
                this.proverEndpoint,
                'combined',
                [inputs, newAddressInputs],
                false,
            );
            validityProof = {
                compressedProof,
                roots: merkleProofsWithContext
                    .map(proof => proof.root)
                    .concat(newAddressProofs.map(proof => proof.root)),
                rootIndices: merkleProofsWithContext
                    .map(proof => proof.rootIndex)
                    .concat(newAddressProofs.map(proof => proof.rootIndex)),
                leafIndices: merkleProofsWithContext
                    .map(proof => proof.leafIndex)
                    .concat(
                        newAddressProofs.map(
                            proof => proof.nextIndex.toNumber(), // TODO: support >32bit
                        ),
                    ),
                leaves: merkleProofsWithContext
                    .map(proof => bn(proof.hash))
                    .concat(newAddressProofs.map(proof => bn(proof.value))),
                merkleTrees: merkleProofsWithContext
                    .map(proof => proof.merkleTree)
                    .concat(newAddressProofs.map(proof => proof.merkleTree)),
                nullifierQueues: merkleProofsWithContext
                    .map(proof => proof.nullifierQueue)
                    .concat(
                        newAddressProofs.map(proof => proof.nullifierQueue),
                    ),
            };
        } else throw new Error('Invalid input');
        return validityProof;
    }

    /**
     * Fetch the latest validity proof for (1) compressed accounts specified by
     * an array of account hashes. (2) new unique addresses specified by an
     * array of addresses.
     *
     * Validity proofs prove the presence of compressed accounts in state trees
     * and the non-existence of addresses in address trees, respectively. They
     * enable verification without recomputing the merkle proof path, thus
     * lowering verification and data costs.
     *
     * @param hashes       Array of BN254 hashes.
     * @param newAddresses Array of BN254 new addresses.
     * @returns validity proof with context
     */
    async getValidityProof(
        hashes: BN254[] = [],
        newAddresses: BN254[] = [],
    ): Promise<CompressedProofWithContext> {
        // Attach the default test tree/queue accounts to each input and
        // delegate to the V0 API.
        const defaultAddressTreePublicKey =
            defaultTestStateTreeAccounts().addressTree;
        const defaultAddressQueuePublicKey =
            defaultTestStateTreeAccounts().addressQueue;
        const defaultStateTreePublicKey =
            defaultTestStateTreeAccounts().merkleTree;
        const defaultStateQueuePublicKey =
            defaultTestStateTreeAccounts().nullifierQueue;
        const formattedHashes = hashes.map(item => {
            return {
                hash: item,
                tree: defaultStateTreePublicKey,
                queue: defaultStateQueuePublicKey,
            };
        });
        const formattedNewAddresses = newAddresses.map(item => {
            return {
                address: item,
                tree: defaultAddressTreePublicKey,
                queue: defaultAddressQueuePublicKey,
            };
        });
        return this.getValidityProofV0(formattedHashes, formattedNewAddresses);
    }

    /**
     * Fetch the latest validity proof for (1) compressed accounts specified by
     * an array of account hashes. (2) new unique addresses specified by an
     * array of addresses.
     *
     * Validity proofs prove the presence of compressed accounts in state trees
     * and the non-existence of addresses in address trees, respectively. They
     * enable verification without recomputing the merkle proof path, thus
     * lowering verification and data costs.
     *
     * @param hashes       Array of { hash: BN254, tree: PublicKey, queue: PublicKey }.
     * @param newAddresses Array of { address: BN254, tree: PublicKey, queue: PublicKey }.
     * @returns validity proof with context
     */
    async getValidityProofV0(
        hashes: HashWithTree[] = [],
        newAddresses: AddressWithTree[] = [],
    ): Promise<CompressedProofWithContext> {
        const { value } = await this.getValidityProofAndRpcContext(
            hashes,
            newAddresses,
        );
        return value;
    }

    /**
     * Fetch the latest validity proof for (1) compressed accounts specified by
     * an array of account hashes. (2) new unique addresses specified by an
     * array of addresses. Returns with context slot.
     *
     * Validity proofs prove the presence of compressed accounts in state trees
     * and the non-existence of addresses in address trees, respectively. They
     * enable verification without recomputing the merkle proof path, thus
     * lowering verification and data costs.
     *
     * @param hashes       Array of BN254 hashes.
     * @param newAddresses Array of BN254 new addresses. Optionally specify the
     *                     tree and queue for each address. Default to public
     *                     state tree/queue.
     * @returns validity proof with context
     */
    async getValidityProofAndRpcContext(
        hashes: HashWithTree[] = [],
        newAddresses: AddressWithTree[] = [],
    ): Promise<WithContext<CompressedProofWithContext>> {
        const unsafeRes = await rpcRequest(
            this.compressionApiEndpoint,
            'getValidityProof',
            {
                hashes: hashes.map(({ hash }) => encodeBN254toBase58(hash)),
                newAddressesWithTrees: newAddresses.map(
                    ({ address, tree }) => ({
                        address: encodeBN254toBase58(address),
                        tree: tree.toBase58(),
                    }),
                ),
            },
        );
        const res = create(
            unsafeRes,
            jsonRpcResultAndContext(ValidityProofResult),
        );
        if ('error' in res) {
            throw new SolanaJSONRPCError(
                res.error,
                `failed to get ValidityProof for compressed accounts ${hashes.map(hash => hash.toString())}`,
            );
        }
        const result = res.result.value;
        if (result === null) {
            throw new Error(
                `failed to get ValidityProof for compressed accounts ${hashes.map(hash => hash.toString())}`,
            );
        }
        const value: CompressedProofWithContext = {
            compressedProof: result.compressedProof,
            merkleTrees: result.merkleTrees,
            leafIndices: result.leafIndices,
            // Queues are taken from the caller-supplied inputs, in the same
            // order the server returns proof entries (hashes first, then
            // new addresses).
            nullifierQueues: [
                ...hashes.map(({ queue }) => queue),
                ...newAddresses.map(({ queue }) => queue),
            ],
            rootIndices: result.rootIndices,
            roots: result.roots,
            leaves: result.leaves,
        };
        return { value, context: res.result.context };
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/constants.ts
|
import { BN } from '@coral-xyz/anchor';
import { Buffer } from 'buffer';
import { ConfirmOptions, PublicKey } from '@solana/web3.js';
// BN254 scalar field modulus; all field elements must be < FIELD_SIZE.
export const FIELD_SIZE = new BN(
    '21888242871839275222246405745257275088548364400416034343698204186575808495617',
);
// Exclusive upper bound for addresses in address trees (per the name:
// highest valid address + 1).
export const HIGHEST_ADDRESS_PLUS_ONE = new BN(
    '452312848583266388373324160190187140051835877600158453279131187530910662655',
);
// TODO: implement properly
export const noopProgram = 'noopb9bkMVfRPU8AsbpTUg8AQkHtKwMYZiFUjNRtMmV';
export const lightProgram = 'SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7';
export const accountCompressionProgram = // also: merkletree program
    'compr6CUsB5m2jS4Y3831ztGSTnDpnKJTKS95d64XVq';
// TODO: better labelling. gov authority pda
export const getRegisteredProgramPda = () => {
    return new PublicKey('35hkDgaAKwMCaxRz2ocSZ6NaUrtKkyNqU6c4RV3tYJRh');
};
/** Derive the CPI authority PDA of the light system program. */
export const getAccountCompressionAuthority = () => {
    // TODO: can add check to ensure its consistent with the idl
    const programId = new PublicKey(lightProgram);
    const [authority] = PublicKey.findProgramAddressSync(
        [Buffer.from('cpi_authority')],
        programId,
    );
    return authority;
};
/** Static accounts required by most compression instructions, as a list. */
export const defaultStaticAccounts = () => {
    const keys = [
        getRegisteredProgramPda(),
        noopProgram,
        accountCompressionProgram,
        getAccountCompressionAuthority(),
    ];
    return keys.map(key => new PublicKey(key));
};
export const defaultStaticAccountsStruct = () => {
return {
registeredProgramPda: new PublicKey(getRegisteredProgramPda()),
noopProgram: new PublicKey(noopProgram),
accountCompressionProgram: new PublicKey(accountCompressionProgram),
accountCompressionAuthority: new PublicKey(
getAccountCompressionAuthority(),
),
cpiSignatureAccount: null,
};
};
/** Well-known local-test state/address tree accounts and tree height. */
export const defaultTestStateTreeAccounts = () => {
    const accounts = {
        nullifierQueue: new PublicKey(nullifierQueuePubkey),
        merkleTree: new PublicKey(merkletreePubkey),
        merkleTreeHeight: DEFAULT_MERKLE_TREE_HEIGHT,
        addressTree: new PublicKey(addressTree),
        addressQueue: new PublicKey(addressQueue),
    };
    return accounts;
};
// Well-known local-test state tree / queue and address tree / queue addresses.
export const nullifierQueuePubkey =
    'nfq1NvQDJ2GEgnS8zt9prAe8rjjpAW1zFkrvZoBR148';
export const merkletreePubkey = 'smt1NamzXdq4AMqS2fS2F1i5KTYPZRhoHgWx38d8WsT';
export const addressTree = 'amt1Ayt45jfbdw5YSo7iz6WZxUmnZsQTYXy82hVwyC2';
export const addressQueue = 'aq1S9z4reTSQAdgWHGD2zDaS39sjGrAxbR31vxJ2F4F';
// Default confirmation levels for sending and confirming transactions.
export const confirmConfig: ConfirmOptions = {
    commitment: 'confirmed',
    preflightCommitment: 'confirmed',
};
export const DEFAULT_MERKLE_TREE_HEIGHT = 26;
export const DEFAULT_MERKLE_TREE_ROOTS = 2800;
/** Threshold (per asset) at which new in-UTXOs get merged, in order to reduce UTXO pool size */
export const UTXO_MERGE_THRESHOLD = 20;
export const UTXO_MERGE_MAXIMUM = 10;
/**
 * Threshold after which the currently used transaction Merkle tree is switched
 * to the next one (95% of the tree's 2^height leaf capacity).
 */
export const TRANSACTION_MERKLE_TREE_ROLLOVER_THRESHOLD = new BN(
    Math.floor(2 ** DEFAULT_MERKLE_TREE_HEIGHT * 0.95),
);
/**
 * Fee to provide continuous funding for the state Merkle tree.
 * Once the state Merkle tree is at 95% capacity the accumulated fees
 * will be used to fund the next state Merkle tree with the same parameters.
 *
 * Is charged per output compressed account.
 */
export const STATE_MERKLE_TREE_ROLLOVER_FEE = new BN(300);
/**
 * Fee to provide continuous funding for the address queue and address Merkle tree.
 * Once the address Merkle tree is at 95% capacity the accumulated fees
 * will be used to fund the next address queue and address tree with the same parameters.
 *
 * Is charged per newly created address.
 */
export const ADDRESS_QUEUE_ROLLOVER_FEE = new BN(392);
/**
 * Is charged if the transaction nullifies at least one compressed account.
 */
export const STATE_MERKLE_TREE_NETWORK_FEE = new BN(5000);
/**
 * Is charged if the transaction creates at least one address.
 */
export const ADDRESS_TREE_NETWORK_FEE = new BN(5000);
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/index.ts
|
export * from './actions';
export * from './idls';
export * from './instruction';
export * from './programs';
export * from './state';
export * from './utils';
export * from './wallet';
export * from './constants';
export * from './errors';
export * from './rpc-interface';
export * from './rpc';
export * from './test-helpers';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/rpc-interface.ts
|
import { PublicKey, MemcmpFilter, DataSlice } from '@solana/web3.js';
import {
type as pick,
number,
string,
array,
literal,
union,
coerce,
instance,
create,
unknown,
any,
nullable,
Struct,
} from 'superstruct';
import {
BN254,
createBN254,
CompressedProof,
CompressedAccountWithMerkleContext,
MerkleContextWithMerkleProof,
bn,
TokenData,
} from './state';
import { BN } from '@coral-xyz/anchor';
export interface LatestNonVotingSignatures {
context: { slot: number };
value: {
items: {
signature: string;
slot: number;
blockTime: number;
error: string | null;
}[];
};
}
export interface GetCompressedAccountsByOwnerConfig {
filters?: GetCompressedAccountsFilter[];
dataSlice?: DataSlice;
cursor?: string;
limit?: BN;
}
export interface CompressedMintTokenHolders {
balance: BN;
owner: PublicKey;
}
export interface LatestNonVotingSignaturesPaginated {
context: { slot: number };
value: {
items: {
signature: string;
slot: number;
blockTime: number;
}[];
cursor: string | null;
};
}
export interface SignatureWithMetadata {
blockTime: number;
signature: string;
slot: number;
}
export interface HashWithTree {
hash: BN254;
tree: PublicKey;
queue: PublicKey;
}
export interface AddressWithTree {
address: BN254;
tree: PublicKey;
queue: PublicKey;
}
export interface CompressedTransaction {
compressionInfo: {
closedAccounts: {
account: CompressedAccountWithMerkleContext;
maybeTokenData: TokenData | null;
}[];
openedAccounts: {
account: CompressedAccountWithMerkleContext;
maybeTokenData: TokenData | null;
}[];
preTokenBalances?: {
owner: PublicKey;
mint: PublicKey;
amount: BN;
}[];
postTokenBalances?: {
owner: PublicKey;
mint: PublicKey;
amount: BN;
}[];
};
transaction: any;
}
export interface HexBatchInputsForProver {
'input-compressed-accounts': HexInputsForProver[];
}
export interface HexInputsForProver {
root: string;
pathIndex: number;
pathElements: string[];
leaf: string;
}
// TODO: Rename Compressed -> ValidityProof
export type CompressedProofWithContext = {
compressedProof: CompressedProof;
roots: BN[];
rootIndices: number[];
leafIndices: number[];
leaves: BN[];
merkleTrees: PublicKey[];
nullifierQueues: PublicKey[];
};
export interface GetCompressedTokenAccountsByOwnerOrDelegateOptions {
mint?: PublicKey;
cursor?: string;
limit?: BN;
}
export type TokenBalance = { balance: BN; mint: PublicKey };
/**
* **Cursor** is a unique identifier for a page of results by which the next page can be fetched.
*
* **Limit** is the maximum number of results to return per page.
*/
export interface PaginatedOptions {
cursor?: string;
limit?: BN;
}
/**
* Note, DataSizeFilter is currently not available.
*/
export type GetCompressedAccountsFilter = MemcmpFilter; // | DataSizeFilter;
export type GetCompressedAccountConfig = {
encoding?: string;
};
export type GetCompressedAccountsConfig = {
dataSlice: DataSlice;
filters?: GetCompressedAccountsFilter[];
};
export interface ParsedTokenAccount {
compressedAccount: CompressedAccountWithMerkleContext;
parsed: TokenData;
}
export type WithContext<T> = {
/** context */
context: {
slot: number;
};
/** response value */
value: T;
};
export type WithCursor<T> = {
/** context */
cursor: string | null;
/** response value */
items: T;
};
/**
 * @internal
 * Coerces a base58-encoded string into a web3.js `PublicKey`.
 */
const PublicKeyFromString = coerce(
    instance(PublicKey),
    string(),
    value => new PublicKey(value),
);
/**
 * @internal
 * Coerces a base58-encoded string into the 32-byte number-array form of a
 * public key.
 */
const ArrayFromString = coerce(instance(Array<number>), string(), value =>
    Array.from(new PublicKey(value).toBytes()),
);
/**
 * @internal
 * Coerces a base58-encoded string into a BN254 big number.
 */
const BN254FromString = coerce(instance(BN), string(), value => {
    return createBN254(value, 'base58');
});
// Coerces a JSON number into a BN, routing through a decimal string when the
// value exceeds Number.MAX_SAFE_INTEGER to avoid precision loss.
const BNFromInt = coerce(instance(BN), number(), value => {
    // Check if the number is safe
    if (Number.isSafeInteger(value)) {
        return bn(value);
    } else {
        // Convert to string if the number is unsafe
        return bn(value.toString(), 10);
    }
});
/**
 * @internal
 * Normalizes the base64 account-data field: the RPC returns an empty string
 * for "no data", which is mapped to `null` here.
 */
const Base64EncodedCompressedAccountDataResult = coerce(
    string(),
    string(),
    value => (value === '' ? null : value),
);
/**
 * @internal
 * Builds a superstruct schema for a JSON-RPC 2.0 envelope: either a success
 * object carrying `result`, or a failure object carrying an `error` with
 * code / message / data.
 */
export function createRpcResult<T, U>(result: Struct<T, U>) {
    return union([
        pick({
            jsonrpc: literal('2.0'),
            id: string(),
            result,
        }),
        pick({
            jsonrpc: literal('2.0'),
            id: string(),
            error: pick({
                code: unknown(),
                message: string(),
                data: nullable(any()),
            }),
        }),
    ]) as Struct<RpcResult<T>, null>;
}
/**
 * @internal
 * Envelope schema with an unvalidated `result`; used as the first pass
 * before the result payload is parsed against a concrete schema.
 */
const UnknownRpcResult = createRpcResult(unknown());
/**
 * @internal
 * Two-phase parse of a JSON-RPC response: first validate the envelope, then
 * (on success) validate the `result` payload against `schema`. Error
 * envelopes are passed through unchanged.
 */
export function jsonRpcResult<T, U>(schema: Struct<T, U>) {
    return coerce(createRpcResult(schema), UnknownRpcResult, value => {
        if ('error' in value) {
            return value as RpcResultError;
        } else {
            return {
                ...value,
                result: create(value.result, schema),
            } as RpcResultSuccess<T>;
        }
    }) as Struct<RpcResult<T>, null>;
}
// Wrapper pairing a response value with the slot context it was served at.
export type WithRpcContext<T> = {
    context: {
        slot: number;
    };
    value: T;
};
/**
 * @internal
 * Like `jsonRpcResult`, but for endpoints whose result is wrapped in a
 * `{ context: { slot }, value }` envelope.
 */
export function jsonRpcResultAndContext<T, U>(value: Struct<T, U>) {
    return jsonRpcResult(
        pick({
            context: pick({
                slot: number(),
            }),
            value,
        }),
    ) as Struct<RpcResult<WithRpcContext<T>>, null>;
}
/**
* @internal
*/
export const CompressedAccountResult = pick({
address: nullable(ArrayFromString),
hash: BN254FromString,
data: nullable(
pick({
data: Base64EncodedCompressedAccountDataResult,
dataHash: BN254FromString,
discriminator: BNFromInt,
}),
),
lamports: BNFromInt,
owner: PublicKeyFromString,
leafIndex: number(),
tree: PublicKeyFromString,
seq: nullable(BNFromInt),
slotCreated: BNFromInt,
});
export const TokenDataResult = pick({
mint: PublicKeyFromString,
owner: PublicKeyFromString,
amount: BNFromInt,
delegate: nullable(PublicKeyFromString),
state: string(),
});
/**
* @internal
*/
export const CompressedTokenAccountResult = pick({
tokenData: TokenDataResult,
account: CompressedAccountResult,
});
/**
* @internal
*/
export const MultipleCompressedAccountsResult = pick({
items: array(CompressedAccountResult),
});
/**
* @internal
*/
export const CompressedAccountsByOwnerResult = pick({
items: array(CompressedAccountResult),
cursor: nullable(string()),
});
/**
* @internal
*/
export const CompressedTokenAccountsByOwnerOrDelegateResult = pick({
items: array(CompressedTokenAccountResult),
cursor: nullable(string()),
});
/**
* @internal
*/
export const SlotResult = number();
/**
* @internal
*/
export const HealthResult = string();
/**
* @internal
*/
export const LatestNonVotingSignaturesResult = pick({
items: array(
pick({
signature: string(),
slot: number(),
blockTime: number(),
error: nullable(string()),
}),
),
});
/**
* @internal
*/
export const LatestNonVotingSignaturesResultPaginated = pick({
items: array(
pick({
signature: string(),
slot: number(),
blockTime: number(),
}),
),
cursor: nullable(string()),
});
/**
* @internal
*/
export const MerkeProofResult = pick({
hash: BN254FromString,
leafIndex: number(),
merkleTree: PublicKeyFromString,
proof: array(BN254FromString),
rootSeq: number(),
root: BN254FromString,
});
/**
* @internal
*/
export const NewAddressProofResult = pick({
address: BN254FromString,
nextIndex: number(),
merkleTree: PublicKeyFromString,
proof: array(BN254FromString), // this is: merkleProofHashedIndexedElementLeaf
rootSeq: number(),
root: BN254FromString,
lowerRangeAddress: BN254FromString, // this is: leafLowerRangeValue.
higherRangeAddress: BN254FromString, // this is: leafHigherRangeValue
lowElementLeafIndex: number(), // this is: indexHashedIndexedElementLeaf
});
/**
* @internal
*/
const CompressedProofResult = pick({
a: array(number()),
b: array(number()),
c: array(number()),
});
/**
* @internal
*/
export const ValidityProofResult = pick({
compressedProof: CompressedProofResult,
leafIndices: array(number()),
leaves: array(BN254FromString),
rootIndices: array(number()),
roots: array(BN254FromString),
merkleTrees: array(PublicKeyFromString),
// TODO: enable nullifierQueues
// nullifierQueues: array(PublicKeyFromString),
});
/**
* @internal
*/
export const MultipleMerkleProofsResult = array(MerkeProofResult);
/**
* @internal
*/
export const BalanceResult = pick({
amount: BNFromInt,
});
export const NativeBalanceResult = BNFromInt;
export const TokenBalanceResult = pick({
balance: BNFromInt,
mint: PublicKeyFromString,
});
export const TokenBalanceListResult = pick({
tokenBalances: array(TokenBalanceResult),
cursor: nullable(string()),
});
export const TokenBalanceListResultV2 = pick({
items: array(TokenBalanceResult),
cursor: nullable(string()),
});
export const CompressedMintTokenHoldersResult = pick({
cursor: nullable(string()),
items: array(
pick({
balance: BNFromInt,
owner: PublicKeyFromString,
}),
),
});
export const AccountProofResult = pick({
hash: array(number()),
root: array(number()),
proof: array(array(number())),
});
/** Parse a date string into epoch milliseconds (NaN if unparseable). */
export const toUnixTimestamp = (blockTime: string): number =>
    Date.parse(blockTime);
export const SignatureListResult = pick({
items: array(
pick({
blockTime: number(),
signature: string(),
slot: number(),
}),
),
});
export const SignatureListWithCursorResult = pick({
items: array(
pick({
blockTime: number(),
signature: string(),
slot: number(),
}),
),
cursor: nullable(string()),
});
export const CompressedTransactionResult = pick({
compressionInfo: pick({
closedAccounts: array(
pick({
account: CompressedAccountResult,
optionalTokenData: nullable(TokenDataResult),
}),
),
openedAccounts: array(
pick({
account: CompressedAccountResult,
optionalTokenData: nullable(TokenDataResult),
}),
),
}),
/// TODO: add transaction struct
/// https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/transaction-status/src/lib.rs#L1061
transaction: any(),
});
export interface CompressionApiInterface {
getCompressedAccount(
address?: BN254,
hash?: BN254,
): Promise<CompressedAccountWithMerkleContext | null>;
getCompressedBalance(address?: BN254, hash?: BN254): Promise<BN | null>;
getCompressedBalanceByOwner(owner: PublicKey): Promise<BN>;
getCompressedAccountProof(
hash: BN254,
): Promise<MerkleContextWithMerkleProof>;
getMultipleCompressedAccounts(
hashes: BN254[],
): Promise<CompressedAccountWithMerkleContext[]>;
getMultipleCompressedAccountProofs(
hashes: BN254[],
): Promise<MerkleContextWithMerkleProof[]>;
getValidityProof(
hashes: BN254[],
newAddresses: BN254[],
): Promise<CompressedProofWithContext>;
getValidityProofV0(
hashes: HashWithTree[],
newAddresses: AddressWithTree[],
): Promise<CompressedProofWithContext>;
getValidityProofAndRpcContext(
hashes: HashWithTree[],
newAddresses: AddressWithTree[],
): Promise<WithContext<CompressedProofWithContext>>;
getCompressedAccountsByOwner(
owner: PublicKey,
config?: GetCompressedAccountsByOwnerConfig,
): Promise<WithCursor<CompressedAccountWithMerkleContext[]>>;
getCompressedMintTokenHolders(
mint: PublicKey,
options?: PaginatedOptions,
): Promise<WithContext<WithCursor<CompressedMintTokenHolders[]>>>;
getCompressedTokenAccountsByOwner(
publicKey: PublicKey,
options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
): Promise<WithCursor<ParsedTokenAccount[]>>;
getCompressedTokenAccountsByDelegate(
delegate: PublicKey,
options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
): Promise<WithCursor<ParsedTokenAccount[]>>;
getCompressedTokenAccountBalance(hash: BN254): Promise<{ amount: BN }>;
getCompressedTokenBalancesByOwner(
publicKey: PublicKey,
options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
): Promise<WithCursor<TokenBalance[]>>;
getCompressedTokenBalancesByOwnerV2(
publicKey: PublicKey,
options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
): Promise<WithContext<WithCursor<TokenBalance[]>>>;
getTransactionWithCompressionInfo(
signature: string,
): Promise<CompressedTransaction | null>;
getCompressionSignaturesForAccount(
hash: BN254,
): Promise<SignatureWithMetadata[]>;
getCompressionSignaturesForAddress(
address: PublicKey,
options?: PaginatedOptions,
): Promise<WithCursor<SignatureWithMetadata[]>>;
getCompressionSignaturesForOwner(
owner: PublicKey,
options?: PaginatedOptions,
): Promise<WithCursor<SignatureWithMetadata[]>>;
getCompressionSignaturesForTokenOwner(
owner: PublicKey,
options?: PaginatedOptions,
): Promise<WithCursor<SignatureWithMetadata[]>>;
getLatestNonVotingSignatures(
limit?: number,
cursor?: string,
): Promise<LatestNonVotingSignatures>;
getLatestCompressionSignatures(
cursor?: string,
limit?: number,
): Promise<LatestNonVotingSignaturesPaginated>;
getIndexerHealth(): Promise<string>;
getIndexerSlot(): Promise<number>;
}
// Public types for consumers
export type RpcResultSuccess<T> = {
jsonrpc: '2.0';
id: string;
result: T;
};
export type RpcResultError = {
jsonrpc: '2.0';
id: string;
error: {
code: unknown;
message: string;
data?: any;
};
};
export type RpcResult<T> = RpcResultSuccess<T> | RpcResultError;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/validation.ts
|
import { BN } from '@coral-xyz/anchor';
import {
CompressedAccount,
CompressedAccountWithMerkleContext,
bn,
} from '../state';
/** Throw if `balance` is negative (insufficient funds for the transfer). */
export const validateSufficientBalance = (balance: BN) => {
    const isNegative = balance.lt(bn(0));
    if (isNegative) {
        throw new Error('Not enough balance for transfer');
    }
};
/**
 * Throw unless every provided compressed account shares the owner of the
 * first one. Throws on an empty input as well.
 */
export const validateSameOwner = (
    compressedAccounts:
        | CompressedAccount[]
        | CompressedAccountWithMerkleContext[],
) => {
    if (compressedAccounts.length === 0) {
        throw new Error('No accounts provided for validation');
    }
    const expectedOwner = compressedAccounts[0].owner;
    for (const account of compressedAccounts) {
        if (!account.owner.equals(expectedOwner)) {
            throw new Error('All input accounts must have the same owner');
        }
    }
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/pipe.ts
|
/**
 * Left-to-right function composition: `pipe(f, g, h)(x)` === `h(g(f(x)))`.
 */
export function pipe<T, R>(
    initialFunction: (arg: T) => R,
    ...functions: ((arg: R) => R)[]
): (initialValue: T) => R {
    return (initialValue: T): R => {
        let accumulator = initialFunction(initialValue);
        for (const fn of functions) {
            accumulator = fn(accumulator);
        }
        return accumulator;
    };
}
//@ts-ignore
if (import.meta.vitest) {
//@ts-ignore
const { it, expect, describe } = import.meta.vitest;
describe('pipe', () => {
it('should return the result of applying all fns to the initial value', () => {
const addOne = (x: number) => x + 1;
const multiplyByTwo = (x: number) => x * 2;
const subtractThree = (x: number) => x - 3;
const addOneMultiplyByTwoSubtractThree = pipe(
addOne,
multiplyByTwo,
subtractThree,
);
expect(addOneMultiplyByTwoSubtractThree(5)).toBe(9);
});
});
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/airdrop.ts
|
import {
Commitment,
Connection,
PublicKey,
TransactionConfirmationStrategy,
} from '@solana/web3.js';
/** Request an airdrop to `recipientPublicKey` and wait for confirmation. */
export async function airdropSol({
    connection,
    lamports,
    recipientPublicKey,
}: {
    connection: Connection;
    lamports: number;
    recipientPublicKey: PublicKey;
}) {
    // Request the airdrop, then block until the cluster confirms it.
    const signature = await connection.requestAirdrop(
        recipientPublicKey,
        lamports,
    );
    await confirmTransaction(connection, signature);
    return signature;
}
/**
 * Confirm `signature` at the given commitment, anchored to the latest
 * blockhash so the confirmation can expire.
 */
export async function confirmTransaction(
    connection: Connection,
    signature: string,
    confirmation: Commitment = 'confirmed',
) {
    const { blockhash, lastValidBlockHeight } =
        await connection.getLatestBlockhash(confirmation);
    const strategy: TransactionConfirmationStrategy = {
        signature: signature.toString(),
        lastValidBlockHeight,
        blockhash,
    };
    return await connection.confirmTransaction(strategy, confirmation);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/sleep.ts
|
// zzz
/** Resolve after `ms` milliseconds. */
export function sleep(ms: number): Promise<void> {
    return new Promise<void>(resolve => {
        setTimeout(() => resolve(), ms);
    });
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/calculate-compute-unit-price.ts
|
/**
 * @param targetLamports - Target priority fee in lamports
 * @param computeUnits - Expected compute units used by the transaction
 * @returns microLamports per compute unit (use in
 * `ComputeBudgetProgram.setComputeUnitPrice`)
 */
export function calculateComputeUnitPrice(
    targetLamports: number,
    computeUnits: number,
): number {
    // 1 lamport = 1_000_000 microLamports; round up so the target fee is
    // always reached.
    const microLamportsPerUnit = (targetLamports * 1_000_000) / computeUnits;
    return Math.ceil(microLamportsPerUnit);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/parse-validity-proof.ts
|
import { BN } from '@coral-xyz/anchor';
import { FIELD_SIZE } from '../constants';
import { CompressedProof } from '../state';
interface GnarkProofJson {
ar: string[];
bs: string[][];
krs: string[];
}
type ProofABC = {
a: Uint8Array;
b: Uint8Array;
c: Uint8Array;
};
/**
 * Deterministic placeholder proof with the compressed-proof shape
 * (a: 32, b: 64, c: 32 bytes), each filled with the pattern 1..n.
 */
export const placeholderValidityProof = () => {
    const pattern = (length: number) =>
        Array.from({ length }, (_, i) => i + 1);
    return { a: pattern(32), b: pattern(64), c: pattern(32) };
};
/** Throw unless `proof` has the compressed shape a:32, b:64, c:32 bytes. */
export const checkValidityProofShape = (proof: CompressedProof) => {
    const hasValidShape =
        proof.a.length === 32 &&
        proof.b.length === 64 &&
        proof.c.length === 32;
    if (!hasValidShape) {
        throw new Error('ValidityProof has invalid shape');
    }
};
/** Decode a gnark JSON proof into raw big-endian A/B/C point bytes. */
export function proofFromJsonStruct(json: GnarkProofJson): ProofABC {
    // G1 point A: x || y, 32 big-endian bytes each.
    const a = new Uint8Array([
        ...deserializeHexStringToBeBytes(json.ar[0]),
        ...deserializeHexStringToBeBytes(json.ar[1]),
    ]);
    // G2 point B: x0 || x1 || y0 || y1.
    const b = new Uint8Array([
        ...deserializeHexStringToBeBytes(json.bs[0][0]),
        ...deserializeHexStringToBeBytes(json.bs[0][1]),
        ...deserializeHexStringToBeBytes(json.bs[1][0]),
        ...deserializeHexStringToBeBytes(json.bs[1][1]),
    ]);
    // G1 point C: x || y.
    const c = new Uint8Array([
        ...deserializeHexStringToBeBytes(json.krs[0]),
        ...deserializeHexStringToBeBytes(json.krs[1]),
    ]);
    return { a, b, c };
}
// TODO: add unit test for negation
// TODO: test if LE BE issue. unit test
/**
 * Negate proof point A and compress all three proof points to their x
 * coordinates, encoding the sign of each y coordinate into the top bit of
 * the first x byte (see `addBitmaskToByte`).
 */
export function negateAndCompressProof(proof: ProofABC): CompressedProof {
    const proofA = proof.a;
    const proofB = proof.b;
    const proofC = proof.c;
    // A (G1): x is bytes 0..32, y is bytes 32..64 (big-endian).
    const aXElement = proofA.slice(0, 32);
    const aYElement = new BN(proofA.slice(32, 64), 32, 'be');
    /// Negate
    // NOTE(review): the sign of A is deliberately inverted here — that is
    // the negation before compression. Confirm against the on-chain
    // verifier's expected encoding.
    const proofAIsPositive = yElementIsPositiveG1(aYElement) ? false : true;
    /// First byte of proofA is the bitmask
    aXElement[0] = addBitmaskToByte(aXElement[0], proofAIsPositive);
    // B (G2): x is bytes 0..64, y is bytes 64..128; each coordinate is two
    // 32-byte field elements.
    const bXElement = proofB.slice(0, 64);
    const bYElement = proofB.slice(64, 128);
    const proofBIsPositive = yElementIsPositiveG2(
        new BN(bYElement.slice(0, 32), 32, 'be'),
        new BN(bYElement.slice(32, 64), 32, 'be'),
    );
    bXElement[0] = addBitmaskToByte(bXElement[0], proofBIsPositive);
    // C (G1): same layout as A, but without negation.
    const cXElement = proofC.slice(0, 32);
    const cYElement = proofC.slice(32, 64);
    const proofCIsPositive = yElementIsPositiveG1(new BN(cYElement, 32, 'be'));
    cXElement[0] = addBitmaskToByte(cXElement[0], proofCIsPositive);
    const compressedProof: CompressedProof = {
        a: Array.from(aXElement),
        b: Array.from(bXElement),
        c: Array.from(cXElement),
    };
    return compressedProof;
}
function deserializeHexStringToBeBytes(hexStr: string): Uint8Array {
// Using BN for simpler conversion from hex string to byte array
const bn = new BN(
hexStr.startsWith('0x') ? hexStr.substring(2) : hexStr,
'hex',
);
return new Uint8Array(bn.toArray('be', 32));
}
/** A G1 y coordinate is "positive" when y <= p - y (lower half of field). */
function yElementIsPositiveG1(yElement: BN): boolean {
    const negated = FIELD_SIZE.sub(yElement);
    return yElement.lte(negated);
}
/**
 * Sign of a G2 y coordinate: decide on the first component relative to the
 * field midpoint; on a tie, fall back to the second component.
 */
function yElementIsPositiveG2(yElement1: BN, yElement2: BN): boolean {
    const fieldMidpoint = FIELD_SIZE.div(new BN(2));
    if (!yElement1.eq(fieldMidpoint)) {
        // First component differs from the midpoint: it alone decides.
        return yElement1.lt(fieldMidpoint);
    }
    // First component equals the midpoint: the second component decides.
    return yElement2.lt(fieldMidpoint);
}
// bitmask compatible with solana altbn128 compression syscall and arkworks' implementation
// https://github.com/arkworks-rs/algebra/blob/master/ff/src/fields/models/fp/mod.rs#L580
// https://github.com/arkworks-rs/algebra/blob/master/serialize/src/flags.rs#L18
// fn u8_bitmask(value: u8, inf: bool, neg: bool) -> u8 {
// let mut mask = 0;
// match self {
// inf => mask |= 1 << 6,
// neg => mask |= 1 << 7,
// _ => (),
// }
// mask
// }
/**
 * Encode the y sign into `byte`: negative y sets bit 7, matching the
 * arkworks / solana altbn128 compression flag encoding (see comment above).
 */
function addBitmaskToByte(byte: number, yIsPositive: boolean): number {
    return yIsPositive ? byte : byte | (1 << 7);
}
//@ts-ignore
if (import.meta.vitest) {
//@ts-ignore
const { it, expect, describe } = import.meta.vitest;
// Unit test for addBitmaskToByte function
describe('addBitmaskToByte', () => {
it('should add a bitmask to the byte if yIsPositive is false', () => {
const byte = 0b00000000;
const yIsPositive = false;
const result = addBitmaskToByte(byte, yIsPositive);
expect(result).toBe(0b10000000); // 128 in binary, which is 1 << 7
});
it('should not modify the byte if yIsPositive is true', () => {
const byte = 0b00000000;
const yIsPositive = true;
const result = addBitmaskToByte(byte, yIsPositive);
expect(result).toBe(0b00000000);
});
});
describe('test prover server', () => {
const TEST_JSON = {
ar: [
'0x22bdaa3187d8fe294925a66fa0165a11bc9e07678fa2fc72402ebfd33d521c69',
'0x2d18ff780b69898b4cdd8d7b6ac72d077799399f0f45e52665426456f3903584',
],
bs: [
[
'0x138cc0962e49f76a701d2871d2799892c9782940095eb0429e979f336d2e162d',
'0x2fe1bfbb15cbfb83d7e00ace23e45f890604003783eaf34affa35e0d6f4822bc',
],
[
'0x1a89264f82cc6e8ef1c696bea0b5803c28c0ba6ab61366bcb71e73a4135cae8d',
'0xf778d857b3df01a4100265c9d014ce02d47425f0114685356165fa5ee3f3a26',
],
],
krs: [
'0x176b6ae9001f66832951e2d43a98a972667447bb1781f534b70cb010270dcdd3',
'0xb748d5fac1686db28d94c02250af7eb4f28dfdabc8983305c45bcbc6e163eeb',
],
};
const COMPRESSED_PROOF_A = [
34, 189, 170, 49, 135, 216, 254, 41, 73, 37, 166, 111, 160, 22, 90,
17, 188, 158, 7, 103, 143, 162, 252, 114, 64, 46, 191, 211, 61, 82,
28, 105,
];
const COMPRESSED_PROOF_B = [
147, 140, 192, 150, 46, 73, 247, 106, 112, 29, 40, 113, 210, 121,
152, 146, 201, 120, 41, 64, 9, 94, 176, 66, 158, 151, 159, 51, 109,
46, 22, 45, 47, 225, 191, 187, 21, 203, 251, 131, 215, 224, 10, 206,
35, 228, 95, 137, 6, 4, 0, 55, 131, 234, 243, 74, 255, 163, 94, 13,
111, 72, 34, 188,
];
const COMPRESSED_PROOF_C = [
23, 107, 106, 233, 0, 31, 102, 131, 41, 81, 226, 212, 58, 152, 169,
114, 102, 116, 71, 187, 23, 129, 245, 52, 183, 12, 176, 16, 39, 13,
205, 211,
];
it('should execute a compressed token mint', async () => {
const proof = proofFromJsonStruct(TEST_JSON);
const compressedProof = negateAndCompressProof(proof);
expect(compressedProof.a).toEqual(COMPRESSED_PROOF_A);
expect(compressedProof.b).toEqual(COMPRESSED_PROOF_B);
expect(compressedProof.c).toEqual(COMPRESSED_PROOF_C);
});
});
describe('Validity Proof Functions', () => {
describe('placeholderValidityProof', () => {
it('should create a validity proof with correct shape', () => {
const validityProof = placeholderValidityProof();
expect(validityProof.a.length).toBe(32);
expect(validityProof.b.length).toBe(64);
expect(validityProof.c.length).toBe(32);
});
});
describe('checkValidityProofShape', () => {
it('should not throw an error for valid proof shape', () => {
const validProof = {
a: Array.from(new Uint8Array(32)),
b: Array.from(new Uint8Array(64)),
c: Array.from(new Uint8Array(32)),
};
expect(() => checkValidityProofShape(validProof)).not.toThrow();
});
it('should throw an error for an invalid proof', () => {
const invalidProof = {
a: Array.from(new Uint8Array(31)), // incorrect length
b: Array.from(new Uint8Array(64)),
c: Array.from(new Uint8Array(32)),
};
expect(() => checkValidityProofShape(invalidProof)).toThrow(
'ValidityProof has invalid shape',
);
});
});
});
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/test-utils.ts
|
import { Connection, Keypair, Signer } from '@solana/web3.js';
import { confirmTx } from '../utils/send-and-confirm';
import { Rpc } from '../rpc';
// Global counter used by getTestKeypair when no explicit counter is given.
let c = 1;
// Well-known deterministic test signers, seeded from fixed high counters.
export const ALICE = getTestKeypair(255);
export const BOB = getTestKeypair(254);
export const CHARLIE = getTestKeypair(253);
export const DAVE = getTestKeypair(252);
/**
* Create a new account and airdrop lamports to it
*
* @param rpc connection to use
* @param lamports amount of lamports to airdrop
* @param counter counter to use for generating the keypair.
* If undefined or >255, generates random keypair.
*/
export async function newAccountWithLamports(
    rpc: Rpc,
    lamports = 1000000000,
    counter: number | undefined = undefined,
): Promise<Signer> {
    // Missing or out-of-range counter -> random keypair (counter > 255
    // makes getTestKeypair generate a random one).
    const effectiveCounter =
        counter === undefined || counter > 255 ? 256 : counter;
    const account = getTestKeypair(effectiveCounter);
    const sig = await rpc.requestAirdrop(account.publicKey, lamports);
    await confirmTx(rpc, sig);
    return account;
}
export function getConnection(): Connection {
const url = 'http://127.0.0.1:8899';
const connection = new Connection(url, 'confirmed');
return connection;
}
/**
* For use in tests.
* Generate a unique keypair by passing in a counter <255. If no counter
* is supplied, it uses and increments a global counter.
* if counter > 255, generates random keypair
*/
export function getTestKeypair(
    counter: number | undefined = undefined,
): Keypair {
    // NOTE(review): `!counter` also matches 0, so an explicit counter of 0
    // falls through to the global counter — confirm this is intended.
    if (!counter) {
        counter = c;
        c++;
    }
    // Counters above 255 cannot fit in the seed byte: random keypair.
    if (counter > 255) {
        return Keypair.generate();
    }
    // Deterministic keypair: 32 zero bytes with the counter in the last byte.
    const seed = new Uint8Array(32);
    seed[31] = counter; // le
    return Keypair.fromSeed(seed);
}
//@ts-ignore
if (import.meta.vitest) {
//@ts-ignore
const { describe, it, expect } = import.meta.vitest;
describe('getTestKeypair', () => {
it('should generate a keypair with a specific counter', () => {
const keypair = getTestKeypair(10);
const keypair2 = getTestKeypair(10);
expect(keypair).toEqual(keypair2);
expect(keypair).toBeInstanceOf(Keypair);
expect(keypair.publicKey).toBeDefined();
expect(keypair.secretKey).toBeDefined();
});
it('should generate random keypair if counter is greater than 255', () => {
const testFn = () => getTestKeypair(256);
const kp1 = testFn();
const kp2 = testFn();
expect(kp1).not.toEqual(kp2);
});
it('should increment the global counter if no counter is provided', () => {
const initialKeypair = getTestKeypair();
const nextKeypair = getTestKeypair();
const nextNextKeypair = getTestKeypair();
const nextNextNextKeypair = getTestKeypair(3);
expect(initialKeypair).not.toEqual(nextKeypair);
expect(nextKeypair).not.toEqual(nextNextKeypair);
expect(nextNextKeypair).toEqual(nextNextNextKeypair);
});
});
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/conversion.ts
|
import { Buffer } from 'buffer';
import { bn, createBN254 } from '../state/BN254';
import { FIELD_SIZE } from '../constants';
import { keccak_256 } from '@noble/hashes/sha3';
import { Keypair } from '@solana/web3.js';
import { BN } from '@coral-xyz/anchor';
/** Build a Keypair from a 64-byte secret-key number array. */
export function byteArrayToKeypair(byteArray: number[]): Keypair {
    const secretKey = Uint8Array.from(byteArray);
    return Keypair.fromSecretKey(secretKey);
}
/**
 * @internal
 * convert BN to hex with '0x' prefix
 */
export function toHex(bn: BN): string {
    const hex = bn.toString('hex');
    return `0x${hex}`;
}
/** Wrap a scalar in an array; pass arrays through unchanged. */
export const toArray = <T>(value: T | T[]) => {
    if (Array.isArray(value)) {
        return value;
    }
    return [value];
};
/** Interpret `buf` as a BN254 value and return its decimal string. */
export const bufToDecStr = (buf: Buffer): string => {
    const value = createBN254(buf);
    return value.toString();
};
/** True when `bytes` (big-endian) is strictly below the BN254 field prime. */
function isSmallerThanBn254FieldSizeBe(bytes: Buffer): boolean {
    return bn(bytes, undefined, 'be').lt(FIELD_SIZE);
}
/**
 * Hash the provided `bytes` with Keccak256 and ensure the result fits in the
 * BN254 prime field by repeatedly hashing the inputs with various "bump seeds"
 * and truncating the resulting hash to 31 bytes.
 *
 * @deprecated Use `hashvToBn254FieldSizeBe` instead.
 */
export function hashToBn254FieldSizeBe(bytes: Buffer): [Buffer, number] | null {
    // TODO(vadorovsky, affects-onchain): Get rid of the bump mechanism, it
    // makes no sense. Doing the same as in the `hashvToBn254FieldSizeBe` below
    // - overwriting the most significant byte with zero - is sufficient for
    // truncation, it's also faster, doesn't force us to return `Option` and
    // care about handling an error which is practically never returned.
    //
    // The reason we can't do it now is that it would affect on-chain programs.
    // Once we can update programs, we can get rid of the seed bump (or even of
    // this function all together in favor of the `hashv` variant).
    for (let bumpSeed = 255; bumpSeed >= 0; bumpSeed--) {
        const seededInput = Buffer.concat([bytes, Buffer.from([bumpSeed])]);
        const hash = keccak_256(seededInput);
        if (hash.length !== 32) {
            throw new Error('Invalid hash length');
        }
        // Zero the most significant byte to truncate to 31 bytes.
        hash[0] = 0;
        const truncated = Buffer.from(hash);
        if (isSmallerThanBn254FieldSizeBe(truncated)) {
            return [truncated, bumpSeed];
        }
    }
    return null;
}
/**
 * Hash the provided `bytes` with Keccak256 and ensure that the result fits in
 * the BN254 prime field by truncating the resulting hash to 31 bytes.
 *
 * @param bytes Input bytes
 *
 * @returns Hash digest
 */
export function hashvToBn254FieldSizeBe(bytes: Uint8Array[]): Uint8Array {
    const hasher = keccak_256.create();
    bytes.forEach(input => hasher.update(input));
    const digest = hasher.digest();
    // Zero the most significant byte so the value fits in <254 bits.
    digest[0] = 0;
    return digest;
}
/**
 * Append the elements of `items` that are not already present in `map`,
 * preserving order. Mutates `map` in place.
 *
 * Uses a Set for membership checks instead of repeated `Array.includes`
 * scans, turning O(n*m) into O(n+m). Set membership uses SameValueZero,
 * the same equality semantics as `Array.prototype.includes`, so behavior
 * is unchanged.
 */
export function pushUniqueItems<T>(items: T[], map: T[]): void {
    const seen = new Set(map);
    for (const item of items) {
        if (!seen.has(item)) {
            seen.add(item);
            map.push(item);
        }
    }
}
/**
 * Recursively convert snake_case / kebab-case object keys to camelCase.
 * Arrays are mapped element-wise; values that are neither arrays nor plain
 * objects are returned unchanged.
 */
export function toCamelCase(
    obj: Array<any> | unknown | any,
): Array<any> | unknown | any {
    if (Array.isArray(obj)) {
        return obj.map(element => toCamelCase(element));
    }
    if (obj !== null && obj.constructor === Object) {
        const converted: any = {};
        for (const key of Object.keys(obj)) {
            // '-x' / '_x' becomes 'X'; other characters are left as-is.
            const camelCaseKey = key.replace(/([-_][a-z])/gi, match =>
                match.toUpperCase().replace('-', '').replace('_', ''),
            );
            converted[camelCaseKey] = toCamelCase(obj[key]);
        }
        return converted;
    }
    return obj;
}
// FIXME: check bundling and how to resolve the type error
// In-source vitest suite: `import.meta.vitest` is only defined when the file
// is executed by the vitest runner, so this block is dead code in production
// bundles.
//@ts-ignore
if (import.meta.vitest) {
    //@ts-ignore
    const { it, expect, describe } = import.meta.vitest;
    describe('toArray function', () => {
        it('should convert a single item to an array', () => {
            expect(toArray(1)).toEqual([1]);
        });
        it('should leave an array unchanged', () => {
            expect(toArray([1, 2, 3])).toEqual([1, 2, 3]);
        });
    });
    describe('isSmallerThanBn254FieldSizeBe function', () => {
        it('should return true for a small number', () => {
            const buf = Buffer.from(
                '0000000000000000000000000000000000000000000000000000000000000000',
                'hex',
            );
            expect(isSmallerThanBn254FieldSizeBe(buf)).toBe(true);
        });
        it('should return false for a large number', () => {
            // 0x65 moved to the most significant byte via reverse() makes the
            // value exceed the BN254 field modulus.
            const buf = Buffer.from(
                '0000000000000000000000000000000000000000000000000000000000000065',
                'hex',
            ).reverse();
            expect(isSmallerThanBn254FieldSizeBe(buf)).toBe(false);
        });
    });
    describe('hashToBn254FieldSizeBe function', () => {
        // NOTE(review): refBumpSeed is never referenced below — presumably it
        // was meant to be asserted against result[1]; confirm and either use
        // or remove it.
        const refBumpSeed = [252];
        const bytes = [
            131, 219, 249, 246, 221, 196, 33, 3, 114, 23, 121, 235, 18, 229, 71,
            152, 39, 87, 169, 208, 143, 101, 43, 128, 245, 59, 22, 134, 182,
            231, 116, 33,
        ];
        // Known-good vector: expected 31-byte truncated digest (leading zero).
        const refResult = [
            0, 146, 15, 187, 171, 163, 183, 93, 237, 121, 37, 231, 55, 162, 208,
            188, 244, 77, 185, 157, 93, 9, 101, 193, 220, 247, 109, 94, 48, 212,
            98, 149,
        ];
        // hashToBn254FieldSizeBe is synchronous; the `await`s below are no-ops
        // kept for historical reasons.
        it('should return a valid value for initial buffer', async () => {
            const result = await hashToBn254FieldSizeBe(Buffer.from(bytes));
            expect(Array.from(result![0])).toEqual(refResult);
        });
        it('should return a valid value for initial buffer', async () => {
            const buf = Buffer.from(
                '0000000000000000000000000000000000000000000000000000000000000000',
                'hex',
            );
            const result = await hashToBn254FieldSizeBe(buf);
            expect(result).not.toBeNull();
            if (result) {
                expect(result[0]).toBeInstanceOf(Buffer);
                expect(result[1]).toBe(255);
            }
        });
        it('should return a valid value for a buffer that can be hashed to a smaller value', async () => {
            const buf = Buffer.from(
                'fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe',
                'hex',
            );
            const result = await hashToBn254FieldSizeBe(buf);
            expect(result).not.toBeNull();
            if (result) {
                expect(result[1]).toBeLessThanOrEqual(255);
                expect(result[0]).toBeInstanceOf(Buffer);
                // Check if the hashed value is indeed smaller than the bn254 field size
                expect(isSmallerThanBn254FieldSizeBe(result[0])).toBe(true);
            }
        });
        it('should correctly hash the input buffer', async () => {
            const buf = Buffer.from('deadbeef', 'hex');
            const result = await hashToBn254FieldSizeBe(buf);
            expect(result).not.toBeNull();
            if (result) {
                // Since the actual hash value depends on the crypto implementation and input,
                // we cannot predict the exact output. However, we can check if the output is valid.
                expect(result[0].length).toBe(32); // SHA-256 hash length
                expect(result[1]).toBeLessThanOrEqual(255);
                expect(isSmallerThanBn254FieldSizeBe(result[0])).toBe(true);
            }
        });
    });
    describe('pushUniqueItems function', () => {
        it('should add unique items', () => {
            const map = [1, 2, 3];
            const itemsToAdd = [3, 4, 5];
            pushUniqueItems(itemsToAdd, map);
            expect(map).toEqual([1, 2, 3, 4, 5]);
        });
        it('should ignore duplicates', () => {
            const map = [1, 2, 3];
            const itemsToAdd = [1, 2, 3];
            pushUniqueItems(itemsToAdd, map);
            expect(map).toEqual([1, 2, 3]);
        });
        it('should handle empty arrays', () => {
            const map: number[] = [];
            const itemsToAdd: number[] = [];
            pushUniqueItems(itemsToAdd, map);
            expect(map).toEqual([]);
        });
    });
    describe('bufToDecStr', () => {
        it("should convert buffer [0] to '0'", () => {
            expect(bufToDecStr(Buffer.from([0]))).toEqual('0');
        });
        it("should convert buffer [1] to '1'", () => {
            expect(bufToDecStr(Buffer.from([1]))).toEqual('1');
        });
        it("should convert buffer [1, 0] to '256'", () => {
            expect(bufToDecStr(Buffer.from([1, 0]))).toEqual('256');
        });
        it("should convert buffer [1, 1] to '257'", () => {
            expect(bufToDecStr(Buffer.from([1, 1]))).toEqual('257');
        });
        it("should convert buffer [7, 91, 205, 21] to '123456789'", () => {
            expect(bufToDecStr(Buffer.from([7, 91, 205, 21]))).toEqual(
                '123456789',
            );
        });
    });
    describe('toCamelCase', () => {
        it('should convert object keys to camelCase', () => {
            const input = { test_key: 1, 'another-testKey': 2 };
            const expected = { testKey: 1, anotherTestKey: 2 };
            expect(toCamelCase(input)).toEqual(expected);
        });
        it('should handle arrays of objects', () => {
            const input = [{ array_key: 3 }, { 'another_array-key': 4 }];
            const expected = [{ arrayKey: 3 }, { anotherArrayKey: 4 }];
            expect(toCamelCase(input)).toEqual(expected);
        });
        it('should return the input if it is neither an object nor an array', () => {
            const input = 'testString';
            expect(toCamelCase(input)).toBe(input);
        });
    });
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/send-and-confirm.ts
|
import {
VersionedTransaction,
TransactionConfirmationStrategy,
SignatureResult,
RpcResponseAndContext,
Signer,
TransactionInstruction,
TransactionMessage,
ConfirmOptions,
TransactionSignature,
PublicKey,
AddressLookupTableAccount,
} from '@solana/web3.js';
import { Rpc } from '../rpc';
/**
 * Builds a versioned Transaction from instructions.
 *
 * @param instructions instructions to include
 * @param payerPublicKey fee payer public key
 * @param blockhash blockhash to use
 * @param lookupTableAccounts lookup table accounts to include
 *
 * @return VersionedTransaction
 */
export function buildTx(
    instructions: TransactionInstruction[],
    payerPublicKey: PublicKey,
    blockhash: string,
    lookupTableAccounts?: AddressLookupTableAccount[],
): VersionedTransaction {
    const message = new TransactionMessage({
        payerKey: payerPublicKey,
        recentBlockhash: blockhash,
        instructions,
    });
    const compiledMessage = message.compileToV0Message(lookupTableAccounts);
    return new VersionedTransaction(compiledMessage);
}
/**
 * Sends a versioned transaction and confirms it.
 *
 * @param rpc connection to use
 * @param tx versioned transaction to send
 * @param confirmOptions confirmation options
 * @param blockHashCtx blockhash context for confirmation
 *
 * @return TransactionSignature
 */
export async function sendAndConfirmTx(
    rpc: Rpc,
    tx: VersionedTransaction,
    confirmOptions?: ConfirmOptions,
    blockHashCtx?: { blockhash: string; lastValidBlockHeight: number },
): Promise<TransactionSignature> {
    const txId = await rpc.sendTransaction(tx, confirmOptions);
    // Fetch a fresh blockhash context only when the caller didn't supply one.
    const ctx = blockHashCtx ?? (await rpc.getLatestBlockhash());
    const strategy: TransactionConfirmationStrategy = {
        signature: txId,
        blockhash: ctx.blockhash,
        lastValidBlockHeight: ctx.lastValidBlockHeight,
    };
    const confirmation = await rpc.confirmTransaction(
        strategy,
        confirmOptions?.commitment || rpc.commitment || 'confirmed',
    );
    // Block until the rpc reports the confirmation slot as indexed.
    await rpc.confirmTransactionIndexed(confirmation.context.slot);
    return txId;
}
/**
 * Confirms a transaction with a given txId.
 *
 * @param rpc connection to use
 * @param txId transaction signature to confirm
 * @param confirmOptions confirmation options
 * @param blockHashCtx blockhash context for confirmation
 * @return SignatureResult
 */
export async function confirmTx(
    rpc: Rpc,
    txId: string,
    confirmOptions?: ConfirmOptions,
    blockHashCtx?: { blockhash: string; lastValidBlockHeight: number },
): Promise<RpcResponseAndContext<SignatureResult>> {
    // Fetch a fresh blockhash context only when the caller didn't supply one.
    const ctx = blockHashCtx ?? (await rpc.getLatestBlockhash());
    const strategy: TransactionConfirmationStrategy = {
        signature: txId,
        blockhash: ctx.blockhash,
        lastValidBlockHeight: ctx.lastValidBlockHeight,
    };
    const res = await rpc.confirmTransaction(
        strategy,
        confirmOptions?.commitment || rpc.commitment || 'confirmed',
    );
    // Block until the rpc reports the confirmation slot as indexed.
    await rpc.confirmTransactionIndexed(res.context.slot);
    return res;
}
/**
 * Builds a versioned Transaction from instructions and signs it.
 *
 * @param instructions instructions to include in the transaction
 * @param payer payer of the transaction
 * @param blockhash recent blockhash to use in the transaction
 * @param additionalSigners non-feepayer signers to include in the
 * transaction
 * @param lookupTableAccounts lookup table accounts to include in the
 * transaction
 */
export function buildAndSignTx(
    instructions: TransactionInstruction[],
    payer: Signer,
    blockhash: string,
    additionalSigners: Signer[] = [],
    lookupTableAccounts?: AddressLookupTableAccount[],
): VersionedTransaction {
    // The payer already signs via its dedicated parameter; listing it twice
    // is a caller bug.
    if (additionalSigners.includes(payer)) {
        throw new Error('payer must not be in additionalSigners');
    }
    const tx = buildTx(
        instructions,
        payer.publicKey,
        blockhash,
        lookupTableAccounts,
    );
    tx.sign([payer, ...additionalSigners]);
    return tx;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/index.ts
|
// Barrel file: re-exports the public `utils` API surface.
export * from './address';
export * from './airdrop';
export * from './conversion';
export * from './parse-validity-proof';
export * from './pipe';
export * from './send-and-confirm';
export * from './sleep';
export * from './test-utils';
export * from './validation';
export * from './calculate-compute-unit-price';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/utils/address.ts
|
import { AccountMeta, PublicKey } from '@solana/web3.js';
import { hashToBn254FieldSizeBe, hashvToBn254FieldSizeBe } from './conversion';
import { defaultTestStateTreeAccounts } from '../constants';
import { getIndexOrAdd } from '../instruction';
/**
 * Derive an address seed by Keccak-hashing the program id followed by the
 * provided seeds, truncated to the BN254 field.
 */
export function deriveAddressSeed(
    seeds: Uint8Array[],
    programId: PublicKey,
): Uint8Array {
    return hashvToBn254FieldSizeBe([programId.toBytes(), ...seeds]);
}
/**
 * Derive an address for a compressed account from a seed and an address Merkle
 * tree public key.
 *
 * @param seed Seed to derive the address from
 * @param addressMerkleTreePubkey Merkle tree public key. Defaults to
 * defaultTestStateTreeAccounts().addressTree
 * @returns Derived address
 */
export function deriveAddress(
    seed: Uint8Array,
    addressMerkleTreePubkey: PublicKey = defaultTestStateTreeAccounts()
        .addressTree,
): PublicKey {
    if (seed.length != 32) {
        throw new Error('Seed length is not 32 bytes.');
    }
    // Input is the 32-byte tree pubkey followed by the 32-byte seed.
    const treeBytes = addressMerkleTreePubkey.toBytes();
    const combined = Buffer.concat([Buffer.from(treeBytes), Buffer.from(seed)]);
    const hashResult = hashToBn254FieldSizeBe(combined);
    if (hashResult === null) {
        throw new Error('DeriveAddressError');
    }
    return new PublicKey(hashResult[0]);
}
/** Parameters required to create a new compressed-account address. */
export interface NewAddressParams {
    /**
     * Seed for the compressed account. Must be seed used to derive
     * newAccountAddress
     */
    seed: Uint8Array;
    /**
     * Recent state root index of the address tree. The expiry is tied to the
     * validity proof.
     */
    addressMerkleTreeRootIndex: number;
    /**
     * Address tree pubkey. Must be base pubkey used to derive new address
     */
    addressMerkleTreePubkey: PublicKey;
    /**
     * Address space queue pubkey. Associated with the state tree.
     */
    addressQueuePubkey: PublicKey;
}
/**
 * Instruction-data form of {@link NewAddressParams}: pubkeys are replaced by
 * indices into the transaction's remaining-accounts array.
 */
export interface NewAddressParamsPacked {
    /**
     * Seed for the compressed account. Must be seed used to derive
     * newAccountAddress
     */
    seed: number[];
    /**
     * Recent state root index of the address tree. The expiry is tied to the
     * validity proof.
     */
    addressMerkleTreeRootIndex: number;
    /**
     * Index of the address merkle tree account in the remaining accounts array
     */
    addressMerkleTreeAccountIndex: number;
    /**
     * Index of the address queue account in the remaining accounts array
     */
    addressQueueAccountIndex: number;
}
/**
 * Packs new address params for instruction data in TypeScript clients
 *
 * @param newAddressParams New address params
 * @param remainingAccounts Remaining accounts
 * @returns Packed new address params
 */
export function packNewAddressParams(
    newAddressParams: NewAddressParams[],
    remainingAccounts: PublicKey[],
): {
    newAddressParamsPacked: NewAddressParamsPacked[];
    remainingAccounts: PublicKey[];
} {
    // Work on a copy; the caller's array is never mutated.
    const accounts = remainingAccounts.slice();
    const packed: NewAddressParamsPacked[] = newAddressParams.map(params => ({
        seed: Array.from(params.seed),
        addressMerkleTreeRootIndex: params.addressMerkleTreeRootIndex,
        addressMerkleTreeAccountIndex: 0, // assigned below
        addressQueueAccountIndex: 0, // assigned below
    }));
    // Two separate passes on purpose: all Merkle tree accounts are appended
    // to `accounts` before any queue accounts, which fixes the resulting
    // index layout. Do not merge these loops.
    packed.forEach((entry, i) => {
        entry.addressMerkleTreeAccountIndex = getIndexOrAdd(
            accounts,
            newAddressParams[i].addressMerkleTreePubkey,
        );
    });
    packed.forEach((entry, i) => {
        entry.addressQueueAccountIndex = getIndexOrAdd(
            accounts,
            newAddressParams[i].addressQueuePubkey,
        );
    });
    return { newAddressParamsPacked: packed, remainingAccounts: accounts };
}
// In-source vitest suite: `import.meta.vitest` is only defined under the
// vitest runner, so this block is dead code in production bundles.
//@ts-ignore
if (import.meta.vitest) {
    //@ts-ignore
    const { it, expect, describe } = import.meta.vitest;
    const programId = new PublicKey(
        '7yucc7fL3JGbyMwg4neUaenNSdySS39hbAk89Ao3t1Hz',
    );
    describe('derive address seed', () => {
        // Known-good vectors: expected seeds pinned byte-for-byte.
        it('should derive a valid address seed', () => {
            const seeds: Uint8Array[] = [
                new TextEncoder().encode('foo'),
                new TextEncoder().encode('bar'),
            ];
            expect(deriveAddressSeed(seeds, programId)).toStrictEqual(
                new Uint8Array([
                    0, 246, 150, 3, 192, 95, 53, 123, 56, 139, 206, 179, 253,
                    133, 115, 103, 120, 155, 251, 72, 250, 47, 117, 217, 118,
                    59, 174, 207, 49, 101, 201, 110,
                ]),
            );
        });
        it('should derive a valid address seed', () => {
            const seeds: Uint8Array[] = [
                new TextEncoder().encode('ayy'),
                new TextEncoder().encode('lmao'),
            ];
            expect(deriveAddressSeed(seeds, programId)).toStrictEqual(
                new Uint8Array([
                    0, 202, 44, 25, 221, 74, 144, 92, 69, 168, 38, 19, 206, 208,
                    29, 162, 53, 27, 120, 214, 152, 116, 15, 107, 212, 168, 33,
                    121, 187, 10, 76, 233,
                ]),
            );
        });
    });
    describe('deriveAddress function', () => {
        it('should derive a valid address from a seed and a merkle tree public key', async () => {
            const seeds: Uint8Array[] = [
                new TextEncoder().encode('foo'),
                new TextEncoder().encode('bar'),
            ];
            const seed = deriveAddressSeed(seeds, programId);
            const merkleTreePubkey = new PublicKey(
                '11111111111111111111111111111111',
            );
            const derivedAddress = deriveAddress(seed, merkleTreePubkey);
            expect(derivedAddress).toBeInstanceOf(PublicKey);
            expect(derivedAddress).toStrictEqual(
                new PublicKey('139uhyyBtEh4e1CBDJ68ooK5nCeWoncZf9HPyAfRrukA'),
            );
        });
        it('should derive a valid address from a seed and a merkle tree public key', async () => {
            const seeds: Uint8Array[] = [
                new TextEncoder().encode('ayy'),
                new TextEncoder().encode('lmao'),
            ];
            const seed = deriveAddressSeed(seeds, programId);
            const merkleTreePubkey = new PublicKey(
                '11111111111111111111111111111111',
            );
            const derivedAddress = deriveAddress(seed, merkleTreePubkey);
            expect(derivedAddress).toBeInstanceOf(PublicKey);
            expect(derivedAddress).toStrictEqual(
                new PublicKey('12bhHm6PQjbNmEn3Yu1Gq9k7XwVn2rZpzYokmLwbFazN'),
            );
        });
    });
    describe('packNewAddressParams function', () => {
        it('should pack new address params correctly', () => {
            const newAddressParams = [
                {
                    seed: new Uint8Array([1, 2, 3, 4]),
                    addressMerkleTreeRootIndex: 0,
                    addressMerkleTreePubkey: new PublicKey(
                        '11111111111111111111111111111111',
                    ),
                    addressQueuePubkey: new PublicKey(
                        '11111111111111111111111111111112',
                    ),
                },
            ];
            // Both pubkeys already present, so packing reuses their indices.
            const remainingAccounts = [
                new PublicKey('11111111111111111111111111111112'),
                new PublicKey('11111111111111111111111111111111'),
            ];
            const packedParams = packNewAddressParams(
                newAddressParams,
                remainingAccounts,
            );
            expect(
                packedParams.newAddressParamsPacked[0]
                    .addressMerkleTreeAccountIndex,
            ).toBe(1);
            expect(
                packedParams.newAddressParamsPacked[0].addressQueueAccountIndex,
            ).toBe(0);
        });
    });
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/state/compressed-account.ts
|
import { BN } from '@coral-xyz/anchor';
import { PublicKey } from '@solana/web3.js';
import { CompressedAccount, CompressedAccountData } from './types';
import { BN254, bn } from './BN254';
/** A compressed account together with its Merkle tree location and a read-only flag. */
export type CompressedAccountWithMerkleContext = CompressedAccount &
    MerkleContext & {
        readOnly: boolean;
    };
/**
 * Context for compressed account inserted into a state Merkle tree
 * */
export type MerkleContext = {
    /** State Merkle tree */
    merkleTree: PublicKey;
    /** The state nullfier queue belonging to merkleTree */
    nullifierQueue: PublicKey;
    /** Poseidon hash of the utxo preimage. Is a leaf in state merkle tree */
    hash: number[]; // TODO: BN254;
    /** 'hash' position within the Merkle tree */
    leafIndex: number;
};
/** MerkleContext extended with a Merkle proof for 'hash' against a recent root. */
export type MerkleContextWithMerkleProof = MerkleContext & {
    /** Recent valid 'hash' proof path, expires after n slots */
    merkleProof: BN254[];
    /** Index of state root the merkleproof is valid for, expires after n slots */
    rootIndex: number;
    /** Current root */
    root: BN254;
};
/**
 * Build a CompressedAccount; lamports default to 0, address and data to null.
 */
export const createCompressedAccount = (
    owner: PublicKey,
    lamports?: BN,
    data?: CompressedAccountData,
    address?: number[],
): CompressedAccount => {
    return {
        owner,
        lamports: lamports ?? bn(0),
        address: address ?? null,
        data: data ?? null,
    };
};
/**
 * Build a CompressedAccountWithMerkleContext by combining a fresh compressed
 * account with the given Merkle context; readOnly is always false.
 */
export const createCompressedAccountWithMerkleContext = (
    merkleContext: MerkleContext,
    owner: PublicKey,
    lamports?: BN,
    data?: CompressedAccountData,
    address?: number[],
): CompressedAccountWithMerkleContext => {
    const account = createCompressedAccount(owner, lamports, data, address);
    return { ...account, ...merkleContext, readOnly: false };
};
/** Assemble a MerkleContext from its four components. */
export const createMerkleContext = (
    merkleTree: PublicKey,
    nullifierQueue: PublicKey,
    hash: number[], // TODO: BN254,
    leafIndex: number,
): MerkleContext => {
    return { merkleTree, nullifierQueue, hash, leafIndex };
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/state/types.ts
|
import { BN } from '@coral-xyz/anchor';
import { PublicKey } from '@solana/web3.js';
import { Buffer } from 'buffer';
import { NewAddressParamsPacked } from '../utils';
/** Input compressed account with its Merkle context in packed (index) form. */
export interface PackedCompressedAccountWithMerkleContext {
    compressedAccount: CompressedAccount;
    merkleContext: PackedMerkleContext;
    rootIndex: number; // u16
    readOnly: boolean;
}
/** Merkle context with pubkeys replaced by remaining-accounts indices. */
export interface PackedMerkleContext {
    merkleTreePubkeyIndex: number; // u8
    nullifierQueuePubkeyIndex: number; // u8
    leafIndex: number; // u32
    queueIndex: null | QueueIndex; // Option<QueueIndex>
}
// NOTE(review): presumably identifies a value's position within a queue
// account — confirm against the on-chain program.
export interface QueueIndex {
    queueId: number; // u8
    index: number; // u16
}
/**
 * Describe the generic compressed account details applicable to every
 * compressed account.
 * */
export interface CompressedAccount {
    /** Public key of program or user that owns the account */
    owner: PublicKey;
    /** Lamports attached to the account */
    lamports: BN; // u64 // FIXME: optional
    /**
     * TODO: use PublicKey. Optional unique account ID that is persistent across
     * transactions.
     */
    address: number[] | null; // Option<PublicKey>
    /** Optional data attached to the account */
    data: CompressedAccountData | null; // Option<CompressedAccountData>
}
/**
 * Describe the generic compressed account details applicable to every
 * compressed account.
 * */
export interface OutputCompressedAccountWithPackedContext {
    compressedAccount: CompressedAccount;
    /** Index of the output state Merkle tree in the remaining accounts array */
    merkleTreeIndex: number;
}
/** Program-owned data payload of a compressed account. */
export interface CompressedAccountData {
    discriminator: number[]; // [u8; 8] // TODO: test with uint8Array instead
    data: Buffer; // bytes
    dataHash: number[]; // [u8; 32]
}
/** Event emitted for a public compressed-state transaction. */
export interface PublicTransactionEvent {
    inputCompressedAccountHashes: number[][]; // Vec<[u8; 32]>
    outputCompressedAccountHashes: number[][]; // Vec<[u8; 32]>
    outputCompressedAccounts: OutputCompressedAccountWithPackedContext[];
    outputLeafIndices: number[]; // Vec<u32>
    relayFee: BN | null; // Option<u64>
    isCompress: boolean; // bool
    compressOrDecompressLamports: BN | null; // Option<u64>
    pubkeyArray: PublicKey[]; // Vec<PublicKey>
    message: Uint8Array | null; // Option<bytes>
}
/** Instruction data for the system program's `invoke` instruction. */
export interface InstructionDataInvoke {
    proof: CompressedProof | null; // Option<CompressedProof>
    inputCompressedAccountsWithMerkleContext: PackedCompressedAccountWithMerkleContext[];
    outputCompressedAccounts: OutputCompressedAccountWithPackedContext[];
    relayFee: BN | null; // Option<u64>
    newAddressParams: NewAddressParamsPacked[]; // Vec<NewAddressParamsPacked>
    compressOrDecompressLamports: BN | null; // Option<u64>
    isCompress: boolean; // bool
}
/** Groth16-style proof split into its three components. */
export interface CompressedProof {
    a: number[]; // [u8; 32]
    b: number[]; // [u8; 64]
    c: number[]; // [u8; 32]
}
/**
 * Compressed-token types
 *
 * TODO: Token-related code should ideally not have to go into stateless.js.
 * Find a better altnerative way to extend the RPC.
 *
 */
export type TokenTransferOutputData = {
    owner: PublicKey;
    amount: BN;
    lamports: BN | null;
    tlv: Buffer | null;
};
/** Instruction data for the compressed-token program's `transfer` instruction. */
export type CompressedTokenInstructionDataTransfer = {
    proof: CompressedProof | null;
    mint: PublicKey;
    delegatedTransfer: null;
    inputTokenDataWithContext: InputTokenDataWithContext[];
    outputCompressedAccounts: TokenTransferOutputData[];
    isCompress: boolean;
    compressOrDecompressAmount: BN | null;
    cpiContext: null;
    lamportsChangeAccountMerkleTreeIndex: number | null;
};
/** Input token account data with its packed Merkle context. */
export interface InputTokenDataWithContext {
    amount: BN;
    delegateIndex: number | null; // Option<u8>
    merkleContext: PackedMerkleContext;
    rootIndex: number; // u16
    lamports: BN | null;
    tlv: Buffer | null;
}
export type TokenData = {
    /// The mint associated with this account
    mint: PublicKey;
    /// The owner of this account.
    owner: PublicKey;
    /// The amount of tokens this account holds.
    amount: BN;
    /// If `delegate` is `Some` then `delegated_amount` represents
    /// the amount authorized by the delegate
    delegate: PublicKey | null;
    /// The account's state
    state: number; // AccountState_IdlType;
    /// TokenExtension tlv
    tlv: Buffer | null;
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/state/index.ts
|
// Barrel file: re-exports the public `state` API surface.
export * from './BN254';
export * from './compressed-account';
export * from './types';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/state/BN254.ts
|
// TODO: consider implementing BN254 as wrapper class around _BN mirroring
// PublicKey this would encapsulate our runtime checks and also enforce
// typesafety at compile time
import { FIELD_SIZE } from '../constants';
import { PublicKey } from '@solana/web3.js';
import { BN } from '@coral-xyz/anchor';
import { bs58 } from '@coral-xyz/anchor/dist/esm/utils/bytes';
import { Buffer } from 'buffer';
/**
 * bignumber with <254-bit max size. Anchor serialization doesn't support native
 * bigint yet, so we wrap BN. This wrapper has simple base10 encoding which is
 * needed for zk circuit compat, in addition to the base58 encoding that users
 * are used to from working with the web3.js PublicKey type.
 */
// Alias only: BN254 is not a distinct nominal type. The <254-bit bound is
// enforced at construction time via `createBN254` (see `enforceSize`).
export type BN254 = BN;
/** Thin convenience wrapper around the BN constructor. */
export const bn = (
    number: string | number | BN | Buffer | Uint8Array | number[],
    base?: number | 'hex' | undefined,
    endian?: BN.Endianness | undefined,
): BN => {
    return new BN(number, base, endian);
};
/** Create a bigint instance with <254-bit max size and base58 capabilities */
export const createBN254 = (
    number: string | number | BN | Buffer | Uint8Array | number[],
    base?: number | 'hex' | 'base58' | undefined,
): BN254 => {
    if (base !== 'base58') {
        // Size bound is enforced on every construction path.
        return enforceSize(new BN(number, base));
    }
    if (typeof number !== 'string') {
        throw new Error('Must be a base58 string');
    }
    // Decode base58 to bytes, then recurse through the default path.
    return createBN254(bs58.decode(number));
};
/**
 * Enforces a maximum size of <254 bits for bigint instances. This is necessary
 * for compatibility with zk-SNARKs, where hashes must be less than the field
 * modulus (~2^254).
 */
function enforceSize(value: BN254): BN254 {
    if (value.gte(FIELD_SIZE)) {
        throw new Error('Value is too large. Max <254 bits');
    }
    return value;
}
/** Convert <254-bit bigint to Base58 string. */
export function encodeBN254toBase58(bigintNumber: BN): string {
    // Re-run through createBN254 to enforce the <254-bit size bound.
    const checked = createBN254(bigintNumber);
    // Fixed 32-byte big-endian encoding, matching PublicKey byte length.
    return bs58.encode(checked.toArrayLike(Buffer, undefined, 32));
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/idls/account_compression.ts
|
export type AccountCompression = {
version: '1.2.0';
name: 'account_compression';
constants: [
{
name: 'CPI_AUTHORITY_PDA_SEED';
type: 'bytes';
value: '[99, 112, 105, 95, 97, 117, 116, 104, 111, 114, 105, 116, 121]';
},
{
name: 'GROUP_AUTHORITY_SEED';
type: 'bytes';
value: '[103, 114, 111, 117, 112, 95, 97, 117, 116, 104, 111, 114, 105, 116, 121]';
},
{
name: 'STATE_MERKLE_TREE_HEIGHT';
type: 'u64';
value: '26';
},
{
name: 'STATE_MERKLE_TREE_CHANGELOG';
type: 'u64';
value: '1400';
},
{
name: 'STATE_MERKLE_TREE_ROOTS';
type: 'u64';
value: '2400';
},
{
name: 'STATE_MERKLE_TREE_CANOPY_DEPTH';
type: 'u64';
value: '10';
},
{
name: 'STATE_NULLIFIER_QUEUE_VALUES';
type: 'u16';
value: '28_807';
},
{
name: 'STATE_NULLIFIER_QUEUE_SEQUENCE_THRESHOLD';
type: 'u64';
value: '2400';
},
{
name: 'ADDRESS_MERKLE_TREE_HEIGHT';
type: 'u64';
value: '26';
},
{
name: 'ADDRESS_MERKLE_TREE_CHANGELOG';
type: 'u64';
value: '1400';
},
{
name: 'ADDRESS_MERKLE_TREE_ROOTS';
type: 'u64';
value: '2400';
},
{
name: 'ADDRESS_MERKLE_TREE_CANOPY_DEPTH';
type: 'u64';
value: '10';
},
{
name: 'ADDRESS_MERKLE_TREE_INDEXED_CHANGELOG';
type: 'u64';
value: '1400';
},
{
name: 'ADDRESS_QUEUE_VALUES';
type: 'u16';
value: '28_807';
},
{
name: 'ADDRESS_QUEUE_SEQUENCE_THRESHOLD';
type: 'u64';
value: '2400';
},
{
name: 'NOOP_PUBKEY';
type: {
array: ['u8', 32];
};
value: '[11 , 188 , 15 , 192 , 187 , 71 , 202 , 47 , 116 , 196 , 17 , 46 , 148 , 171 , 19 , 207 , 163 , 198 , 52 , 229 , 220 , 23 , 234 , 203 , 3 , 205 , 26 , 35 , 205 , 126 , 120 , 124 ,]';
},
];
instructions: [
{
name: 'initializeAddressMerkleTreeAndQueue';
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'queue';
isMut: true;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
},
];
args: [
{
name: 'index';
type: 'u64';
},
{
name: 'programOwner';
type: {
option: 'publicKey';
};
},
{
name: 'forester';
type: {
option: 'publicKey';
};
},
{
name: 'addressMerkleTreeConfig';
type: {
defined: 'AddressMerkleTreeConfig';
};
},
{
name: 'addressQueueConfig';
type: {
defined: 'AddressQueueConfig';
};
},
];
},
{
name: 'insertAddresses';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['Fee payer pays rollover fee.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'addresses';
type: {
vec: {
array: ['u8', 32];
};
};
},
];
},
{
name: 'updateAddressMerkleTree';
docs: ['Updates the address Merkle tree with a new address.'];
accounts: [
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'queue';
isMut: true;
isSigner: false;
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'logWrapper';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'changelogIndex';
type: 'u16';
},
{
name: 'indexedChangelogIndex';
type: 'u16';
},
{
name: 'value';
type: 'u16';
},
{
name: 'lowAddressIndex';
type: 'u64';
},
{
name: 'lowAddressValue';
type: {
array: ['u8', 32];
};
},
{
name: 'lowAddressNextIndex';
type: 'u64';
},
{
name: 'lowAddressNextValue';
type: {
array: ['u8', 32];
};
},
{
name: 'lowAddressProof';
type: {
array: [
{
array: ['u8', 32];
},
16,
];
};
},
];
},
{
name: 'rolloverAddressMerkleTreeAndQueue';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: [
'Signer used to receive rollover accounts rentexemption reimbursement.',
];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'newAddressMerkleTree';
isMut: true;
isSigner: false;
},
{
name: 'newQueue';
isMut: true;
isSigner: false;
},
{
name: 'oldAddressMerkleTree';
isMut: true;
isSigner: false;
},
{
name: 'oldQueue';
isMut: true;
isSigner: false;
},
];
args: [];
},
{
name: 'initializeGroupAuthority';
docs: [
'initialize group (a group can be used to give multiple programs access',
'to the same Merkle trees by registering the programs to the group)',
];
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'seed';
isMut: false;
isSigner: true;
docs: [
'Seed public key used to derive the group authority.',
];
},
{
name: 'groupAuthority';
isMut: true;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'authority';
type: 'publicKey';
},
];
},
{
name: 'updateGroupAuthority';
accounts: [
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'groupAuthority';
isMut: true;
isSigner: false;
},
];
args: [
{
name: 'authority';
type: 'publicKey';
},
];
},
{
name: 'registerProgramToGroup';
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'programToBeRegistered';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: true;
isSigner: false;
},
{
name: 'groupAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [];
},
{
name: 'deregisterProgram';
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: true;
isSigner: false;
},
{
name: 'groupAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'closeRecipient';
isMut: true;
isSigner: false;
},
];
args: [];
},
{
name: 'initializeStateMerkleTreeAndNullifierQueue';
docs: [
'Initializes a new Merkle tree from config bytes.',
'Index is an optional identifier and not checked by the program.',
];
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'nullifierQueue';
isMut: true;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
},
];
args: [
{
name: 'index';
type: 'u64';
},
{
name: 'programOwner';
type: {
option: 'publicKey';
};
},
{
name: 'forester';
type: {
option: 'publicKey';
};
},
{
name: 'stateMerkleTreeConfig';
type: {
defined: 'StateMerkleTreeConfig';
};
},
{
name: 'nullifierQueueConfig';
type: {
defined: 'NullifierQueueConfig';
};
},
{
name: 'additionalBytes';
type: 'u64';
},
];
},
{
name: 'appendLeavesToMerkleTrees';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['Fee payer pays rollover fee.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
docs: [
'Checked whether instruction is accessed by a registered program or owner = authority.',
];
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
docs: [
'Some assumes that the Merkle trees are accessed by a registered program.',
'None assumes that the Merkle trees are accessed by its owner.',
];
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'leaves';
type: {
vec: {
defined: '(u8,[u8;32])';
};
};
},
];
},
{
name: 'nullifyLeaves';
accounts: [
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'logWrapper';
isMut: false;
isSigner: false;
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'nullifierQueue';
isMut: true;
isSigner: false;
},
];
args: [
{
name: 'changeLogIndices';
type: {
vec: 'u64';
};
},
{
name: 'leavesQueueIndices';
type: {
vec: 'u16';
};
},
{
name: 'leafIndices';
type: {
vec: 'u64';
};
},
{
name: 'proofs';
type: {
vec: {
vec: {
array: ['u8', 32];
};
};
};
},
];
},
{
name: 'insertIntoNullifierQueues';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['Fee payer pays rollover fee.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'nullifiers';
type: {
vec: {
array: ['u8', 32];
};
};
},
];
},
{
name: 'rolloverStateMerkleTreeAndNullifierQueue';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: [
'Signer used to receive rollover accounts rentexemption reimbursement.',
];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'newStateMerkleTree';
isMut: true;
isSigner: false;
},
{
name: 'newNullifierQueue';
isMut: true;
isSigner: false;
},
{
name: 'oldStateMerkleTree';
isMut: true;
isSigner: false;
},
{
name: 'oldNullifierQueue';
isMut: true;
isSigner: false;
},
];
args: [];
},
];
accounts: [
{
name: 'registeredProgram';
type: {
kind: 'struct';
fields: [
{
name: 'registeredProgramId';
type: 'publicKey';
},
{
name: 'groupAuthorityPda';
type: 'publicKey';
},
];
};
},
{
name: 'accessMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'owner';
docs: ['Owner of the Merkle tree.'];
type: 'publicKey';
},
{
name: 'programOwner';
docs: [
'Program owner of the Merkle tree. This will be used for program owned Merkle trees.',
];
type: 'publicKey';
},
{
name: 'forester';
docs: [
'Optional privileged forester pubkey, can be set for custom Merkle trees',
'without a network fee. Merkle trees without network fees are not',
'forested by light foresters. The variable is not used in the account',
'compression program but the registry program. The registry program',
'implements access control to prevent contention during forester. The',
'forester pubkey specified in this struct can bypass contention checks.',
];
type: 'publicKey';
},
];
};
},
{
name: 'addressMerkleTreeAccount';
type: {
kind: 'struct';
fields: [
{
name: 'metadata';
type: {
defined: 'MerkleTreeMetadata';
};
},
];
};
},
{
name: 'groupAuthority';
type: {
kind: 'struct';
fields: [
{
name: 'authority';
type: 'publicKey';
},
{
name: 'seed';
type: 'publicKey';
},
];
};
},
{
name: 'merkleTreeMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'accessMetadata';
type: {
defined: 'AccessMetadata';
};
},
{
name: 'rolloverMetadata';
type: {
defined: 'RolloverMetadata';
};
},
{
name: 'associatedQueue';
type: 'publicKey';
},
{
name: 'nextMerkleTree';
type: 'publicKey';
},
];
};
},
{
name: 'stateMerkleTreeAccount';
docs: [
'Concurrent state Merkle tree used for public compressed transactions.',
];
type: {
kind: 'struct';
fields: [
{
name: 'metadata';
type: {
defined: 'MerkleTreeMetadata';
};
},
];
};
},
{
name: 'queueMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'accessMetadata';
type: {
defined: 'AccessMetadata';
};
},
{
name: 'rolloverMetadata';
type: {
defined: 'RolloverMetadata';
};
},
{
name: 'associatedMerkleTree';
type: 'publicKey';
},
{
name: 'nextQueue';
type: 'publicKey';
},
{
name: 'queueType';
type: 'u64';
},
];
};
},
{
name: 'queueAccount';
type: {
kind: 'struct';
fields: [
{
name: 'metadata';
type: {
defined: 'QueueMetadata';
};
},
];
};
},
{
name: 'rolloverMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'index';
docs: ['Unique index.'];
type: 'u64';
},
{
name: 'rolloverFee';
docs: [
'This fee is used for rent for the next account.',
'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over',
];
type: 'u64';
},
{
name: 'rolloverThreshold';
docs: [
'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).',
];
type: 'u64';
},
{
name: 'networkFee';
docs: ['Tip for maintaining the account.'];
type: 'u64';
},
{
name: 'rolledoverSlot';
docs: [
'The slot when the account was rolled over, a rolled over account should not be written to.',
];
type: 'u64';
},
{
name: 'closeThreshold';
docs: [
'If current slot is greater than rolledover_slot + close_threshold and',
"the account is empty it can be closed. No 'close' functionality has been",
'implemented yet.',
];
type: 'u64';
},
{
name: 'additionalBytes';
docs: [
'Placeholder for bytes of additional accounts which are tied to the',
'Merkle trees operation and need to be rolled over as well.',
];
type: 'u64';
},
];
};
},
];
types: [
{
name: 'AddressMerkleTreeConfig';
type: {
kind: 'struct';
fields: [
{
name: 'height';
type: 'u32';
},
{
name: 'changelogSize';
type: 'u64';
},
{
name: 'rootsSize';
type: 'u64';
},
{
name: 'canopyDepth';
type: 'u64';
},
{
name: 'addressChangelogSize';
type: 'u64';
},
{
name: 'networkFee';
type: {
option: 'u64';
};
},
{
name: 'rolloverThreshold';
type: {
option: 'u64';
};
},
{
name: 'closeThreshold';
type: {
option: 'u64';
};
},
];
};
},
{
name: 'StateMerkleTreeConfig';
type: {
kind: 'struct';
fields: [
{
name: 'height';
type: 'u32';
},
{
name: 'changelogSize';
type: 'u64';
},
{
name: 'rootsSize';
type: 'u64';
},
{
name: 'canopyDepth';
type: 'u64';
},
{
name: 'networkFee';
type: {
option: 'u64';
};
},
{
name: 'rolloverThreshold';
type: {
option: 'u64';
};
},
{
name: 'closeThreshold';
type: {
option: 'u64';
};
},
];
};
},
{
name: 'NullifierQueueConfig';
type: {
kind: 'struct';
fields: [
{
name: 'capacity';
type: 'u16';
},
{
name: 'sequenceThreshold';
type: 'u64';
},
{
name: 'networkFee';
type: {
option: 'u64';
};
},
];
};
},
{
name: 'QueueType';
type: {
kind: 'enum';
variants: [
{
name: 'NullifierQueue';
},
{
name: 'AddressQueue';
},
];
};
},
{
name: 'AddressQueueConfig';
type: {
kind: 'alias';
value: {
defined: 'NullifierQueueConfig';
};
};
},
];
errors: [
{
code: 6000;
name: 'IntegerOverflow';
msg: 'Integer overflow';
},
{
code: 6001;
name: 'InvalidAuthority';
msg: 'InvalidAuthority';
},
{
code: 6002;
name: 'NumberOfLeavesMismatch';
msg: 'Leaves <> remaining accounts mismatch. The number of remaining accounts must match the number of leaves.';
},
{
code: 6003;
name: 'InvalidNoopPubkey';
msg: 'Provided noop program public key is invalid';
},
{
code: 6004;
name: 'NumberOfChangeLogIndicesMismatch';
msg: 'Number of change log indices mismatch';
},
{
code: 6005;
name: 'NumberOfIndicesMismatch';
msg: 'Number of indices mismatch';
},
{
code: 6006;
name: 'NumberOfProofsMismatch';
msg: 'NumberOfProofsMismatch';
},
{
code: 6007;
name: 'InvalidMerkleProof';
msg: 'InvalidMerkleProof';
},
{
code: 6008;
name: 'LeafNotFound';
msg: 'Could not find the leaf in the queue';
},
{
code: 6009;
name: 'MerkleTreeAndQueueNotAssociated';
msg: 'MerkleTreeAndQueueNotAssociated';
},
{
code: 6010;
name: 'MerkleTreeAlreadyRolledOver';
msg: 'MerkleTreeAlreadyRolledOver';
},
{
code: 6011;
name: 'NotReadyForRollover';
msg: 'NotReadyForRollover';
},
{
code: 6012;
name: 'RolloverNotConfigured';
msg: 'RolloverNotConfigured';
},
{
code: 6013;
name: 'NotAllLeavesProcessed';
msg: 'NotAllLeavesProcessed';
},
{
code: 6014;
name: 'InvalidQueueType';
msg: 'InvalidQueueType';
},
{
code: 6015;
name: 'InputElementsEmpty';
msg: 'InputElementsEmpty';
},
{
code: 6016;
name: 'NoLeavesForMerkleTree';
msg: 'NoLeavesForMerkleTree';
},
{
code: 6017;
name: 'InvalidAccountSize';
msg: 'InvalidAccountSize';
},
{
code: 6018;
name: 'InsufficientRolloverFee';
msg: 'InsufficientRolloverFee';
},
{
code: 6019;
name: 'UnsupportedHeight';
msg: 'Unsupported Merkle tree height';
},
{
code: 6020;
name: 'UnsupportedCanopyDepth';
msg: 'Unsupported canopy depth';
},
{
code: 6021;
name: 'InvalidSequenceThreshold';
msg: 'Invalid sequence threshold';
},
{
code: 6022;
name: 'UnsupportedCloseThreshold';
msg: 'Unsupported close threshold';
},
{
code: 6023;
name: 'InvalidAccountBalance';
msg: 'InvalidAccountBalance';
},
{
code: 6024;
name: 'UnsupportedAdditionalBytes';
},
{
code: 6025;
name: 'InvalidGroup';
},
{
code: 6026;
name: 'ProofLengthMismatch';
},
];
};
export const IDL: AccountCompression = {
version: '1.2.0',
name: 'account_compression',
constants: [
{
name: 'CPI_AUTHORITY_PDA_SEED',
type: 'bytes',
value: '[99, 112, 105, 95, 97, 117, 116, 104, 111, 114, 105, 116, 121]',
},
{
name: 'GROUP_AUTHORITY_SEED',
type: 'bytes',
value: '[103, 114, 111, 117, 112, 95, 97, 117, 116, 104, 111, 114, 105, 116, 121]',
},
{
name: 'STATE_MERKLE_TREE_HEIGHT',
type: 'u64',
value: '26',
},
{
name: 'STATE_MERKLE_TREE_CHANGELOG',
type: 'u64',
value: '1400',
},
{
name: 'STATE_MERKLE_TREE_ROOTS',
type: 'u64',
value: '2400',
},
{
name: 'STATE_MERKLE_TREE_CANOPY_DEPTH',
type: 'u64',
value: '10',
},
{
name: 'STATE_NULLIFIER_QUEUE_VALUES',
type: 'u16',
value: '28_807',
},
{
name: 'STATE_NULLIFIER_QUEUE_SEQUENCE_THRESHOLD',
type: 'u64',
value: '2400',
},
{
name: 'ADDRESS_MERKLE_TREE_HEIGHT',
type: 'u64',
value: '26',
},
{
name: 'ADDRESS_MERKLE_TREE_CHANGELOG',
type: 'u64',
value: '1400',
},
{
name: 'ADDRESS_MERKLE_TREE_ROOTS',
type: 'u64',
value: '2400',
},
{
name: 'ADDRESS_MERKLE_TREE_CANOPY_DEPTH',
type: 'u64',
value: '10',
},
{
name: 'ADDRESS_MERKLE_TREE_INDEXED_CHANGELOG',
type: 'u64',
value: '1400',
},
{
name: 'ADDRESS_QUEUE_VALUES',
type: 'u16',
value: '28_807',
},
{
name: 'ADDRESS_QUEUE_SEQUENCE_THRESHOLD',
type: 'u64',
value: '2400',
},
{
name: 'NOOP_PUBKEY',
type: {
array: ['u8', 32],
},
value: '[11 , 188 , 15 , 192 , 187 , 71 , 202 , 47 , 116 , 196 , 17 , 46 , 148 , 171 , 19 , 207 , 163 , 198 , 52 , 229 , 220 , 23 , 234 , 203 , 3 , 205 , 26 , 35 , 205 , 126 , 120 , 124 ,]',
},
],
instructions: [
{
name: 'initializeAddressMerkleTreeAndQueue',
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'queue',
isMut: true,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
},
],
args: [
{
name: 'index',
type: 'u64',
},
{
name: 'programOwner',
type: {
option: 'publicKey',
},
},
{
name: 'forester',
type: {
option: 'publicKey',
},
},
{
name: 'addressMerkleTreeConfig',
type: {
defined: 'AddressMerkleTreeConfig',
},
},
{
name: 'addressQueueConfig',
type: {
defined: 'AddressQueueConfig',
},
},
],
},
{
name: 'insertAddresses',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['Fee payer pays rollover fee.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'addresses',
type: {
vec: {
array: ['u8', 32],
},
},
},
],
},
{
name: 'updateAddressMerkleTree',
docs: ['Updates the address Merkle tree with a new address.'],
accounts: [
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'queue',
isMut: true,
isSigner: false,
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'logWrapper',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'changelogIndex',
type: 'u16',
},
{
name: 'indexedChangelogIndex',
type: 'u16',
},
{
name: 'value',
type: 'u16',
},
{
name: 'lowAddressIndex',
type: 'u64',
},
{
name: 'lowAddressValue',
type: {
array: ['u8', 32],
},
},
{
name: 'lowAddressNextIndex',
type: 'u64',
},
{
name: 'lowAddressNextValue',
type: {
array: ['u8', 32],
},
},
{
name: 'lowAddressProof',
type: {
array: [
{
array: ['u8', 32],
},
16,
],
},
},
],
},
{
name: 'rolloverAddressMerkleTreeAndQueue',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: [
'Signer used to receive rollover accounts rentexemption reimbursement.',
],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'newAddressMerkleTree',
isMut: true,
isSigner: false,
},
{
name: 'newQueue',
isMut: true,
isSigner: false,
},
{
name: 'oldAddressMerkleTree',
isMut: true,
isSigner: false,
},
{
name: 'oldQueue',
isMut: true,
isSigner: false,
},
],
args: [],
},
{
name: 'initializeGroupAuthority',
docs: [
'initialize group (a group can be used to give multiple programs access',
'to the same Merkle trees by registering the programs to the group)',
],
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'seed',
isMut: false,
isSigner: true,
docs: [
'Seed public key used to derive the group authority.',
],
},
{
name: 'groupAuthority',
isMut: true,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'authority',
type: 'publicKey',
},
],
},
{
name: 'updateGroupAuthority',
accounts: [
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'groupAuthority',
isMut: true,
isSigner: false,
},
],
args: [
{
name: 'authority',
type: 'publicKey',
},
],
},
{
name: 'registerProgramToGroup',
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'programToBeRegistered',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: true,
isSigner: false,
},
{
name: 'groupAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [],
},
{
name: 'deregisterProgram',
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: true,
isSigner: false,
},
{
name: 'groupAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'closeRecipient',
isMut: true,
isSigner: false,
},
],
args: [],
},
{
name: 'initializeStateMerkleTreeAndNullifierQueue',
docs: [
'Initializes a new Merkle tree from config bytes.',
'Index is an optional identifier and not checked by the program.',
],
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'nullifierQueue',
isMut: true,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
},
],
args: [
{
name: 'index',
type: 'u64',
},
{
name: 'programOwner',
type: {
option: 'publicKey',
},
},
{
name: 'forester',
type: {
option: 'publicKey',
},
},
{
name: 'stateMerkleTreeConfig',
type: {
defined: 'StateMerkleTreeConfig',
},
},
{
name: 'nullifierQueueConfig',
type: {
defined: 'NullifierQueueConfig',
},
},
{
name: 'additionalBytes',
type: 'u64',
},
],
},
{
name: 'appendLeavesToMerkleTrees',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['Fee payer pays rollover fee.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
docs: [
'Checked whether instruction is accessed by a registered program or owner = authority.',
],
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
docs: [
'Some assumes that the Merkle trees are accessed by a registered program.',
'None assumes that the Merkle trees are accessed by its owner.',
],
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'leaves',
type: {
vec: {
defined: '(u8,[u8;32])',
},
},
},
],
},
{
name: 'nullifyLeaves',
accounts: [
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'logWrapper',
isMut: false,
isSigner: false,
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'nullifierQueue',
isMut: true,
isSigner: false,
},
],
args: [
{
name: 'changeLogIndices',
type: {
vec: 'u64',
},
},
{
name: 'leavesQueueIndices',
type: {
vec: 'u16',
},
},
{
name: 'leafIndices',
type: {
vec: 'u64',
},
},
{
name: 'proofs',
type: {
vec: {
vec: {
array: ['u8', 32],
},
},
},
},
],
},
{
name: 'insertIntoNullifierQueues',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['Fee payer pays rollover fee.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'nullifiers',
type: {
vec: {
array: ['u8', 32],
},
},
},
],
},
{
name: 'rolloverStateMerkleTreeAndNullifierQueue',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: [
'Signer used to receive rollover accounts rentexemption reimbursement.',
],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'newStateMerkleTree',
isMut: true,
isSigner: false,
},
{
name: 'newNullifierQueue',
isMut: true,
isSigner: false,
},
{
name: 'oldStateMerkleTree',
isMut: true,
isSigner: false,
},
{
name: 'oldNullifierQueue',
isMut: true,
isSigner: false,
},
],
args: [],
},
],
accounts: [
{
name: 'registeredProgram',
type: {
kind: 'struct',
fields: [
{
name: 'registeredProgramId',
type: 'publicKey',
},
{
name: 'groupAuthorityPda',
type: 'publicKey',
},
],
},
},
{
name: 'accessMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'owner',
docs: ['Owner of the Merkle tree.'],
type: 'publicKey',
},
{
name: 'programOwner',
docs: [
'Program owner of the Merkle tree. This will be used for program owned Merkle trees.',
],
type: 'publicKey',
},
{
name: 'forester',
docs: [
'Optional privileged forester pubkey, can be set for custom Merkle trees',
'without a network fee. Merkle trees without network fees are not',
'forested by light foresters. The variable is not used in the account',
'compression program but the registry program. The registry program',
'implements access control to prevent contention during forester. The',
'forester pubkey specified in this struct can bypass contention checks.',
],
type: 'publicKey',
},
],
},
},
{
name: 'addressMerkleTreeAccount',
type: {
kind: 'struct',
fields: [
{
name: 'metadata',
type: {
defined: 'MerkleTreeMetadata',
},
},
],
},
},
{
name: 'groupAuthority',
type: {
kind: 'struct',
fields: [
{
name: 'authority',
type: 'publicKey',
},
{
name: 'seed',
type: 'publicKey',
},
],
},
},
{
name: 'merkleTreeMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'accessMetadata',
type: {
defined: 'AccessMetadata',
},
},
{
name: 'rolloverMetadata',
type: {
defined: 'RolloverMetadata',
},
},
{
name: 'associatedQueue',
type: 'publicKey',
},
{
name: 'nextMerkleTree',
type: 'publicKey',
},
],
},
},
{
name: 'stateMerkleTreeAccount',
docs: [
'Concurrent state Merkle tree used for public compressed transactions.',
],
type: {
kind: 'struct',
fields: [
{
name: 'metadata',
type: {
defined: 'MerkleTreeMetadata',
},
},
],
},
},
{
name: 'queueMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'accessMetadata',
type: {
defined: 'AccessMetadata',
},
},
{
name: 'rolloverMetadata',
type: {
defined: 'RolloverMetadata',
},
},
{
name: 'associatedMerkleTree',
type: 'publicKey',
},
{
name: 'nextQueue',
type: 'publicKey',
},
{
name: 'queueType',
type: 'u64',
},
],
},
},
{
name: 'queueAccount',
type: {
kind: 'struct',
fields: [
{
name: 'metadata',
type: {
defined: 'QueueMetadata',
},
},
],
},
},
{
name: 'rolloverMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'index',
docs: ['Unique index.'],
type: 'u64',
},
{
name: 'rolloverFee',
docs: [
'This fee is used for rent for the next account.',
'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over',
],
type: 'u64',
},
{
name: 'rolloverThreshold',
docs: [
'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).',
],
type: 'u64',
},
{
name: 'networkFee',
docs: ['Tip for maintaining the account.'],
type: 'u64',
},
{
name: 'rolledoverSlot',
docs: [
'The slot when the account was rolled over, a rolled over account should not be written to.',
],
type: 'u64',
},
{
name: 'closeThreshold',
docs: [
'If current slot is greater than rolledover_slot + close_threshold and',
"the account is empty it can be closed. No 'close' functionality has been",
'implemented yet.',
],
type: 'u64',
},
{
name: 'additionalBytes',
docs: [
'Placeholder for bytes of additional accounts which are tied to the',
'Merkle trees operation and need to be rolled over as well.',
],
type: 'u64',
},
],
},
},
],
types: [
{
name: 'AddressMerkleTreeConfig',
type: {
kind: 'struct',
fields: [
{
name: 'height',
type: 'u32',
},
{
name: 'changelogSize',
type: 'u64',
},
{
name: 'rootsSize',
type: 'u64',
},
{
name: 'canopyDepth',
type: 'u64',
},
{
name: 'addressChangelogSize',
type: 'u64',
},
{
name: 'networkFee',
type: {
option: 'u64',
},
},
{
name: 'rolloverThreshold',
type: {
option: 'u64',
},
},
{
name: 'closeThreshold',
type: {
option: 'u64',
},
},
],
},
},
{
name: 'StateMerkleTreeConfig',
type: {
kind: 'struct',
fields: [
{
name: 'height',
type: 'u32',
},
{
name: 'changelogSize',
type: 'u64',
},
{
name: 'rootsSize',
type: 'u64',
},
{
name: 'canopyDepth',
type: 'u64',
},
{
name: 'networkFee',
type: {
option: 'u64',
},
},
{
name: 'rolloverThreshold',
type: {
option: 'u64',
},
},
{
name: 'closeThreshold',
type: {
option: 'u64',
},
},
],
},
},
{
name: 'NullifierQueueConfig',
type: {
kind: 'struct',
fields: [
{
name: 'capacity',
type: 'u16',
},
{
name: 'sequenceThreshold',
type: 'u64',
},
{
name: 'networkFee',
type: {
option: 'u64',
},
},
],
},
},
{
name: 'QueueType',
type: {
kind: 'enum',
variants: [
{
name: 'NullifierQueue',
},
{
name: 'AddressQueue',
},
],
},
},
{
name: 'AddressQueueConfig',
type: {
kind: 'alias',
value: {
defined: 'NullifierQueueConfig',
},
},
},
],
errors: [
{
code: 6000,
name: 'IntegerOverflow',
msg: 'Integer overflow',
},
{
code: 6001,
name: 'InvalidAuthority',
msg: 'InvalidAuthority',
},
{
code: 6002,
name: 'NumberOfLeavesMismatch',
msg: 'Leaves <> remaining accounts mismatch. The number of remaining accounts must match the number of leaves.',
},
{
code: 6003,
name: 'InvalidNoopPubkey',
msg: 'Provided noop program public key is invalid',
},
{
code: 6004,
name: 'NumberOfChangeLogIndicesMismatch',
msg: 'Number of change log indices mismatch',
},
{
code: 6005,
name: 'NumberOfIndicesMismatch',
msg: 'Number of indices mismatch',
},
{
code: 6006,
name: 'NumberOfProofsMismatch',
msg: 'NumberOfProofsMismatch',
},
{
code: 6007,
name: 'InvalidMerkleProof',
msg: 'InvalidMerkleProof',
},
{
code: 6008,
name: 'LeafNotFound',
msg: 'Could not find the leaf in the queue',
},
{
code: 6009,
name: 'MerkleTreeAndQueueNotAssociated',
msg: 'MerkleTreeAndQueueNotAssociated',
},
{
code: 6010,
name: 'MerkleTreeAlreadyRolledOver',
msg: 'MerkleTreeAlreadyRolledOver',
},
{
code: 6011,
name: 'NotReadyForRollover',
msg: 'NotReadyForRollover',
},
{
code: 6012,
name: 'RolloverNotConfigured',
msg: 'RolloverNotConfigured',
},
{
code: 6013,
name: 'NotAllLeavesProcessed',
msg: 'NotAllLeavesProcessed',
},
{
code: 6014,
name: 'InvalidQueueType',
msg: 'InvalidQueueType',
},
{
code: 6015,
name: 'InputElementsEmpty',
msg: 'InputElementsEmpty',
},
{
code: 6016,
name: 'NoLeavesForMerkleTree',
msg: 'NoLeavesForMerkleTree',
},
{
code: 6017,
name: 'InvalidAccountSize',
msg: 'InvalidAccountSize',
},
{
code: 6018,
name: 'InsufficientRolloverFee',
msg: 'InsufficientRolloverFee',
},
{
code: 6019,
name: 'UnsupportedHeight',
msg: 'Unsupported Merkle tree height',
},
{
code: 6020,
name: 'UnsupportedCanopyDepth',
msg: 'Unsupported canopy depth',
},
{
code: 6021,
name: 'InvalidSequenceThreshold',
msg: 'Invalid sequence threshold',
},
{
code: 6022,
name: 'UnsupportedCloseThreshold',
msg: 'Unsupported close threshold',
},
{
code: 6023,
name: 'InvalidAccountBalance',
msg: 'InvalidAccountBalance',
},
{
code: 6024,
name: 'UnsupportedAdditionalBytes',
},
{
code: 6025,
name: 'InvalidGroup',
},
{
code: 6026,
name: 'ProofLengthMismatch',
},
],
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/idls/light_system_program.ts
|
export type LightSystemProgram = {
version: '1.2.0';
name: 'light_system_program';
constants: [
{
name: 'SOL_POOL_PDA_SEED';
type: 'bytes';
value: '[115, 111, 108, 95, 112, 111, 111, 108, 95, 112, 100, 97]';
},
];
instructions: [
{
name: 'initCpiContextAccount';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
},
{
name: 'cpiContextAccount';
isMut: true;
isSigner: false;
},
{
name: 'associatedMerkleTree';
isMut: false;
isSigner: false;
},
];
args: [];
},
{
name: 'invoke';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: [
'Fee payer needs to be mutable to pay rollover and protocol fees.',
];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
docs: [
'This pda is used to invoke the account compression program.',
];
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
docs: ['Merkle trees.'];
},
{
name: 'solPoolPda';
isMut: true;
isSigner: false;
isOptional: true;
docs: [
'Sol pool pda is used to store the native sol that has been compressed.',
"It's only required when compressing or decompressing sol.",
];
},
{
name: 'decompressionRecipient';
isMut: true;
isSigner: false;
isOptional: true;
docs: [
'Only needs to be provided for decompression as a recipient for the',
'decompressed sol.',
'Compressed sol originate from authority.',
];
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs';
type: 'bytes';
},
];
},
{
name: 'invokeCpi';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: [
'Fee payer needs to be mutable to pay rollover and protocol fees.',
];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'invokingProgram';
isMut: false;
isSigner: false;
},
{
name: 'solPoolPda';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'decompressionRecipient';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
{
name: 'cpiContextAccount';
isMut: true;
isSigner: false;
isOptional: true;
},
];
args: [
{
name: 'inputs';
type: 'bytes';
},
];
},
{
name: 'stubIdlBuild';
docs: [
'This function is a stub to allow Anchor to include the input types in',
'the IDL. It should not be included in production builds nor be called in',
'practice.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: [
'Fee payer needs to be mutable to pay rollover and protocol fees.',
];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
docs: [
'This pda is used to invoke the account compression program.',
];
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
docs: ['Merkle trees.'];
},
{
name: 'solPoolPda';
isMut: true;
isSigner: false;
isOptional: true;
docs: [
'Sol pool pda is used to store the native sol that has been compressed.',
"It's only required when compressing or decompressing sol.",
];
},
{
name: 'decompressionRecipient';
isMut: true;
isSigner: false;
isOptional: true;
docs: [
'Only needs to be provided for decompression as a recipient for the',
'decompressed sol.',
'Compressed sol originate from authority.',
];
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs1';
type: {
defined: 'InstructionDataInvoke';
};
},
{
name: 'inputs2';
type: {
defined: 'InstructionDataInvokeCpi';
};
},
{
name: 'inputs3';
type: {
defined: 'PublicTransactionEvent';
};
},
];
},
];
accounts: [
{
name: 'stateMerkleTreeAccount';
docs: [
'Concurrent state Merkle tree used for public compressed transactions.',
];
type: {
kind: 'struct';
fields: [
{
name: 'metadata';
type: {
defined: 'MerkleTreeMetadata';
};
},
];
};
},
{
name: 'cpiContextAccount';
docs: [
'Collects instruction data without executing a compressed transaction.',
'Signer checks are performed on instruction data.',
'Collected instruction data is combined with the instruction data of the executing cpi,',
'and executed as a single transaction.',
'This enables to use input compressed accounts that are owned by multiple programs,',
'with one zero-knowledge proof.',
];
type: {
kind: 'struct';
fields: [
{
name: 'feePayer';
type: 'publicKey';
},
{
name: 'associatedMerkleTree';
type: 'publicKey';
},
{
name: 'context';
type: {
vec: {
defined: 'InstructionDataInvokeCpi';
};
};
},
];
};
},
];
types: [
{
name: 'AccessMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'owner';
docs: ['Owner of the Merkle tree.'];
type: 'publicKey';
},
{
name: 'programOwner';
docs: [
'Program owner of the Merkle tree. This will be used for program owned Merkle trees.',
];
type: 'publicKey';
},
{
name: 'forester';
docs: [
'Optional privileged forester pubkey, can be set for custom Merkle trees',
'without a network fee. Merkle trees without network fees are not',
'forested by light foresters. The variable is not used in the account',
'compression program but the registry program. The registry program',
'implements access control to prevent contention during forester. The',
'forester pubkey specified in this struct can bypass contention checks.',
];
type: 'publicKey';
},
];
};
},
{
name: 'MerkleTreeMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'accessMetadata';
type: {
defined: 'AccessMetadata';
};
},
{
name: 'rolloverMetadata';
type: {
defined: 'RolloverMetadata';
};
},
{
name: 'associatedQueue';
type: 'publicKey';
},
{
name: 'nextMerkleTree';
type: 'publicKey';
},
];
};
},
{
name: 'RolloverMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'index';
docs: ['Unique index.'];
type: 'u64';
},
{
name: 'rolloverFee';
docs: [
'This fee is used for rent for the next account.',
'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over',
];
type: 'u64';
},
{
name: 'rolloverThreshold';
docs: [
'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).',
];
type: 'u64';
},
{
name: 'networkFee';
docs: ['Tip for maintaining the account.'];
type: 'u64';
},
{
name: 'rolledoverSlot';
docs: [
'The slot when the account was rolled over, a rolled over account should not be written to.',
];
type: 'u64';
},
{
name: 'closeThreshold';
docs: [
'If current slot is greater than rolledover_slot + close_threshold and',
"the account is empty it can be closed. No 'close' functionality has been",
'implemented yet.',
];
type: 'u64';
},
{
name: 'additionalBytes';
docs: [
'Placeholder for bytes of additional accounts which are tied to the',
'Merkle trees operation and need to be rolled over as well.',
];
type: 'u64';
},
];
};
},
{
name: 'InstructionDataInvoke';
type: {
kind: 'struct';
fields: [
{
name: 'proof';
type: {
option: {
defined: 'CompressedProof';
};
};
},
{
name: 'inputCompressedAccountsWithMerkleContext';
type: {
vec: {
defined: 'PackedCompressedAccountWithMerkleContext';
};
};
},
{
name: 'outputCompressedAccounts';
type: {
vec: {
defined: 'OutputCompressedAccountWithPackedContext';
};
};
},
{
name: 'relayFee';
type: {
option: 'u64';
};
},
{
name: 'newAddressParams';
type: {
vec: {
defined: 'NewAddressParamsPacked';
};
};
},
{
name: 'compressOrDecompressLamports';
type: {
option: 'u64';
};
},
{
name: 'isCompress';
type: 'bool';
},
];
};
},
{
name: 'NewAddressParamsPacked';
type: {
kind: 'struct';
fields: [
{
name: 'seed';
type: {
array: ['u8', 32];
};
},
{
name: 'addressQueueAccountIndex';
type: 'u8';
},
{
name: 'addressMerkleTreeAccountIndex';
type: 'u8';
},
{
name: 'addressMerkleTreeRootIndex';
type: 'u16';
},
];
};
},
{
name: 'OutputCompressedAccountWithPackedContext';
type: {
kind: 'struct';
fields: [
{
name: 'compressedAccount';
type: {
defined: 'CompressedAccount';
};
},
{
name: 'merkleTreeIndex';
type: 'u8';
},
];
};
},
{
name: 'CompressedProof';
type: {
kind: 'struct';
fields: [
{
name: 'a';
type: {
array: ['u8', 32];
};
},
{
name: 'b';
type: {
array: ['u8', 64];
};
},
{
name: 'c';
type: {
array: ['u8', 32];
};
},
];
};
},
{
name: 'InstructionDataInvokeCpi';
type: {
kind: 'struct';
fields: [
{
name: 'proof';
type: {
option: {
defined: 'CompressedProof';
};
};
},
{
name: 'newAddressParams';
type: {
vec: {
defined: 'NewAddressParamsPacked';
};
};
},
{
name: 'inputCompressedAccountsWithMerkleContext';
type: {
vec: {
defined: 'PackedCompressedAccountWithMerkleContext';
};
};
},
{
name: 'outputCompressedAccounts';
type: {
vec: {
defined: 'OutputCompressedAccountWithPackedContext';
};
};
},
{
name: 'relayFee';
type: {
option: 'u64';
};
},
{
name: 'compressOrDecompressLamports';
type: {
option: 'u64';
};
},
{
name: 'isCompress';
type: 'bool';
},
{
name: 'cpiContext';
type: {
option: {
defined: 'CompressedCpiContext';
};
};
},
];
};
},
{
name: 'CompressedCpiContext';
type: {
kind: 'struct';
fields: [
{
name: 'setContext';
docs: [
'Is set by the program that is invoking the CPI to signal that is should',
'set the cpi context.',
];
type: 'bool';
},
{
name: 'firstSetContext';
docs: [
'Is set to wipe the cpi context since someone could have set it before',
'with unrelated data.',
];
type: 'bool';
},
{
name: 'cpiContextAccountIndex';
docs: [
'Index of cpi context account in remaining accounts.',
];
type: 'u8';
},
];
};
},
{
name: 'CompressedAccount';
type: {
kind: 'struct';
fields: [
{
name: 'owner';
type: 'publicKey';
},
{
name: 'lamports';
type: 'u64';
},
{
name: 'address';
type: {
option: {
array: ['u8', 32];
};
};
},
{
name: 'data';
type: {
option: {
defined: 'CompressedAccountData';
};
};
},
];
};
},
{
name: 'CompressedAccountData';
type: {
kind: 'struct';
fields: [
{
name: 'discriminator';
type: {
array: ['u8', 8];
};
},
{
name: 'data';
type: 'bytes';
},
{
name: 'dataHash';
type: {
array: ['u8', 32];
};
},
];
};
},
{
name: 'PackedCompressedAccountWithMerkleContext';
type: {
kind: 'struct';
fields: [
{
name: 'compressedAccount';
type: {
defined: 'CompressedAccount';
};
},
{
name: 'merkleContext';
type: {
defined: 'PackedMerkleContext';
};
},
{
name: 'rootIndex';
docs: [
'Index of root used in inclusion validity proof.',
];
type: 'u16';
},
{
name: 'readOnly';
docs: [
'Placeholder to mark accounts read-only unimplemented set to false.',
];
type: 'bool';
},
];
};
},
{
name: 'PackedMerkleContext';
type: {
kind: 'struct';
fields: [
{
name: 'merkleTreePubkeyIndex';
type: 'u8';
},
{
name: 'nullifierQueuePubkeyIndex';
type: 'u8';
},
{
name: 'leafIndex';
type: 'u32';
},
{
name: 'queueIndex';
docs: [
'Index of leaf in queue. Placeholder of batched Merkle tree updates',
'currently unimplemented.',
];
type: {
option: {
defined: 'QueueIndex';
};
};
},
];
};
},
{
name: 'QueueIndex';
type: {
kind: 'struct';
fields: [
{
name: 'queueId';
docs: ['Id of queue in queue account.'];
type: 'u8';
},
{
name: 'index';
docs: ['Index of compressed account hash in queue.'];
type: 'u16';
},
];
};
},
{
name: 'MerkleTreeSequenceNumber';
type: {
kind: 'struct';
fields: [
{
name: 'pubkey';
type: 'publicKey';
},
{
name: 'seq';
type: 'u64';
},
];
};
},
{
name: 'PublicTransactionEvent';
type: {
kind: 'struct';
fields: [
{
name: 'inputCompressedAccountHashes';
type: {
vec: {
array: ['u8', 32];
};
};
},
{
name: 'outputCompressedAccountHashes';
type: {
vec: {
array: ['u8', 32];
};
};
},
{
name: 'outputCompressedAccounts';
type: {
vec: {
defined: 'OutputCompressedAccountWithPackedContext';
};
};
},
{
name: 'outputLeafIndices';
type: {
vec: 'u32';
};
},
{
name: 'sequenceNumbers';
type: {
vec: {
defined: 'MerkleTreeSequenceNumber';
};
};
},
{
name: 'relayFee';
type: {
option: 'u64';
};
},
{
name: 'isCompress';
type: 'bool';
},
{
name: 'compressOrDecompressLamports';
type: {
option: 'u64';
};
},
{
name: 'pubkeyArray';
type: {
vec: 'publicKey';
};
},
{
name: 'message';
type: {
option: 'bytes';
};
},
];
};
},
];
errors: [
{
code: 6000;
name: 'SumCheckFailed';
msg: 'Sum check failed';
},
{
code: 6001;
name: 'SignerCheckFailed';
msg: 'Signer check failed';
},
{
code: 6002;
name: 'CpiSignerCheckFailed';
msg: 'Cpi signer check failed';
},
{
code: 6003;
name: 'ComputeInputSumFailed';
msg: 'Computing input sum failed.';
},
{
code: 6004;
name: 'ComputeOutputSumFailed';
msg: 'Computing output sum failed.';
},
{
code: 6005;
name: 'ComputeRpcSumFailed';
msg: 'Computing rpc sum failed.';
},
{
code: 6006;
name: 'InvalidAddress';
msg: 'InvalidAddress';
},
{
code: 6007;
name: 'DeriveAddressError';
msg: 'DeriveAddressError';
},
{
code: 6008;
name: 'CompressedSolPdaUndefinedForCompressSol';
msg: 'CompressedSolPdaUndefinedForCompressSol';
},
{
code: 6009;
name: 'DeCompressLamportsUndefinedForCompressSol';
msg: 'DeCompressLamportsUndefinedForCompressSol';
},
{
code: 6010;
name: 'CompressedSolPdaUndefinedForDecompressSol';
msg: 'CompressedSolPdaUndefinedForDecompressSol';
},
{
code: 6011;
name: 'DeCompressLamportsUndefinedForDecompressSol';
msg: 'DeCompressLamportsUndefinedForDecompressSol';
},
{
code: 6012;
name: 'DecompressRecipientUndefinedForDecompressSol';
msg: 'DecompressRecipientUndefinedForDecompressSol';
},
{
code: 6013;
name: 'WriteAccessCheckFailed';
msg: 'WriteAccessCheckFailed';
},
{
code: 6014;
name: 'InvokingProgramNotProvided';
msg: 'InvokingProgramNotProvided';
},
{
code: 6015;
name: 'InvalidCapacity';
msg: 'InvalidCapacity';
},
{
code: 6016;
name: 'InvalidMerkleTreeOwner';
msg: 'InvalidMerkleTreeOwner';
},
{
code: 6017;
name: 'ProofIsNone';
msg: 'ProofIsNone';
},
{
code: 6018;
name: 'ProofIsSome';
msg: 'Proof is some but no input compressed accounts or new addresses provided.';
},
{
code: 6019;
name: 'EmptyInputs';
msg: 'EmptyInputs';
},
{
code: 6020;
name: 'CpiContextAccountUndefined';
msg: 'CpiContextAccountUndefined';
},
{
code: 6021;
name: 'CpiContextEmpty';
msg: 'CpiContextEmpty';
},
{
code: 6022;
name: 'CpiContextMissing';
msg: 'CpiContextMissing';
},
{
code: 6023;
name: 'DecompressionRecipientDefined';
msg: 'DecompressionRecipientDefined';
},
{
code: 6024;
name: 'SolPoolPdaDefined';
msg: 'SolPoolPdaDefined';
},
{
code: 6025;
name: 'AppendStateFailed';
msg: 'AppendStateFailed';
},
{
code: 6026;
name: 'InstructionNotCallable';
msg: 'The instruction is not callable';
},
{
code: 6027;
name: 'CpiContextFeePayerMismatch';
msg: 'CpiContextFeePayerMismatch';
},
{
code: 6028;
name: 'CpiContextAssociatedMerkleTreeMismatch';
msg: 'CpiContextAssociatedMerkleTreeMismatch';
},
{
code: 6029;
name: 'NoInputs';
msg: 'NoInputs';
},
{
code: 6030;
name: 'InputMerkleTreeIndicesNotInOrder';
msg: 'Input merkle tree indices are not in ascending order.';
},
{
code: 6031;
name: 'OutputMerkleTreeIndicesNotInOrder';
msg: 'Output merkle tree indices are not in ascending order.';
},
{
code: 6032;
name: 'OutputMerkleTreeNotUnique';
},
{
code: 6033;
name: 'DataFieldUndefined';
},
];
};
export const IDL: LightSystemProgram = {
version: '1.2.0',
name: 'light_system_program',
constants: [
{
name: 'SOL_POOL_PDA_SEED',
type: 'bytes',
value: '[115, 111, 108, 95, 112, 111, 111, 108, 95, 112, 100, 97]',
},
],
instructions: [
{
name: 'initCpiContextAccount',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
},
{
name: 'cpiContextAccount',
isMut: true,
isSigner: false,
},
{
name: 'associatedMerkleTree',
isMut: false,
isSigner: false,
},
],
args: [],
},
{
name: 'invoke',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: [
'Fee payer needs to be mutable to pay rollover and protocol fees.',
],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
docs: [
'This pda is used to invoke the account compression program.',
],
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
docs: ['Merkle trees.'],
},
{
name: 'solPoolPda',
isMut: true,
isSigner: false,
isOptional: true,
docs: [
'Sol pool pda is used to store the native sol that has been compressed.',
"It's only required when compressing or decompressing sol.",
],
},
{
name: 'decompressionRecipient',
isMut: true,
isSigner: false,
isOptional: true,
docs: [
'Only needs to be provided for decompression as a recipient for the',
'decompressed sol.',
'Compressed sol originate from authority.',
],
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs',
type: 'bytes',
},
],
},
{
name: 'invokeCpi',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: [
'Fee payer needs to be mutable to pay rollover and protocol fees.',
],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'invokingProgram',
isMut: false,
isSigner: false,
},
{
name: 'solPoolPda',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'decompressionRecipient',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
{
name: 'cpiContextAccount',
isMut: true,
isSigner: false,
isOptional: true,
},
],
args: [
{
name: 'inputs',
type: 'bytes',
},
],
},
{
name: 'stubIdlBuild',
docs: [
'This function is a stub to allow Anchor to include the input types in',
'the IDL. It should not be included in production builds nor be called in',
'practice.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: [
'Fee payer needs to be mutable to pay rollover and protocol fees.',
],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
docs: [
'This pda is used to invoke the account compression program.',
],
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
docs: ['Merkle trees.'],
},
{
name: 'solPoolPda',
isMut: true,
isSigner: false,
isOptional: true,
docs: [
'Sol pool pda is used to store the native sol that has been compressed.',
"It's only required when compressing or decompressing sol.",
],
},
{
name: 'decompressionRecipient',
isMut: true,
isSigner: false,
isOptional: true,
docs: [
'Only needs to be provided for decompression as a recipient for the',
'decompressed sol.',
'Compressed sol originate from authority.',
],
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs1',
type: {
defined: 'InstructionDataInvoke',
},
},
{
name: 'inputs2',
type: {
defined: 'InstructionDataInvokeCpi',
},
},
{
name: 'inputs3',
type: {
defined: 'PublicTransactionEvent',
},
},
],
},
],
accounts: [
{
name: 'stateMerkleTreeAccount',
docs: [
'Concurrent state Merkle tree used for public compressed transactions.',
],
type: {
kind: 'struct',
fields: [
{
name: 'metadata',
type: {
defined: 'MerkleTreeMetadata',
},
},
],
},
},
{
name: 'cpiContextAccount',
docs: [
'Collects instruction data without executing a compressed transaction.',
'Signer checks are performed on instruction data.',
'Collected instruction data is combined with the instruction data of the executing cpi,',
'and executed as a single transaction.',
'This enables to use input compressed accounts that are owned by multiple programs,',
'with one zero-knowledge proof.',
],
type: {
kind: 'struct',
fields: [
{
name: 'feePayer',
type: 'publicKey',
},
{
name: 'associatedMerkleTree',
type: 'publicKey',
},
{
name: 'context',
type: {
vec: {
defined: 'InstructionDataInvokeCpi',
},
},
},
],
},
},
],
types: [
{
name: 'AccessMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'owner',
docs: ['Owner of the Merkle tree.'],
type: 'publicKey',
},
{
name: 'programOwner',
docs: [
'Program owner of the Merkle tree. This will be used for program owned Merkle trees.',
],
type: 'publicKey',
},
{
name: 'forester',
docs: [
'Optional privileged forester pubkey, can be set for custom Merkle trees',
'without a network fee. Merkle trees without network fees are not',
'forested by light foresters. The variable is not used in the account',
'compression program but the registry program. The registry program',
'implements access control to prevent contention during forester. The',
'forester pubkey specified in this struct can bypass contention checks.',
],
type: 'publicKey',
},
],
},
},
{
name: 'MerkleTreeMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'accessMetadata',
type: {
defined: 'AccessMetadata',
},
},
{
name: 'rolloverMetadata',
type: {
defined: 'RolloverMetadata',
},
},
{
name: 'associatedQueue',
type: 'publicKey',
},
{
name: 'nextMerkleTree',
type: 'publicKey',
},
],
},
},
{
name: 'RolloverMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'index',
docs: ['Unique index.'],
type: 'u64',
},
{
name: 'rolloverFee',
docs: [
'This fee is used for rent for the next account.',
'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over',
],
type: 'u64',
},
{
name: 'rolloverThreshold',
docs: [
'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).',
],
type: 'u64',
},
{
name: 'networkFee',
docs: ['Tip for maintaining the account.'],
type: 'u64',
},
{
name: 'rolledoverSlot',
docs: [
'The slot when the account was rolled over, a rolled over account should not be written to.',
],
type: 'u64',
},
{
name: 'closeThreshold',
docs: [
'If current slot is greater than rolledover_slot + close_threshold and',
"the account is empty it can be closed. No 'close' functionality has been",
'implemented yet.',
],
type: 'u64',
},
{
name: 'additionalBytes',
docs: [
'Placeholder for bytes of additional accounts which are tied to the',
'Merkle trees operation and need to be rolled over as well.',
],
type: 'u64',
},
],
},
},
{
name: 'InstructionDataInvoke',
type: {
kind: 'struct',
fields: [
{
name: 'proof',
type: {
option: {
defined: 'CompressedProof',
},
},
},
{
name: 'inputCompressedAccountsWithMerkleContext',
type: {
vec: {
defined:
'PackedCompressedAccountWithMerkleContext',
},
},
},
{
name: 'outputCompressedAccounts',
type: {
vec: {
defined:
'OutputCompressedAccountWithPackedContext',
},
},
},
{
name: 'relayFee',
type: {
option: 'u64',
},
},
{
name: 'newAddressParams',
type: {
vec: {
defined: 'NewAddressParamsPacked',
},
},
},
{
name: 'compressOrDecompressLamports',
type: {
option: 'u64',
},
},
{
name: 'isCompress',
type: 'bool',
},
],
},
},
{
name: 'NewAddressParamsPacked',
type: {
kind: 'struct',
fields: [
{
name: 'seed',
type: {
array: ['u8', 32],
},
},
{
name: 'addressQueueAccountIndex',
type: 'u8',
},
{
name: 'addressMerkleTreeAccountIndex',
type: 'u8',
},
{
name: 'addressMerkleTreeRootIndex',
type: 'u16',
},
],
},
},
{
name: 'OutputCompressedAccountWithPackedContext',
type: {
kind: 'struct',
fields: [
{
name: 'compressedAccount',
type: {
defined: 'CompressedAccount',
},
},
{
name: 'merkleTreeIndex',
type: 'u8',
},
],
},
},
{
name: 'CompressedProof',
type: {
kind: 'struct',
fields: [
{
name: 'a',
type: {
array: ['u8', 32],
},
},
{
name: 'b',
type: {
array: ['u8', 64],
},
},
{
name: 'c',
type: {
array: ['u8', 32],
},
},
],
},
},
{
name: 'InstructionDataInvokeCpi',
type: {
kind: 'struct',
fields: [
{
name: 'proof',
type: {
option: {
defined: 'CompressedProof',
},
},
},
{
name: 'newAddressParams',
type: {
vec: {
defined: 'NewAddressParamsPacked',
},
},
},
{
name: 'inputCompressedAccountsWithMerkleContext',
type: {
vec: {
defined:
'PackedCompressedAccountWithMerkleContext',
},
},
},
{
name: 'outputCompressedAccounts',
type: {
vec: {
defined:
'OutputCompressedAccountWithPackedContext',
},
},
},
{
name: 'relayFee',
type: {
option: 'u64',
},
},
{
name: 'compressOrDecompressLamports',
type: {
option: 'u64',
},
},
{
name: 'isCompress',
type: 'bool',
},
{
name: 'cpiContext',
type: {
option: {
defined: 'CompressedCpiContext',
},
},
},
],
},
},
{
name: 'CompressedCpiContext',
type: {
kind: 'struct',
fields: [
{
name: 'setContext',
docs: [
'Is set by the program that is invoking the CPI to signal that is should',
'set the cpi context.',
],
type: 'bool',
},
{
name: 'firstSetContext',
docs: [
'Is set to wipe the cpi context since someone could have set it before',
'with unrelated data.',
],
type: 'bool',
},
{
name: 'cpiContextAccountIndex',
docs: [
'Index of cpi context account in remaining accounts.',
],
type: 'u8',
},
],
},
},
{
name: 'CompressedAccount',
type: {
kind: 'struct',
fields: [
{
name: 'owner',
type: 'publicKey',
},
{
name: 'lamports',
type: 'u64',
},
{
name: 'address',
type: {
option: {
array: ['u8', 32],
},
},
},
{
name: 'data',
type: {
option: {
defined: 'CompressedAccountData',
},
},
},
],
},
},
{
name: 'CompressedAccountData',
type: {
kind: 'struct',
fields: [
{
name: 'discriminator',
type: {
array: ['u8', 8],
},
},
{
name: 'data',
type: 'bytes',
},
{
name: 'dataHash',
type: {
array: ['u8', 32],
},
},
],
},
},
{
name: 'PackedCompressedAccountWithMerkleContext',
type: {
kind: 'struct',
fields: [
{
name: 'compressedAccount',
type: {
defined: 'CompressedAccount',
},
},
{
name: 'merkleContext',
type: {
defined: 'PackedMerkleContext',
},
},
{
name: 'rootIndex',
docs: [
'Index of root used in inclusion validity proof.',
],
type: 'u16',
},
{
name: 'readOnly',
docs: [
'Placeholder to mark accounts read-only unimplemented set to false.',
],
type: 'bool',
},
],
},
},
{
name: 'PackedMerkleContext',
type: {
kind: 'struct',
fields: [
{
name: 'merkleTreePubkeyIndex',
type: 'u8',
},
{
name: 'nullifierQueuePubkeyIndex',
type: 'u8',
},
{
name: 'leafIndex',
type: 'u32',
},
{
name: 'queueIndex',
docs: [
'Index of leaf in queue. Placeholder of batched Merkle tree updates',
'currently unimplemented.',
],
type: {
option: {
defined: 'QueueIndex',
},
},
},
],
},
},
{
name: 'QueueIndex',
type: {
kind: 'struct',
fields: [
{
name: 'queueId',
docs: ['Id of queue in queue account.'],
type: 'u8',
},
{
name: 'index',
docs: ['Index of compressed account hash in queue.'],
type: 'u16',
},
],
},
},
{
name: 'MerkleTreeSequenceNumber',
type: {
kind: 'struct',
fields: [
{
name: 'pubkey',
type: 'publicKey',
},
{
name: 'seq',
type: 'u64',
},
],
},
},
{
name: 'PublicTransactionEvent',
type: {
kind: 'struct',
fields: [
{
name: 'inputCompressedAccountHashes',
type: {
vec: {
array: ['u8', 32],
},
},
},
{
name: 'outputCompressedAccountHashes',
type: {
vec: {
array: ['u8', 32],
},
},
},
{
name: 'outputCompressedAccounts',
type: {
vec: {
defined:
'OutputCompressedAccountWithPackedContext',
},
},
},
{
name: 'outputLeafIndices',
type: {
vec: 'u32',
},
},
{
name: 'sequenceNumbers',
type: {
vec: {
defined: 'MerkleTreeSequenceNumber',
},
},
},
{
name: 'relayFee',
type: {
option: 'u64',
},
},
{
name: 'isCompress',
type: 'bool',
},
{
name: 'compressOrDecompressLamports',
type: {
option: 'u64',
},
},
{
name: 'pubkeyArray',
type: {
vec: 'publicKey',
},
},
{
name: 'message',
type: {
option: 'bytes',
},
},
],
},
},
],
errors: [
{
code: 6000,
name: 'SumCheckFailed',
msg: 'Sum check failed',
},
{
code: 6001,
name: 'SignerCheckFailed',
msg: 'Signer check failed',
},
{
code: 6002,
name: 'CpiSignerCheckFailed',
msg: 'Cpi signer check failed',
},
{
code: 6003,
name: 'ComputeInputSumFailed',
msg: 'Computing input sum failed.',
},
{
code: 6004,
name: 'ComputeOutputSumFailed',
msg: 'Computing output sum failed.',
},
{
code: 6005,
name: 'ComputeRpcSumFailed',
msg: 'Computing rpc sum failed.',
},
{
code: 6006,
name: 'InvalidAddress',
msg: 'InvalidAddress',
},
{
code: 6007,
name: 'DeriveAddressError',
msg: 'DeriveAddressError',
},
{
code: 6008,
name: 'CompressedSolPdaUndefinedForCompressSol',
msg: 'CompressedSolPdaUndefinedForCompressSol',
},
{
code: 6009,
name: 'DeCompressLamportsUndefinedForCompressSol',
msg: 'DeCompressLamportsUndefinedForCompressSol',
},
{
code: 6010,
name: 'CompressedSolPdaUndefinedForDecompressSol',
msg: 'CompressedSolPdaUndefinedForDecompressSol',
},
{
code: 6011,
name: 'DeCompressLamportsUndefinedForDecompressSol',
msg: 'DeCompressLamportsUndefinedForDecompressSol',
},
{
code: 6012,
name: 'DecompressRecipientUndefinedForDecompressSol',
msg: 'DecompressRecipientUndefinedForDecompressSol',
},
{
code: 6013,
name: 'WriteAccessCheckFailed',
msg: 'WriteAccessCheckFailed',
},
{
code: 6014,
name: 'InvokingProgramNotProvided',
msg: 'InvokingProgramNotProvided',
},
{
code: 6015,
name: 'InvalidCapacity',
msg: 'InvalidCapacity',
},
{
code: 6016,
name: 'InvalidMerkleTreeOwner',
msg: 'InvalidMerkleTreeOwner',
},
{
code: 6017,
name: 'ProofIsNone',
msg: 'ProofIsNone',
},
{
code: 6018,
name: 'ProofIsSome',
msg: 'Proof is some but no input compressed accounts or new addresses provided.',
},
{
code: 6019,
name: 'EmptyInputs',
msg: 'EmptyInputs',
},
{
code: 6020,
name: 'CpiContextAccountUndefined',
msg: 'CpiContextAccountUndefined',
},
{
code: 6021,
name: 'CpiContextEmpty',
msg: 'CpiContextEmpty',
},
{
code: 6022,
name: 'CpiContextMissing',
msg: 'CpiContextMissing',
},
{
code: 6023,
name: 'DecompressionRecipientDefined',
msg: 'DecompressionRecipientDefined',
},
{
code: 6024,
name: 'SolPoolPdaDefined',
msg: 'SolPoolPdaDefined',
},
{
code: 6025,
name: 'AppendStateFailed',
msg: 'AppendStateFailed',
},
{
code: 6026,
name: 'InstructionNotCallable',
msg: 'The instruction is not callable',
},
{
code: 6027,
name: 'CpiContextFeePayerMismatch',
msg: 'CpiContextFeePayerMismatch',
},
{
code: 6028,
name: 'CpiContextAssociatedMerkleTreeMismatch',
msg: 'CpiContextAssociatedMerkleTreeMismatch',
},
{
code: 6029,
name: 'NoInputs',
msg: 'NoInputs',
},
{
code: 6030,
name: 'InputMerkleTreeIndicesNotInOrder',
msg: 'Input merkle tree indices are not in ascending order.',
},
{
code: 6031,
name: 'OutputMerkleTreeIndicesNotInOrder',
msg: 'Output merkle tree indices are not in ascending order.',
},
{
code: 6032,
name: 'OutputMerkleTreeNotUnique',
},
{
code: 6033,
name: 'DataFieldUndefined',
},
],
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/idls/index.ts
|
import {
IDL as AccountCompressionIDL,
AccountCompression,
} from './account_compression';
import { IDL as LightRegistryIDL, LightRegistry } from './light_registry';
import {
IDL as LightSystemIDL,
LightSystemProgram as LightSystem,
} from './light_system_program';
import {
IDL as LightCompressedTokenIDL,
LightCompressedToken,
} from './light_compressed_token';
export {
AccountCompressionIDL,
AccountCompression,
LightRegistryIDL,
LightRegistry,
LightSystemIDL,
LightSystem,
LightCompressedTokenIDL,
LightCompressedToken,
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/idls/light_registry.ts
|
export type LightRegistry = {
version: '1.2.0';
name: 'light_registry';
constants: [
{
name: 'FORESTER_SEED';
type: 'bytes';
value: '[102, 111, 114, 101, 115, 116, 101, 114]';
},
{
name: 'FORESTER_EPOCH_SEED';
type: 'bytes';
value: '[102, 111, 114, 101, 115, 116, 101, 114, 95, 101, 112, 111, 99, 104]';
},
{
name: 'PROTOCOL_CONFIG_PDA_SEED';
type: 'bytes';
value: '[97, 117, 116, 104, 111, 114, 105, 116, 121]';
},
];
instructions: [
{
name: 'initializeProtocolConfig';
docs: [
'Initializes the protocol config pda. Can only be called once by the',
'program account keypair.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'protocolConfigPda';
isMut: true;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
{
name: 'protocolConfig';
type: {
defined: 'ProtocolConfig';
};
},
];
},
{
name: 'updateProtocolConfig';
accounts: [
{
name: 'feePayer';
isMut: false;
isSigner: true;
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'protocolConfigPda';
isMut: true;
isSigner: false;
},
{
name: 'newAuthority';
isMut: false;
isSigner: true;
isOptional: true;
},
];
args: [
{
name: 'protocolConfig';
type: {
option: {
defined: 'ProtocolConfig';
};
};
},
];
},
{
name: 'registerSystemProgram';
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'protocolConfigPda';
isMut: true;
isSigner: false;
},
{
name: 'cpiAuthority';
isMut: true;
isSigner: false;
},
{
name: 'groupPda';
isMut: true;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: true;
isSigner: false;
},
{
name: 'programToBeRegistered';
isMut: false;
isSigner: true;
docs: [
'- is signer so that only the program deployer can register a program.',
];
},
];
args: [
{
name: 'bump';
type: 'u8';
},
];
},
{
name: 'deregisterSystemProgram';
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'protocolConfigPda';
isMut: true;
isSigner: false;
},
{
name: 'cpiAuthority';
isMut: true;
isSigner: false;
},
{
name: 'groupPda';
isMut: true;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: true;
isSigner: false;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
];
},
{
name: 'registerForester';
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'protocolConfigPda';
isMut: false;
isSigner: false;
},
{
name: 'foresterPda';
isMut: true;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
{
name: 'authority';
type: 'publicKey';
},
{
name: 'config';
type: {
defined: 'ForesterConfig';
};
},
{
name: 'weight';
type: {
option: 'u64';
};
},
];
},
{
name: 'updateForesterPda';
accounts: [
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'foresterPda';
isMut: true;
isSigner: false;
},
{
name: 'newAuthority';
isMut: false;
isSigner: true;
isOptional: true;
},
];
args: [
{
name: 'config';
type: {
option: {
defined: 'ForesterConfig';
};
};
},
];
},
{
name: 'updateForesterPdaWeight';
accounts: [
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'protocolConfigPda';
isMut: false;
isSigner: false;
},
{
name: 'foresterPda';
isMut: true;
isSigner: false;
},
];
args: [
{
name: 'newWeight';
type: 'u64';
},
];
},
{
name: 'registerForesterEpoch';
docs: [
'Registers the forester for the epoch.',
'1. Only the forester can register herself for the epoch.',
'2. Protocol config is copied.',
'3. Epoch account is created if needed.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'foresterPda';
isMut: false;
isSigner: false;
},
{
name: 'foresterEpochPda';
isMut: true;
isSigner: false;
docs: [
'Instruction checks that current_epoch is the the current epoch and that',
'the epoch is in registration phase.',
];
},
{
name: 'protocolConfig';
isMut: false;
isSigner: false;
},
{
name: 'epochPda';
isMut: true;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'epoch';
type: 'u64';
},
];
},
{
name: 'finalizeRegistration';
docs: [
'This transaction can be included as additional instruction in the first',
'work instructions during the active phase.',
'Registration Period must be over.',
];
accounts: [
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'foresterEpochPda';
isMut: true;
isSigner: false;
},
{
name: 'epochPda';
isMut: false;
isSigner: false;
},
];
args: [];
},
{
name: 'reportWork';
accounts: [
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'foresterEpochPda';
isMut: true;
isSigner: false;
},
{
name: 'epochPda';
isMut: true;
isSigner: false;
},
];
args: [];
},
{
name: 'initializeAddressMerkleTree';
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
docs: [
'Anyone can create new trees just the fees cannot be set arbitrarily.',
];
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'queue';
isMut: true;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'cpiAuthority';
isMut: true;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'protocolConfigPda';
isMut: false;
isSigner: false;
},
{
name: 'cpiContextAccount';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
isOptional: true;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
{
name: 'programOwner';
type: {
option: 'publicKey';
};
},
{
name: 'forester';
type: {
option: 'publicKey';
};
},
{
name: 'merkleTreeConfig';
type: {
defined: 'AddressMerkleTreeConfig';
};
},
{
name: 'queueConfig';
type: {
defined: 'AddressQueueConfig';
};
},
];
},
{
name: 'initializeStateMerkleTree';
accounts: [
{
name: 'authority';
isMut: true;
isSigner: true;
docs: [
'Anyone can create new trees just the fees cannot be set arbitrarily.',
];
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'queue';
isMut: true;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'cpiAuthority';
isMut: true;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'protocolConfigPda';
isMut: false;
isSigner: false;
},
{
name: 'cpiContextAccount';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
isOptional: true;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
{
name: 'programOwner';
type: {
option: 'publicKey';
};
},
{
name: 'forester';
type: {
option: 'publicKey';
};
},
{
name: 'merkleTreeConfig';
type: {
defined: 'StateMerkleTreeConfig';
};
},
{
name: 'queueConfig';
type: {
defined: 'NullifierQueueConfig';
};
},
];
},
{
name: 'nullify';
accounts: [
{
name: 'registeredForesterPda';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'cpiAuthority';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'logWrapper';
isMut: false;
isSigner: false;
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'nullifierQueue';
isMut: true;
isSigner: false;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
{
name: 'changeLogIndices';
type: {
vec: 'u64';
};
},
{
name: 'leavesQueueIndices';
type: {
vec: 'u16';
};
},
{
name: 'indices';
type: {
vec: 'u64';
};
},
{
name: 'proofs';
type: {
vec: {
vec: {
array: ['u8', 32];
};
};
};
},
];
},
{
name: 'updateAddressMerkleTree';
accounts: [
{
name: 'registeredForesterPda';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'cpiAuthority';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'queue';
isMut: true;
isSigner: false;
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'logWrapper';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
{
name: 'changelogIndex';
type: 'u16';
},
{
name: 'indexedChangelogIndex';
type: 'u16';
},
{
name: 'value';
type: 'u16';
},
{
name: 'lowAddressIndex';
type: 'u64';
},
{
name: 'lowAddressValue';
type: {
array: ['u8', 32];
};
},
{
name: 'lowAddressNextIndex';
type: 'u64';
},
{
name: 'lowAddressNextValue';
type: {
array: ['u8', 32];
};
},
{
name: 'lowAddressProof';
type: {
array: [
{
array: ['u8', 32];
},
16,
];
};
},
];
},
{
name: 'rolloverAddressMerkleTreeAndQueue';
accounts: [
{
name: 'registeredForesterPda';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'cpiAuthority';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'newMerkleTree';
isMut: true;
isSigner: false;
},
{
name: 'newQueue';
isMut: true;
isSigner: false;
},
{
name: 'oldMerkleTree';
isMut: true;
isSigner: false;
},
{
name: 'oldQueue';
isMut: true;
isSigner: false;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
];
},
{
name: 'rolloverStateMerkleTreeAndQueue';
accounts: [
{
name: 'registeredForesterPda';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'authority';
isMut: true;
isSigner: true;
},
{
name: 'cpiAuthority';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'newMerkleTree';
isMut: true;
isSigner: false;
},
{
name: 'newQueue';
isMut: true;
isSigner: false;
},
{
name: 'oldMerkleTree';
isMut: true;
isSigner: false;
},
{
name: 'oldQueue';
isMut: true;
isSigner: false;
},
{
name: 'cpiContextAccount';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'protocolConfigPda';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'bump';
type: 'u8';
},
];
},
];
accounts: [
{
name: 'epochPda';
docs: ['Is used for tallying and rewards calculation'];
type: {
kind: 'struct';
fields: [
{
name: 'epoch';
type: 'u64';
},
{
name: 'protocolConfig';
type: {
defined: 'ProtocolConfig';
};
},
{
name: 'totalWork';
type: 'u64';
},
{
name: 'registeredWeight';
type: 'u64';
},
];
};
},
{
name: 'foresterEpochPda';
type: {
kind: 'struct';
fields: [
{
name: 'authority';
type: 'publicKey';
},
{
name: 'config';
type: {
defined: 'ForesterConfig';
};
},
{
name: 'epoch';
type: 'u64';
},
{
name: 'weight';
type: 'u64';
},
{
name: 'workCounter';
type: 'u64';
},
{
name: 'hasReportedWork';
docs: [
'Work can be reported in an extra round to earn extra performance based',
'rewards.',
];
type: 'bool';
},
{
name: 'foresterIndex';
docs: [
'Start index of the range that determines when the forester is eligible to perform work.',
'End index is forester_start_index + weight',
];
type: 'u64';
},
{
name: 'epochActivePhaseStartSlot';
type: 'u64';
},
{
name: 'totalEpochWeight';
docs: [
'Total epoch weight is registered weight of the epoch account after',
'registration is concluded and active epoch period starts.',
];
type: {
option: 'u64';
};
},
{
name: 'protocolConfig';
type: {
defined: 'ProtocolConfig';
};
},
{
name: 'finalizeCounter';
docs: [
'Incremented every time finalize registration is called.',
];
type: 'u64';
},
];
};
},
{
name: 'protocolConfigPda';
type: {
kind: 'struct';
fields: [
{
name: 'authority';
type: 'publicKey';
},
{
name: 'bump';
type: 'u8';
},
{
name: 'config';
type: {
defined: 'ProtocolConfig';
};
},
];
};
},
{
name: 'foresterPda';
type: {
kind: 'struct';
fields: [
{
name: 'authority';
type: 'publicKey';
},
{
name: 'config';
type: {
defined: 'ForesterConfig';
};
},
{
name: 'activeWeight';
type: 'u64';
},
{
name: 'pendingWeight';
docs: [
'Pending weight which will get active once the next epoch starts.',
];
type: 'u64';
},
{
name: 'currentEpoch';
type: 'u64';
},
{
name: 'lastCompressedForesterEpochPdaHash';
docs: [
'Link to previous compressed forester epoch account hash.',
];
type: {
array: ['u8', 32];
};
},
{
name: 'lastRegisteredEpoch';
type: 'u64';
},
];
};
},
];
types: [
{
name: 'ProtocolConfig';
docs: [
'Epoch Phases:',
'1. Registration',
'2. Active',
'3. Report Work',
'4. Post (Epoch has ended, and rewards can be claimed.)',
'- There is always an active phase in progress, registration and report work',
'phases run in parallel to a currently active phase.',
];
type: {
kind: 'struct';
fields: [
{
name: 'genesisSlot';
docs: [
'Solana slot when the protocol starts operating.',
];
type: 'u64';
},
{
name: 'minWeight';
docs: [
'Minimum weight required for a forester to register to an epoch.',
];
type: 'u64';
},
{
name: 'slotLength';
docs: ['Light protocol slot length.'];
type: 'u64';
},
{
name: 'registrationPhaseLength';
docs: ['Foresters can register for this phase.'];
type: 'u64';
},
{
name: 'activePhaseLength';
docs: ['Foresters can perform work in this phase.'];
type: 'u64';
},
{
name: 'reportWorkPhaseLength';
docs: [
'Foresters can report work to receive performance based rewards in this',
'phase.',
];
type: 'u64';
},
{
name: 'networkFee';
type: 'u64';
},
{
name: 'cpiContextSize';
type: 'u64';
},
{
name: 'finalizeCounterLimit';
type: 'u64';
},
{
name: 'placeHolder';
docs: ['Placeholder for future protocol updates.'];
type: 'publicKey';
},
{
name: 'placeHolderA';
type: 'u64';
},
{
name: 'placeHolderB';
type: 'u64';
},
{
name: 'placeHolderC';
type: 'u64';
},
{
name: 'placeHolderD';
type: 'u64';
},
{
name: 'placeHolderE';
type: 'u64';
},
{
name: 'placeHolderF';
type: 'u64';
},
];
};
},
{
name: 'ForesterConfig';
type: {
kind: 'struct';
fields: [
{
name: 'fee';
docs: ['Fee in percentage points.'];
type: 'u64';
},
];
};
},
{
name: 'EpochState';
type: {
kind: 'enum';
variants: [
{
name: 'Registration';
},
{
name: 'Active';
},
{
name: 'ReportWork';
},
{
name: 'Post';
},
{
name: 'Pre';
},
];
};
},
];
errors: [
{
code: 6000;
name: 'InvalidForester';
msg: 'InvalidForester';
},
{
code: 6001;
name: 'NotInReportWorkPhase';
},
{
code: 6002;
name: 'StakeAccountAlreadySynced';
},
{
code: 6003;
name: 'EpochEnded';
},
{
code: 6004;
name: 'ForesterNotEligible';
},
{
code: 6005;
name: 'NotInRegistrationPeriod';
},
{
code: 6006;
name: 'WeightInsuffient';
},
{
code: 6007;
name: 'ForesterAlreadyRegistered';
},
{
code: 6008;
name: 'InvalidEpochAccount';
},
{
code: 6009;
name: 'InvalidEpoch';
},
{
code: 6010;
name: 'EpochStillInProgress';
},
{
code: 6011;
name: 'NotInActivePhase';
},
{
code: 6012;
name: 'ForesterAlreadyReportedWork';
},
{
code: 6013;
name: 'InvalidNetworkFee';
},
{
code: 6014;
name: 'FinalizeCounterExceeded';
},
{
code: 6015;
name: 'CpiContextAccountMissing';
},
{
code: 6016;
name: 'ArithmeticUnderflow';
},
{
code: 6017;
name: 'RegistrationNotFinalized';
},
{
code: 6018;
name: 'CpiContextAccountInvalidDataLen';
},
{
code: 6019;
name: 'InvalidConfigUpdate';
},
{
code: 6020;
name: 'InvalidSigner';
},
{
code: 6021;
name: 'GetLatestRegisterEpochFailed';
},
{
code: 6022;
name: 'GetCurrentActiveEpochFailed';
},
{
code: 6023;
name: 'ForesterUndefined';
},
{
code: 6024;
name: 'ForesterDefined';
},
];
};
export const IDL: LightRegistry = {
version: '1.2.0',
name: 'light_registry',
constants: [
{
name: 'FORESTER_SEED',
type: 'bytes',
value: '[102, 111, 114, 101, 115, 116, 101, 114]',
},
{
name: 'FORESTER_EPOCH_SEED',
type: 'bytes',
value: '[102, 111, 114, 101, 115, 116, 101, 114, 95, 101, 112, 111, 99, 104]',
},
{
name: 'PROTOCOL_CONFIG_PDA_SEED',
type: 'bytes',
value: '[97, 117, 116, 104, 111, 114, 105, 116, 121]',
},
],
instructions: [
{
name: 'initializeProtocolConfig',
docs: [
'Initializes the protocol config pda. Can only be called once by the',
'program account keypair.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'protocolConfigPda',
isMut: true,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
{
name: 'protocolConfig',
type: {
defined: 'ProtocolConfig',
},
},
],
},
{
name: 'updateProtocolConfig',
accounts: [
{
name: 'feePayer',
isMut: false,
isSigner: true,
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'protocolConfigPda',
isMut: true,
isSigner: false,
},
{
name: 'newAuthority',
isMut: false,
isSigner: true,
isOptional: true,
},
],
args: [
{
name: 'protocolConfig',
type: {
option: {
defined: 'ProtocolConfig',
},
},
},
],
},
{
name: 'registerSystemProgram',
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'protocolConfigPda',
isMut: true,
isSigner: false,
},
{
name: 'cpiAuthority',
isMut: true,
isSigner: false,
},
{
name: 'groupPda',
isMut: true,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: true,
isSigner: false,
},
{
name: 'programToBeRegistered',
isMut: false,
isSigner: true,
docs: [
'- is signer so that only the program deployer can register a program.',
],
},
],
args: [
{
name: 'bump',
type: 'u8',
},
],
},
{
name: 'deregisterSystemProgram',
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'protocolConfigPda',
isMut: true,
isSigner: false,
},
{
name: 'cpiAuthority',
isMut: true,
isSigner: false,
},
{
name: 'groupPda',
isMut: true,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: true,
isSigner: false,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
],
},
{
name: 'registerForester',
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'protocolConfigPda',
isMut: false,
isSigner: false,
},
{
name: 'foresterPda',
isMut: true,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
{
name: 'authority',
type: 'publicKey',
},
{
name: 'config',
type: {
defined: 'ForesterConfig',
},
},
{
name: 'weight',
type: {
option: 'u64',
},
},
],
},
{
name: 'updateForesterPda',
accounts: [
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'foresterPda',
isMut: true,
isSigner: false,
},
{
name: 'newAuthority',
isMut: false,
isSigner: true,
isOptional: true,
},
],
args: [
{
name: 'config',
type: {
option: {
defined: 'ForesterConfig',
},
},
},
],
},
{
name: 'updateForesterPdaWeight',
accounts: [
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'protocolConfigPda',
isMut: false,
isSigner: false,
},
{
name: 'foresterPda',
isMut: true,
isSigner: false,
},
],
args: [
{
name: 'newWeight',
type: 'u64',
},
],
},
{
name: 'registerForesterEpoch',
docs: [
'Registers the forester for the epoch.',
'1. Only the forester can register herself for the epoch.',
'2. Protocol config is copied.',
'3. Epoch account is created if needed.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'foresterPda',
isMut: false,
isSigner: false,
},
{
name: 'foresterEpochPda',
isMut: true,
isSigner: false,
docs: [
'Instruction checks that current_epoch is the the current epoch and that',
'the epoch is in registration phase.',
],
},
{
name: 'protocolConfig',
isMut: false,
isSigner: false,
},
{
name: 'epochPda',
isMut: true,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'epoch',
type: 'u64',
},
],
},
{
name: 'finalizeRegistration',
docs: [
'This transaction can be included as additional instruction in the first',
'work instructions during the active phase.',
'Registration Period must be over.',
],
accounts: [
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'foresterEpochPda',
isMut: true,
isSigner: false,
},
{
name: 'epochPda',
isMut: false,
isSigner: false,
},
],
args: [],
},
{
name: 'reportWork',
accounts: [
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'foresterEpochPda',
isMut: true,
isSigner: false,
},
{
name: 'epochPda',
isMut: true,
isSigner: false,
},
],
args: [],
},
{
name: 'initializeAddressMerkleTree',
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
docs: [
'Anyone can create new trees just the fees cannot be set arbitrarily.',
],
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'queue',
isMut: true,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'cpiAuthority',
isMut: true,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'protocolConfigPda',
isMut: false,
isSigner: false,
},
{
name: 'cpiContextAccount',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
isOptional: true,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
{
name: 'programOwner',
type: {
option: 'publicKey',
},
},
{
name: 'forester',
type: {
option: 'publicKey',
},
},
{
name: 'merkleTreeConfig',
type: {
defined: 'AddressMerkleTreeConfig',
},
},
{
name: 'queueConfig',
type: {
defined: 'AddressQueueConfig',
},
},
],
},
{
name: 'initializeStateMerkleTree',
accounts: [
{
name: 'authority',
isMut: true,
isSigner: true,
docs: [
'Anyone can create new trees just the fees cannot be set arbitrarily.',
],
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'queue',
isMut: true,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'cpiAuthority',
isMut: true,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'protocolConfigPda',
isMut: false,
isSigner: false,
},
{
name: 'cpiContextAccount',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
isOptional: true,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
{
name: 'programOwner',
type: {
option: 'publicKey',
},
},
{
name: 'forester',
type: {
option: 'publicKey',
},
},
{
name: 'merkleTreeConfig',
type: {
defined: 'StateMerkleTreeConfig',
},
},
{
name: 'queueConfig',
type: {
defined: 'NullifierQueueConfig',
},
},
],
},
{
name: 'nullify',
accounts: [
{
name: 'registeredForesterPda',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'cpiAuthority',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'logWrapper',
isMut: false,
isSigner: false,
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'nullifierQueue',
isMut: true,
isSigner: false,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
{
name: 'changeLogIndices',
type: {
vec: 'u64',
},
},
{
name: 'leavesQueueIndices',
type: {
vec: 'u16',
},
},
{
name: 'indices',
type: {
vec: 'u64',
},
},
{
name: 'proofs',
type: {
vec: {
vec: {
array: ['u8', 32],
},
},
},
},
],
},
{
name: 'updateAddressMerkleTree',
accounts: [
{
name: 'registeredForesterPda',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'cpiAuthority',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'queue',
isMut: true,
isSigner: false,
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'logWrapper',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
{
name: 'changelogIndex',
type: 'u16',
},
{
name: 'indexedChangelogIndex',
type: 'u16',
},
{
name: 'value',
type: 'u16',
},
{
name: 'lowAddressIndex',
type: 'u64',
},
{
name: 'lowAddressValue',
type: {
array: ['u8', 32],
},
},
{
name: 'lowAddressNextIndex',
type: 'u64',
},
{
name: 'lowAddressNextValue',
type: {
array: ['u8', 32],
},
},
{
name: 'lowAddressProof',
type: {
array: [
{
array: ['u8', 32],
},
16,
],
},
},
],
},
{
name: 'rolloverAddressMerkleTreeAndQueue',
accounts: [
{
name: 'registeredForesterPda',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'cpiAuthority',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'newMerkleTree',
isMut: true,
isSigner: false,
},
{
name: 'newQueue',
isMut: true,
isSigner: false,
},
{
name: 'oldMerkleTree',
isMut: true,
isSigner: false,
},
{
name: 'oldQueue',
isMut: true,
isSigner: false,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
],
},
{
name: 'rolloverStateMerkleTreeAndQueue',
accounts: [
{
name: 'registeredForesterPda',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'authority',
isMut: true,
isSigner: true,
},
{
name: 'cpiAuthority',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'newMerkleTree',
isMut: true,
isSigner: false,
},
{
name: 'newQueue',
isMut: true,
isSigner: false,
},
{
name: 'oldMerkleTree',
isMut: true,
isSigner: false,
},
{
name: 'oldQueue',
isMut: true,
isSigner: false,
},
{
name: 'cpiContextAccount',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'protocolConfigPda',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'bump',
type: 'u8',
},
],
},
],
accounts: [
{
name: 'epochPda',
docs: ['Is used for tallying and rewards calculation'],
type: {
kind: 'struct',
fields: [
{
name: 'epoch',
type: 'u64',
},
{
name: 'protocolConfig',
type: {
defined: 'ProtocolConfig',
},
},
{
name: 'totalWork',
type: 'u64',
},
{
name: 'registeredWeight',
type: 'u64',
},
],
},
},
{
name: 'foresterEpochPda',
type: {
kind: 'struct',
fields: [
{
name: 'authority',
type: 'publicKey',
},
{
name: 'config',
type: {
defined: 'ForesterConfig',
},
},
{
name: 'epoch',
type: 'u64',
},
{
name: 'weight',
type: 'u64',
},
{
name: 'workCounter',
type: 'u64',
},
{
name: 'hasReportedWork',
docs: [
'Work can be reported in an extra round to earn extra performance based',
'rewards.',
],
type: 'bool',
},
{
name: 'foresterIndex',
docs: [
'Start index of the range that determines when the forester is eligible to perform work.',
'End index is forester_start_index + weight',
],
type: 'u64',
},
{
name: 'epochActivePhaseStartSlot',
type: 'u64',
},
{
name: 'totalEpochWeight',
docs: [
'Total epoch weight is registered weight of the epoch account after',
'registration is concluded and active epoch period starts.',
],
type: {
option: 'u64',
},
},
{
name: 'protocolConfig',
type: {
defined: 'ProtocolConfig',
},
},
{
name: 'finalizeCounter',
docs: [
'Incremented every time finalize registration is called.',
],
type: 'u64',
},
],
},
},
{
name: 'protocolConfigPda',
type: {
kind: 'struct',
fields: [
{
name: 'authority',
type: 'publicKey',
},
{
name: 'bump',
type: 'u8',
},
{
name: 'config',
type: {
defined: 'ProtocolConfig',
},
},
],
},
},
{
name: 'foresterPda',
type: {
kind: 'struct',
fields: [
{
name: 'authority',
type: 'publicKey',
},
{
name: 'config',
type: {
defined: 'ForesterConfig',
},
},
{
name: 'activeWeight',
type: 'u64',
},
{
name: 'pendingWeight',
docs: [
'Pending weight which will get active once the next epoch starts.',
],
type: 'u64',
},
{
name: 'currentEpoch',
type: 'u64',
},
{
name: 'lastCompressedForesterEpochPdaHash',
docs: [
'Link to previous compressed forester epoch account hash.',
],
type: {
array: ['u8', 32],
},
},
{
name: 'lastRegisteredEpoch',
type: 'u64',
},
],
},
},
],
types: [
{
name: 'ProtocolConfig',
docs: [
'Epoch Phases:',
'1. Registration',
'2. Active',
'3. Report Work',
'4. Post (Epoch has ended, and rewards can be claimed.)',
'- There is always an active phase in progress, registration and report work',
'phases run in parallel to a currently active phase.',
],
type: {
kind: 'struct',
fields: [
{
name: 'genesisSlot',
docs: [
'Solana slot when the protocol starts operating.',
],
type: 'u64',
},
{
name: 'minWeight',
docs: [
'Minimum weight required for a forester to register to an epoch.',
],
type: 'u64',
},
{
name: 'slotLength',
docs: ['Light protocol slot length.'],
type: 'u64',
},
{
name: 'registrationPhaseLength',
docs: ['Foresters can register for this phase.'],
type: 'u64',
},
{
name: 'activePhaseLength',
docs: ['Foresters can perform work in this phase.'],
type: 'u64',
},
{
name: 'reportWorkPhaseLength',
docs: [
'Foresters can report work to receive performance based rewards in this',
'phase.',
],
type: 'u64',
},
{
name: 'networkFee',
type: 'u64',
},
{
name: 'cpiContextSize',
type: 'u64',
},
{
name: 'finalizeCounterLimit',
type: 'u64',
},
{
name: 'placeHolder',
docs: ['Placeholder for future protocol updates.'],
type: 'publicKey',
},
{
name: 'placeHolderA',
type: 'u64',
},
{
name: 'placeHolderB',
type: 'u64',
},
{
name: 'placeHolderC',
type: 'u64',
},
{
name: 'placeHolderD',
type: 'u64',
},
{
name: 'placeHolderE',
type: 'u64',
},
{
name: 'placeHolderF',
type: 'u64',
},
],
},
},
{
name: 'ForesterConfig',
type: {
kind: 'struct',
fields: [
{
name: 'fee',
docs: ['Fee in percentage points.'],
type: 'u64',
},
],
},
},
{
name: 'EpochState',
type: {
kind: 'enum',
variants: [
{
name: 'Registration',
},
{
name: 'Active',
},
{
name: 'ReportWork',
},
{
name: 'Post',
},
{
name: 'Pre',
},
],
},
},
],
errors: [
{
code: 6000,
name: 'InvalidForester',
msg: 'InvalidForester',
},
{
code: 6001,
name: 'NotInReportWorkPhase',
},
{
code: 6002,
name: 'StakeAccountAlreadySynced',
},
{
code: 6003,
name: 'EpochEnded',
},
{
code: 6004,
name: 'ForesterNotEligible',
},
{
code: 6005,
name: 'NotInRegistrationPeriod',
},
{
code: 6006,
name: 'WeightInsuffient',
},
{
code: 6007,
name: 'ForesterAlreadyRegistered',
},
{
code: 6008,
name: 'InvalidEpochAccount',
},
{
code: 6009,
name: 'InvalidEpoch',
},
{
code: 6010,
name: 'EpochStillInProgress',
},
{
code: 6011,
name: 'NotInActivePhase',
},
{
code: 6012,
name: 'ForesterAlreadyReportedWork',
},
{
code: 6013,
name: 'InvalidNetworkFee',
},
{
code: 6014,
name: 'FinalizeCounterExceeded',
},
{
code: 6015,
name: 'CpiContextAccountMissing',
},
{
code: 6016,
name: 'ArithmeticUnderflow',
},
{
code: 6017,
name: 'RegistrationNotFinalized',
},
{
code: 6018,
name: 'CpiContextAccountInvalidDataLen',
},
{
code: 6019,
name: 'InvalidConfigUpdate',
},
{
code: 6020,
name: 'InvalidSigner',
},
{
code: 6021,
name: 'GetLatestRegisterEpochFailed',
},
{
code: 6022,
name: 'GetCurrentActiveEpochFailed',
},
{
code: 6023,
name: 'ForesterUndefined',
},
{
code: 6024,
name: 'ForesterDefined',
},
],
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/idls/light_compressed_token.ts
|
export type LightCompressedToken = {
version: '1.2.0';
name: 'light_compressed_token';
instructions: [
{
name: 'createTokenPool';
docs: [
'This instruction creates a token pool for a given mint. Every spl mint',
'can have one token pool. When a token is compressed the tokens are',
'transferrred to the token pool, and their compressed equivalent is',
'minted into a Merkle tree.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'tokenPoolPda';
isMut: true;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
{
name: 'mint';
isMut: true;
isSigner: false;
},
{
name: 'tokenProgram';
isMut: false;
isSigner: false;
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
];
args: [];
},
{
name: 'mintTo';
docs: [
'Mints tokens from an spl token mint to a list of compressed accounts.',
'Minted tokens are transferred to a pool account owned by the compressed',
'token program. The instruction creates one compressed output account for',
'every amount and pubkey input pair. A constant amount of lamports can be',
'transferred to each output account to enable. A use case to add lamports',
'to a compressed token account is to prevent spam. This is the only way',
'to add lamports to a compressed token account.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'mint';
isMut: true;
isSigner: false;
},
{
name: 'tokenPoolPda';
isMut: true;
isSigner: false;
},
{
name: 'tokenProgram';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
docs: ['programs'];
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'merkleTree';
isMut: true;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
{
name: 'solPoolPda';
isMut: true;
isSigner: false;
isOptional: true;
},
];
args: [
{
name: 'publicKeys';
type: {
vec: 'publicKey';
};
},
{
name: 'amounts';
type: {
vec: 'u64';
};
},
{
name: 'lamports';
type: {
option: 'u64';
};
},
];
},
{
name: 'compressSplTokenAccount';
docs: [
'Compresses the balance of an spl token account sub an optional remaining',
'amount. This instruction does not close the spl token account. To close',
'the account bundle a close spl account instruction in your transaction.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
];
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
docs: ['this program is the signer of the cpi.'];
},
{
name: 'tokenPoolPda';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'compressOrDecompressTokenAccount';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'tokenProgram';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'owner';
type: 'publicKey';
},
{
name: 'remainingAmount';
type: {
option: 'u64';
};
},
{
name: 'cpiContext';
type: {
option: {
defined: 'CompressedCpiContext';
};
};
},
];
},
{
name: 'transfer';
docs: [
'Transfers compressed tokens from one account to another. All accounts',
'must be of the same mint. Additional spl tokens can be compressed or',
'decompressed. In one transaction only compression or decompression is',
'possible. Lamports can be transferred alongside tokens. If output token',
'accounts specify less lamports than inputs the remaining lamports are',
'transferred to an output compressed account. Signer must be owner or',
'delegate. If a delegated token account is transferred the delegate is',
'not preserved.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
];
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
docs: ['this program is the signer of the cpi.'];
},
{
name: 'tokenPoolPda';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'compressOrDecompressTokenAccount';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'tokenProgram';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs';
type: 'bytes';
},
];
},
{
name: 'approve';
docs: [
'Delegates an amount to a delegate. A compressed token account is either',
'completely delegated or not. Prior delegates are not preserved. Cannot',
'be called by a delegate.',
'The instruction creates two output accounts:',
'1. one account with delegated amount',
'2. one account with remaining(change) amount',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
];
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
docs: ['this program is the signer of the cpi.'];
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs';
type: 'bytes';
},
];
},
{
name: 'revoke';
docs: [
'Revokes a delegation. The instruction merges all inputs into one output',
'account. Cannot be called by a delegate. Delegates are not preserved.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
];
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
docs: ['this program is the signer of the cpi.'];
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs';
type: 'bytes';
},
];
},
{
name: 'freeze';
docs: [
'Freezes compressed token accounts. Inputs must not be frozen. Creates as',
'many outputs as inputs. Balances and delegates are preserved.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
docs: ['that this program is the signer of the cpi.'];
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
{
name: 'mint';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs';
type: 'bytes';
},
];
},
{
name: 'thaw';
docs: [
'Thaws frozen compressed token accounts. Inputs must be frozen. Creates',
'as many outputs as inputs. Balances and delegates are preserved.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
docs: ['that this program is the signer of the cpi.'];
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
{
name: 'mint';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs';
type: 'bytes';
},
];
},
{
name: 'burn';
docs: [
'Burns compressed tokens and spl tokens from the pool account. Delegates',
'can burn tokens. The output compressed token account remains delegated.',
'Creates one output compressed token account.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
];
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'mint';
isMut: true;
isSigner: false;
},
{
name: 'tokenPoolPda';
isMut: true;
isSigner: false;
},
{
name: 'tokenProgram';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs';
type: 'bytes';
},
];
},
{
name: 'stubIdlBuild';
docs: [
'This function is a stub to allow Anchor to include the input types in',
'the IDL. It should not be included in production builds nor be called in',
'practice.',
];
accounts: [
{
name: 'feePayer';
isMut: true;
isSigner: true;
docs: ['UNCHECKED: only pays fees.'];
},
{
name: 'authority';
isMut: false;
isSigner: true;
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
];
},
{
name: 'cpiAuthorityPda';
isMut: false;
isSigner: false;
},
{
name: 'lightSystemProgram';
isMut: false;
isSigner: false;
},
{
name: 'registeredProgramPda';
isMut: false;
isSigner: false;
},
{
name: 'noopProgram';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionAuthority';
isMut: false;
isSigner: false;
},
{
name: 'accountCompressionProgram';
isMut: false;
isSigner: false;
},
{
name: 'selfProgram';
isMut: false;
isSigner: false;
docs: ['this program is the signer of the cpi.'];
},
{
name: 'tokenPoolPda';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'compressOrDecompressTokenAccount';
isMut: true;
isSigner: false;
isOptional: true;
},
{
name: 'tokenProgram';
isMut: false;
isSigner: false;
isOptional: true;
},
{
name: 'systemProgram';
isMut: false;
isSigner: false;
},
];
args: [
{
name: 'inputs1';
type: {
defined: 'CompressedTokenInstructionDataTransfer';
};
},
{
name: 'inputs2';
type: {
defined: 'TokenData';
};
},
];
},
];
types: [
{
name: 'AccessMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'owner';
docs: ['Owner of the Merkle tree.'];
type: 'publicKey';
},
{
name: 'programOwner';
docs: [
'Program owner of the Merkle tree. This will be used for program owned Merkle trees.',
];
type: 'publicKey';
},
{
name: 'forester';
docs: [
'Optional privileged forester pubkey, can be set for custom Merkle trees',
'without a network fee. Merkle trees without network fees are not',
'forested by light foresters. The variable is not used in the account',
'compression program but the registry program. The registry program',
'implements access control to prevent contention during forester. The',
'forester pubkey specified in this struct can bypass contention checks.',
];
type: 'publicKey';
},
];
};
},
{
name: 'AccountState';
type: {
kind: 'enum';
variants: [
{
name: 'Initialized';
},
{
name: 'Frozen';
},
];
};
},
{
name: 'CompressedAccount';
type: {
kind: 'struct';
fields: [
{
name: 'owner';
type: 'publicKey';
},
{
name: 'lamports';
type: 'u64';
},
{
name: 'address';
type: {
option: {
array: ['u8', 32];
};
};
},
{
name: 'data';
type: {
option: {
defined: 'CompressedAccountData';
};
};
},
];
};
},
{
name: 'CompressedAccountData';
type: {
kind: 'struct';
fields: [
{
name: 'discriminator';
type: {
array: ['u8', 8];
};
},
{
name: 'data';
type: 'bytes';
},
{
name: 'dataHash';
type: {
array: ['u8', 32];
};
},
];
};
},
{
name: 'CompressedCpiContext';
type: {
kind: 'struct';
fields: [
{
name: 'setContext';
docs: [
'Is set by the program that is invoking the CPI to signal that is should',
'set the cpi context.',
];
type: 'bool';
},
{
name: 'firstSetContext';
docs: [
'Is set to wipe the cpi context since someone could have set it before',
'with unrelated data.',
];
type: 'bool';
},
{
name: 'cpiContextAccountIndex';
docs: [
'Index of cpi context account in remaining accounts.',
];
type: 'u8';
},
];
};
},
{
name: 'CompressedProof';
type: {
kind: 'struct';
fields: [
{
name: 'a';
type: {
array: ['u8', 32];
};
},
{
name: 'b';
type: {
array: ['u8', 64];
};
},
{
name: 'c';
type: {
array: ['u8', 32];
};
},
];
};
},
{
name: 'CompressedTokenInstructionDataTransfer';
type: {
kind: 'struct';
fields: [
{
name: 'proof';
type: {
option: {
defined: 'CompressedProof';
};
};
},
{
name: 'mint';
type: 'publicKey';
},
{
name: 'delegatedTransfer';
docs: [
'Is required if the signer is delegate,',
'-> delegate is authority account,',
'owner = Some(owner) is the owner of the token account.',
];
type: {
option: {
defined: 'DelegatedTransfer';
};
};
},
{
name: 'inputTokenDataWithContext';
type: {
vec: {
defined: 'InputTokenDataWithContext';
};
};
},
{
name: 'outputCompressedAccounts';
type: {
vec: {
defined: 'PackedTokenTransferOutputData';
};
};
},
{
name: 'isCompress';
type: 'bool';
},
{
name: 'compressOrDecompressAmount';
type: {
option: 'u64';
};
},
{
name: 'cpiContext';
type: {
option: {
defined: 'CompressedCpiContext';
};
};
},
{
name: 'lamportsChangeAccountMerkleTreeIndex';
type: {
option: 'u8';
};
},
];
};
},
{
name: 'DelegatedTransfer';
docs: [
'Struct to provide the owner when the delegate is signer of the transaction.',
];
type: {
kind: 'struct';
fields: [
{
name: 'owner';
type: 'publicKey';
},
{
name: 'delegateChangeAccountIndex';
docs: [
'Index of change compressed account in output compressed accounts. In',
"case that the delegate didn't spend the complete delegated compressed",
'account balance the change compressed account will be delegated to her',
'as well.',
];
type: {
option: 'u8';
};
},
];
};
},
{
name: 'InputTokenDataWithContext';
type: {
kind: 'struct';
fields: [
{
name: 'amount';
type: 'u64';
},
{
name: 'delegateIndex';
type: {
option: 'u8';
};
},
{
name: 'merkleContext';
type: {
defined: 'PackedMerkleContext';
};
},
{
name: 'rootIndex';
type: 'u16';
},
{
name: 'lamports';
type: {
option: 'u64';
};
},
{
name: 'tlv';
docs: [
'Placeholder for TokenExtension tlv data (unimplemented)',
];
type: {
option: 'bytes';
};
},
];
};
},
{
name: 'InstructionDataInvoke';
type: {
kind: 'struct';
fields: [
{
name: 'proof';
type: {
option: {
defined: 'CompressedProof';
};
};
},
{
name: 'inputCompressedAccountsWithMerkleContext';
type: {
vec: {
defined: 'PackedCompressedAccountWithMerkleContext';
};
};
},
{
name: 'outputCompressedAccounts';
type: {
vec: {
defined: 'OutputCompressedAccountWithPackedContext';
};
};
},
{
name: 'relayFee';
type: {
option: 'u64';
};
},
{
name: 'newAddressParams';
type: {
vec: {
defined: 'NewAddressParamsPacked';
};
};
},
{
name: 'compressOrDecompressLamports';
type: {
option: 'u64';
};
},
{
name: 'isCompress';
type: 'bool';
},
];
};
},
{
name: 'InstructionDataInvokeCpi';
type: {
kind: 'struct';
fields: [
{
name: 'proof';
type: {
option: {
defined: 'CompressedProof';
};
};
},
{
name: 'newAddressParams';
type: {
vec: {
defined: 'NewAddressParamsPacked';
};
};
},
{
name: 'inputCompressedAccountsWithMerkleContext';
type: {
vec: {
defined: 'PackedCompressedAccountWithMerkleContext';
};
};
},
{
name: 'outputCompressedAccounts';
type: {
vec: {
defined: 'OutputCompressedAccountWithPackedContext';
};
};
},
{
name: 'relayFee';
type: {
option: 'u64';
};
},
{
name: 'compressOrDecompressLamports';
type: {
option: 'u64';
};
},
{
name: 'isCompress';
type: 'bool';
},
{
name: 'cpiContext';
type: {
option: {
defined: 'CompressedCpiContext';
};
};
},
];
};
},
{
name: 'MerkleTreeMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'accessMetadata';
type: {
defined: 'AccessMetadata';
};
},
{
name: 'rolloverMetadata';
type: {
defined: 'RolloverMetadata';
};
},
{
name: 'associatedQueue';
type: 'publicKey';
},
{
name: 'nextMerkleTree';
type: 'publicKey';
},
];
};
},
{
name: 'MerkleTreeSequenceNumber';
type: {
kind: 'struct';
fields: [
{
name: 'pubkey';
type: 'publicKey';
},
{
name: 'seq';
type: 'u64';
},
];
};
},
{
name: 'NewAddressParamsPacked';
type: {
kind: 'struct';
fields: [
{
name: 'seed';
type: {
array: ['u8', 32];
};
},
{
name: 'addressQueueAccountIndex';
type: 'u8';
},
{
name: 'addressMerkleTreeAccountIndex';
type: 'u8';
},
{
name: 'addressMerkleTreeRootIndex';
type: 'u16';
},
];
};
},
{
name: 'OutputCompressedAccountWithPackedContext';
type: {
kind: 'struct';
fields: [
{
name: 'compressedAccount';
type: {
defined: 'CompressedAccount';
};
},
{
name: 'merkleTreeIndex';
type: 'u8';
},
];
};
},
{
name: 'PackedCompressedAccountWithMerkleContext';
type: {
kind: 'struct';
fields: [
{
name: 'compressedAccount';
type: {
defined: 'CompressedAccount';
};
},
{
name: 'merkleContext';
type: {
defined: 'PackedMerkleContext';
};
},
{
name: 'rootIndex';
docs: [
'Index of root used in inclusion validity proof.',
];
type: 'u16';
},
{
name: 'readOnly';
docs: [
'Placeholder to mark accounts read-only unimplemented set to false.',
];
type: 'bool';
},
];
};
},
{
name: 'PackedMerkleContext';
type: {
kind: 'struct';
fields: [
{
name: 'merkleTreePubkeyIndex';
type: 'u8';
},
{
name: 'nullifierQueuePubkeyIndex';
type: 'u8';
},
{
name: 'leafIndex';
type: 'u32';
},
{
name: 'queueIndex';
docs: [
'Index of leaf in queue. Placeholder of batched Merkle tree updates',
'currently unimplemented.',
];
type: {
option: {
defined: 'QueueIndex';
};
};
},
];
};
},
{
name: 'PackedTokenTransferOutputData';
type: {
kind: 'struct';
fields: [
{
name: 'owner';
type: 'publicKey';
},
{
name: 'amount';
type: 'u64';
},
{
name: 'lamports';
type: {
option: 'u64';
};
},
{
name: 'merkleTreeIndex';
type: 'u8';
},
{
name: 'tlv';
docs: [
'Placeholder for TokenExtension tlv data (unimplemented)',
];
type: {
option: 'bytes';
};
},
];
};
},
{
name: 'PublicTransactionEvent';
type: {
kind: 'struct';
fields: [
{
name: 'inputCompressedAccountHashes';
type: {
vec: {
array: ['u8', 32];
};
};
},
{
name: 'outputCompressedAccountHashes';
type: {
vec: {
array: ['u8', 32];
};
};
},
{
name: 'outputCompressedAccounts';
type: {
vec: {
defined: 'OutputCompressedAccountWithPackedContext';
};
};
},
{
name: 'outputLeafIndices';
type: {
vec: 'u32';
};
},
{
name: 'sequenceNumbers';
type: {
vec: {
defined: 'MerkleTreeSequenceNumber';
};
};
},
{
name: 'relayFee';
type: {
option: 'u64';
};
},
{
name: 'isCompress';
type: 'bool';
},
{
name: 'compressOrDecompressLamports';
type: {
option: 'u64';
};
},
{
name: 'pubkeyArray';
type: {
vec: 'publicKey';
};
},
{
name: 'message';
type: {
option: 'bytes';
};
},
];
};
},
{
name: 'QueueIndex';
type: {
kind: 'struct';
fields: [
{
name: 'queueId';
docs: ['Id of queue in queue account.'];
type: 'u8';
},
{
name: 'index';
docs: ['Index of compressed account hash in queue.'];
type: 'u16';
},
];
};
},
{
name: 'RolloverMetadata';
type: {
kind: 'struct';
fields: [
{
name: 'index';
docs: ['Unique index.'];
type: 'u64';
},
{
name: 'rolloverFee';
docs: [
'This fee is used for rent for the next account.',
'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over',
];
type: 'u64';
},
{
name: 'rolloverThreshold';
docs: [
'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).',
];
type: 'u64';
},
{
name: 'networkFee';
docs: ['Tip for maintaining the account.'];
type: 'u64';
},
{
name: 'rolledoverSlot';
docs: [
'The slot when the account was rolled over, a rolled over account should not be written to.',
];
type: 'u64';
},
{
name: 'closeThreshold';
docs: [
'If current slot is greater than rolledover_slot + close_threshold and',
"the account is empty it can be closed. No 'close' functionality has been",
'implemented yet.',
];
type: 'u64';
},
{
name: 'additionalBytes';
docs: [
'Placeholder for bytes of additional accounts which are tied to the',
'Merkle trees operation and need to be rolled over as well.',
];
type: 'u64';
},
];
};
},
{
name: 'TokenData';
type: {
kind: 'struct';
fields: [
{
name: 'mint';
docs: ['The mint associated with this account'];
type: 'publicKey';
},
{
name: 'owner';
docs: ['The owner of this account.'];
type: 'publicKey';
},
{
name: 'amount';
docs: ['The amount of tokens this account holds.'];
type: 'u64';
},
{
name: 'delegate';
docs: [
'If `delegate` is `Some` then `delegated_amount` represents',
'the amount authorized by the delegate',
];
type: {
option: 'publicKey';
};
},
{
name: 'state';
docs: ["The account's state"];
type: {
defined: 'AccountState';
};
},
{
name: 'tlv';
docs: [
'Placeholder for TokenExtension tlv data (unimplemented)',
];
type: {
option: 'bytes';
};
},
];
};
},
];
errors: [
{
code: 6000;
name: 'SignerCheckFailed';
msg: 'Signer check failed';
},
{
code: 6001;
name: 'CreateTransferInstructionFailed';
msg: 'Create transfer instruction failed';
},
{
code: 6002;
name: 'AccountNotFound';
msg: 'Account not found';
},
{
code: 6003;
name: 'SerializationError';
msg: 'Serialization error';
},
];
};
export const IDL: LightCompressedToken = {
version: '1.2.0',
name: 'light_compressed_token',
instructions: [
{
name: 'createTokenPool',
docs: [
'This instruction creates a token pool for a given mint. Every spl mint',
'can have one token pool. When a token is compressed the tokens are',
'transferrred to the token pool, and their compressed equivalent is',
'minted into a Merkle tree.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'tokenPoolPda',
isMut: true,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
{
name: 'mint',
isMut: true,
isSigner: false,
},
{
name: 'tokenProgram',
isMut: false,
isSigner: false,
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
],
args: [],
},
{
name: 'mintTo',
docs: [
'Mints tokens from an spl token mint to a list of compressed accounts.',
'Minted tokens are transferred to a pool account owned by the compressed',
'token program. The instruction creates one compressed output account for',
'every amount and pubkey input pair. A constant amount of lamports can be',
'transferred to each output account to enable. A use case to add lamports',
'to a compressed token account is to prevent spam. This is the only way',
'to add lamports to a compressed token account.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'mint',
isMut: true,
isSigner: false,
},
{
name: 'tokenPoolPda',
isMut: true,
isSigner: false,
},
{
name: 'tokenProgram',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
docs: ['programs'],
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'merkleTree',
isMut: true,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
{
name: 'solPoolPda',
isMut: true,
isSigner: false,
isOptional: true,
},
],
args: [
{
name: 'publicKeys',
type: {
vec: 'publicKey',
},
},
{
name: 'amounts',
type: {
vec: 'u64',
},
},
{
name: 'lamports',
type: {
option: 'u64',
},
},
],
},
{
name: 'compressSplTokenAccount',
docs: [
'Compresses the balance of an spl token account sub an optional remaining',
'amount. This instruction does not close the spl token account. To close',
'the account bundle a close spl account instruction in your transaction.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
],
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
docs: ['this program is the signer of the cpi.'],
},
{
name: 'tokenPoolPda',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'compressOrDecompressTokenAccount',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'tokenProgram',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'owner',
type: 'publicKey',
},
{
name: 'remainingAmount',
type: {
option: 'u64',
},
},
{
name: 'cpiContext',
type: {
option: {
defined: 'CompressedCpiContext',
},
},
},
],
},
{
name: 'transfer',
docs: [
'Transfers compressed tokens from one account to another. All accounts',
'must be of the same mint. Additional spl tokens can be compressed or',
'decompressed. In one transaction only compression or decompression is',
'possible. Lamports can be transferred alongside tokens. If output token',
'accounts specify less lamports than inputs the remaining lamports are',
'transferred to an output compressed account. Signer must be owner or',
'delegate. If a delegated token account is transferred the delegate is',
'not preserved.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
],
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
docs: ['this program is the signer of the cpi.'],
},
{
name: 'tokenPoolPda',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'compressOrDecompressTokenAccount',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'tokenProgram',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs',
type: 'bytes',
},
],
},
{
name: 'approve',
docs: [
'Delegates an amount to a delegate. A compressed token account is either',
'completely delegated or not. Prior delegates are not preserved. Cannot',
'be called by a delegate.',
'The instruction creates two output accounts:',
'1. one account with delegated amount',
'2. one account with remaining(change) amount',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
],
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
docs: ['this program is the signer of the cpi.'],
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs',
type: 'bytes',
},
],
},
{
name: 'revoke',
docs: [
'Revokes a delegation. The instruction merges all inputs into one output',
'account. Cannot be called by a delegate. Delegates are not preserved.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
],
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
docs: ['this program is the signer of the cpi.'],
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs',
type: 'bytes',
},
],
},
{
name: 'freeze',
docs: [
'Freezes compressed token accounts. Inputs must not be frozen. Creates as',
'many outputs as inputs. Balances and delegates are preserved.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
docs: ['that this program is the signer of the cpi.'],
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
{
name: 'mint',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs',
type: 'bytes',
},
],
},
{
name: 'thaw',
docs: [
'Thaws frozen compressed token accounts. Inputs must be frozen. Creates',
'as many outputs as inputs. Balances and delegates are preserved.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
docs: ['that this program is the signer of the cpi.'],
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
{
name: 'mint',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs',
type: 'bytes',
},
],
},
{
name: 'burn',
docs: [
'Burns compressed tokens and spl tokens from the pool account. Delegates',
'can burn tokens. The output compressed token account remains delegated.',
'Creates one output compressed token account.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
],
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'mint',
isMut: true,
isSigner: false,
},
{
name: 'tokenPoolPda',
isMut: true,
isSigner: false,
},
{
name: 'tokenProgram',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs',
type: 'bytes',
},
],
},
{
name: 'stubIdlBuild',
docs: [
'This function is a stub to allow Anchor to include the input types in',
'the IDL. It should not be included in production builds nor be called in',
'practice.',
],
accounts: [
{
name: 'feePayer',
isMut: true,
isSigner: true,
docs: ['UNCHECKED: only pays fees.'],
},
{
name: 'authority',
isMut: false,
isSigner: true,
docs: [
'Authority is verified through proof since both owner and delegate',
'are included in the token data hash, which is a public input to the',
'validity proof.',
],
},
{
name: 'cpiAuthorityPda',
isMut: false,
isSigner: false,
},
{
name: 'lightSystemProgram',
isMut: false,
isSigner: false,
},
{
name: 'registeredProgramPda',
isMut: false,
isSigner: false,
},
{
name: 'noopProgram',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionAuthority',
isMut: false,
isSigner: false,
},
{
name: 'accountCompressionProgram',
isMut: false,
isSigner: false,
},
{
name: 'selfProgram',
isMut: false,
isSigner: false,
docs: ['this program is the signer of the cpi.'],
},
{
name: 'tokenPoolPda',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'compressOrDecompressTokenAccount',
isMut: true,
isSigner: false,
isOptional: true,
},
{
name: 'tokenProgram',
isMut: false,
isSigner: false,
isOptional: true,
},
{
name: 'systemProgram',
isMut: false,
isSigner: false,
},
],
args: [
{
name: 'inputs1',
type: {
defined: 'CompressedTokenInstructionDataTransfer',
},
},
{
name: 'inputs2',
type: {
defined: 'TokenData',
},
},
],
},
],
types: [
{
name: 'AccessMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'owner',
docs: ['Owner of the Merkle tree.'],
type: 'publicKey',
},
{
name: 'programOwner',
docs: [
'Program owner of the Merkle tree. This will be used for program owned Merkle trees.',
],
type: 'publicKey',
},
{
name: 'forester',
docs: [
'Optional privileged forester pubkey, can be set for custom Merkle trees',
'without a network fee. Merkle trees without network fees are not',
'forested by light foresters. The variable is not used in the account',
'compression program but the registry program. The registry program',
'implements access control to prevent contention during forester. The',
'forester pubkey specified in this struct can bypass contention checks.',
],
type: 'publicKey',
},
],
},
},
{
name: 'AccountState',
type: {
kind: 'enum',
variants: [
{
name: 'Initialized',
},
{
name: 'Frozen',
},
],
},
},
{
name: 'CompressedAccount',
type: {
kind: 'struct',
fields: [
{
name: 'owner',
type: 'publicKey',
},
{
name: 'lamports',
type: 'u64',
},
{
name: 'address',
type: {
option: {
array: ['u8', 32],
},
},
},
{
name: 'data',
type: {
option: {
defined: 'CompressedAccountData',
},
},
},
],
},
},
{
name: 'CompressedAccountData',
type: {
kind: 'struct',
fields: [
{
name: 'discriminator',
type: {
array: ['u8', 8],
},
},
{
name: 'data',
type: 'bytes',
},
{
name: 'dataHash',
type: {
array: ['u8', 32],
},
},
],
},
},
{
name: 'CompressedCpiContext',
type: {
kind: 'struct',
fields: [
{
name: 'setContext',
docs: [
'Is set by the program that is invoking the CPI to signal that is should',
'set the cpi context.',
],
type: 'bool',
},
{
name: 'firstSetContext',
docs: [
'Is set to wipe the cpi context since someone could have set it before',
'with unrelated data.',
],
type: 'bool',
},
{
name: 'cpiContextAccountIndex',
docs: [
'Index of cpi context account in remaining accounts.',
],
type: 'u8',
},
],
},
},
{
name: 'CompressedProof',
type: {
kind: 'struct',
fields: [
{
name: 'a',
type: {
array: ['u8', 32],
},
},
{
name: 'b',
type: {
array: ['u8', 64],
},
},
{
name: 'c',
type: {
array: ['u8', 32],
},
},
],
},
},
{
name: 'CompressedTokenInstructionDataTransfer',
type: {
kind: 'struct',
fields: [
{
name: 'proof',
type: {
option: {
defined: 'CompressedProof',
},
},
},
{
name: 'mint',
type: 'publicKey',
},
{
name: 'delegatedTransfer',
docs: [
'Is required if the signer is delegate,',
'-> delegate is authority account,',
'owner = Some(owner) is the owner of the token account.',
],
type: {
option: {
defined: 'DelegatedTransfer',
},
},
},
{
name: 'inputTokenDataWithContext',
type: {
vec: {
defined: 'InputTokenDataWithContext',
},
},
},
{
name: 'outputCompressedAccounts',
type: {
vec: {
defined: 'PackedTokenTransferOutputData',
},
},
},
{
name: 'isCompress',
type: 'bool',
},
{
name: 'compressOrDecompressAmount',
type: {
option: 'u64',
},
},
{
name: 'cpiContext',
type: {
option: {
defined: 'CompressedCpiContext',
},
},
},
{
name: 'lamportsChangeAccountMerkleTreeIndex',
type: {
option: 'u8',
},
},
],
},
},
{
name: 'DelegatedTransfer',
docs: [
'Struct to provide the owner when the delegate is signer of the transaction.',
],
type: {
kind: 'struct',
fields: [
{
name: 'owner',
type: 'publicKey',
},
{
name: 'delegateChangeAccountIndex',
docs: [
'Index of change compressed account in output compressed accounts. In',
"case that the delegate didn't spend the complete delegated compressed",
'account balance the change compressed account will be delegated to her',
'as well.',
],
type: {
option: 'u8',
},
},
],
},
},
{
name: 'InputTokenDataWithContext',
type: {
kind: 'struct',
fields: [
{
name: 'amount',
type: 'u64',
},
{
name: 'delegateIndex',
type: {
option: 'u8',
},
},
{
name: 'merkleContext',
type: {
defined: 'PackedMerkleContext',
},
},
{
name: 'rootIndex',
type: 'u16',
},
{
name: 'lamports',
type: {
option: 'u64',
},
},
{
name: 'tlv',
docs: [
'Placeholder for TokenExtension tlv data (unimplemented)',
],
type: {
option: 'bytes',
},
},
],
},
},
{
name: 'InstructionDataInvoke',
type: {
kind: 'struct',
fields: [
{
name: 'proof',
type: {
option: {
defined: 'CompressedProof',
},
},
},
{
name: 'inputCompressedAccountsWithMerkleContext',
type: {
vec: {
defined:
'PackedCompressedAccountWithMerkleContext',
},
},
},
{
name: 'outputCompressedAccounts',
type: {
vec: {
defined:
'OutputCompressedAccountWithPackedContext',
},
},
},
{
name: 'relayFee',
type: {
option: 'u64',
},
},
{
name: 'newAddressParams',
type: {
vec: {
defined: 'NewAddressParamsPacked',
},
},
},
{
name: 'compressOrDecompressLamports',
type: {
option: 'u64',
},
},
{
name: 'isCompress',
type: 'bool',
},
],
},
},
{
name: 'InstructionDataInvokeCpi',
type: {
kind: 'struct',
fields: [
{
name: 'proof',
type: {
option: {
defined: 'CompressedProof',
},
},
},
{
name: 'newAddressParams',
type: {
vec: {
defined: 'NewAddressParamsPacked',
},
},
},
{
name: 'inputCompressedAccountsWithMerkleContext',
type: {
vec: {
defined:
'PackedCompressedAccountWithMerkleContext',
},
},
},
{
name: 'outputCompressedAccounts',
type: {
vec: {
defined:
'OutputCompressedAccountWithPackedContext',
},
},
},
{
name: 'relayFee',
type: {
option: 'u64',
},
},
{
name: 'compressOrDecompressLamports',
type: {
option: 'u64',
},
},
{
name: 'isCompress',
type: 'bool',
},
{
name: 'cpiContext',
type: {
option: {
defined: 'CompressedCpiContext',
},
},
},
],
},
},
{
name: 'MerkleTreeMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'accessMetadata',
type: {
defined: 'AccessMetadata',
},
},
{
name: 'rolloverMetadata',
type: {
defined: 'RolloverMetadata',
},
},
{
name: 'associatedQueue',
type: 'publicKey',
},
{
name: 'nextMerkleTree',
type: 'publicKey',
},
],
},
},
{
name: 'MerkleTreeSequenceNumber',
type: {
kind: 'struct',
fields: [
{
name: 'pubkey',
type: 'publicKey',
},
{
name: 'seq',
type: 'u64',
},
],
},
},
{
name: 'NewAddressParamsPacked',
type: {
kind: 'struct',
fields: [
{
name: 'seed',
type: {
array: ['u8', 32],
},
},
{
name: 'addressQueueAccountIndex',
type: 'u8',
},
{
name: 'addressMerkleTreeAccountIndex',
type: 'u8',
},
{
name: 'addressMerkleTreeRootIndex',
type: 'u16',
},
],
},
},
{
name: 'OutputCompressedAccountWithPackedContext',
type: {
kind: 'struct',
fields: [
{
name: 'compressedAccount',
type: {
defined: 'CompressedAccount',
},
},
{
name: 'merkleTreeIndex',
type: 'u8',
},
],
},
},
{
name: 'PackedCompressedAccountWithMerkleContext',
type: {
kind: 'struct',
fields: [
{
name: 'compressedAccount',
type: {
defined: 'CompressedAccount',
},
},
{
name: 'merkleContext',
type: {
defined: 'PackedMerkleContext',
},
},
{
name: 'rootIndex',
docs: [
'Index of root used in inclusion validity proof.',
],
type: 'u16',
},
{
name: 'readOnly',
docs: [
'Placeholder to mark accounts read-only unimplemented set to false.',
],
type: 'bool',
},
],
},
},
{
name: 'PackedMerkleContext',
type: {
kind: 'struct',
fields: [
{
name: 'merkleTreePubkeyIndex',
type: 'u8',
},
{
name: 'nullifierQueuePubkeyIndex',
type: 'u8',
},
{
name: 'leafIndex',
type: 'u32',
},
{
name: 'queueIndex',
docs: [
'Index of leaf in queue. Placeholder of batched Merkle tree updates',
'currently unimplemented.',
],
type: {
option: {
defined: 'QueueIndex',
},
},
},
],
},
},
{
name: 'PackedTokenTransferOutputData',
type: {
kind: 'struct',
fields: [
{
name: 'owner',
type: 'publicKey',
},
{
name: 'amount',
type: 'u64',
},
{
name: 'lamports',
type: {
option: 'u64',
},
},
{
name: 'merkleTreeIndex',
type: 'u8',
},
{
name: 'tlv',
docs: [
'Placeholder for TokenExtension tlv data (unimplemented)',
],
type: {
option: 'bytes',
},
},
],
},
},
{
name: 'PublicTransactionEvent',
type: {
kind: 'struct',
fields: [
{
name: 'inputCompressedAccountHashes',
type: {
vec: {
array: ['u8', 32],
},
},
},
{
name: 'outputCompressedAccountHashes',
type: {
vec: {
array: ['u8', 32],
},
},
},
{
name: 'outputCompressedAccounts',
type: {
vec: {
defined:
'OutputCompressedAccountWithPackedContext',
},
},
},
{
name: 'outputLeafIndices',
type: {
vec: 'u32',
},
},
{
name: 'sequenceNumbers',
type: {
vec: {
defined: 'MerkleTreeSequenceNumber',
},
},
},
{
name: 'relayFee',
type: {
option: 'u64',
},
},
{
name: 'isCompress',
type: 'bool',
},
{
name: 'compressOrDecompressLamports',
type: {
option: 'u64',
},
},
{
name: 'pubkeyArray',
type: {
vec: 'publicKey',
},
},
{
name: 'message',
type: {
option: 'bytes',
},
},
],
},
},
{
name: 'QueueIndex',
type: {
kind: 'struct',
fields: [
{
name: 'queueId',
docs: ['Id of queue in queue account.'],
type: 'u8',
},
{
name: 'index',
docs: ['Index of compressed account hash in queue.'],
type: 'u16',
},
],
},
},
{
name: 'RolloverMetadata',
type: {
kind: 'struct',
fields: [
{
name: 'index',
docs: ['Unique index.'],
type: 'u64',
},
{
name: 'rolloverFee',
docs: [
'This fee is used for rent for the next account.',
'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over',
],
type: 'u64',
},
{
name: 'rolloverThreshold',
docs: [
'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).',
],
type: 'u64',
},
{
name: 'networkFee',
docs: ['Tip for maintaining the account.'],
type: 'u64',
},
{
name: 'rolledoverSlot',
docs: [
'The slot when the account was rolled over, a rolled over account should not be written to.',
],
type: 'u64',
},
{
name: 'closeThreshold',
docs: [
'If current slot is greater than rolledover_slot + close_threshold and',
"the account is empty it can be closed. No 'close' functionality has been",
'implemented yet.',
],
type: 'u64',
},
{
name: 'additionalBytes',
docs: [
'Placeholder for bytes of additional accounts which are tied to the',
'Merkle trees operation and need to be rolled over as well.',
],
type: 'u64',
},
],
},
},
{
name: 'TokenData',
type: {
kind: 'struct',
fields: [
{
name: 'mint',
docs: ['The mint associated with this account'],
type: 'publicKey',
},
{
name: 'owner',
docs: ['The owner of this account.'],
type: 'publicKey',
},
{
name: 'amount',
docs: ['The amount of tokens this account holds.'],
type: 'u64',
},
{
name: 'delegate',
docs: [
'If `delegate` is `Some` then `delegated_amount` represents',
'the amount authorized by the delegate',
],
type: {
option: 'publicKey',
},
},
{
name: 'state',
docs: ["The account's state"],
type: {
defined: 'AccountState',
},
},
{
name: 'tlv',
docs: [
'Placeholder for TokenExtension tlv data (unimplemented)',
],
type: {
option: 'bytes',
},
},
],
},
},
],
errors: [
{
code: 6000,
name: 'SignerCheckFailed',
msg: 'Signer check failed',
},
{
code: 6001,
name: 'CreateTransferInstructionFailed',
msg: 'Create transfer instruction failed',
},
{
code: 6002,
name: 'AccountNotFound',
msg: 'Account not found',
},
{
code: 6003,
name: 'SerializationError',
msg: 'Serialization error',
},
],
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/programs/index.ts
|
export * from './system';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/programs/system.ts
|
import { Program, AnchorProvider, setProvider, BN } from '@coral-xyz/anchor';
import {
PublicKey,
Keypair,
Connection,
TransactionInstruction,
SystemProgram,
} from '@solana/web3.js';
import { Buffer } from 'buffer';
import {
IDL,
LightSystemProgram as LightSystemProgramIDL,
} from '../idls/light_system_program';
import { useWallet } from '../wallet';
import {
CompressedAccount,
CompressedAccountWithMerkleContext,
CompressedProof,
InstructionDataInvoke,
bn,
createCompressedAccount,
} from '../state';
import { packCompressedAccounts, toAccountMetas } from '../instruction';
import {
defaultStaticAccountsStruct,
defaultTestStateTreeAccounts,
} from '../constants';
import {
validateSameOwner,
validateSufficientBalance,
} from '../utils/validation';
import { packNewAddressParams, NewAddressParams } from '../utils';
/**
 * Sums the lamports held across a set of compressed accounts.
 * Returns bn(0) for an empty list.
 */
export const sumUpLamports = (
    accounts: CompressedAccountWithMerkleContext[],
): BN => {
    let total = bn(0);
    for (const account of accounts) {
        total = total.add(bn(account.lamports));
    }
    return total;
};
/**
 * Create compressed account system transaction params
 */
type CreateAccountWithSeedParams = {
    /**
     * The payer of the transaction.
     */
    payer: PublicKey;
    /**
     * Address params for the new compressed account
     */
    newAddressParams: NewAddressParams;
    /**
     * The derived address of the new compressed account (bytes of the derived
     * PublicKey).
     */
    newAddress: number[];
    /**
     * Recent validity proof proving that there's no existing compressed account
     * registered with newAccountAddress
     */
    recentValidityProof: CompressedProof;
    /**
     * State tree pubkey. Defaults to a public state tree if unspecified.
     */
    outputStateTree?: PublicKey;
    /**
     * Public key of the program to assign as the owner of the created account
     *
     * NOTE(review): currently not destructured/consumed by
     * LightSystemProgram.createAccount — the new account's owner is set to
     * `payer` there. TODO confirm intended ownership semantics.
     */
    programId?: PublicKey;
    /**
     * Optional input accounts to transfer lamports from into the new compressed
     * account.
     */
    inputCompressedAccounts?: CompressedAccountWithMerkleContext[];
    /**
     * Optional input state root indices of 'inputCompressedAccounts'. The
     * expiry is tied to the 'recentValidityProof'.
     */
    inputStateRootIndices?: number[];
    /**
     * Optional lamports to transfer into the new compressed account.
     */
    lamports?: number | BN;
};
/**
 * Parameters for {@link LightSystemProgram.transfer}.
 */
type TransferParams = {
    /**
     * The payer of the transaction.
     */
    payer: PublicKey;
    /**
     * The input state to be consumed.
     */
    inputCompressedAccounts: CompressedAccountWithMerkleContext[];
    /**
     * Recipient address
     */
    toAddress: PublicKey;
    /**
     * amount of lamports to transfer.
     */
    lamports: number | BN;
    /**
     * The recent state root indices of the input state. The expiry is tied to
     * the proof.
     *
     * TODO: Add support for passing recent-values after instruction creation.
     */
    recentInputStateRootIndices: number[];
    /**
     * The recent validity proof for state inclusion of the input state. It
     * expires after n slots.
     */
    recentValidityProof: CompressedProof;
    /**
     * The state trees that the tx output should be inserted into. This can be a
     * single PublicKey or an array of PublicKey. Defaults to the 0th state tree
     * of input state.
     */
    outputStateTrees?: PublicKey[] | PublicKey;
};
/// TODO:
/// - add option to compress to another owner
/// - add option to merge with input state
/**
 * Parameters for {@link LightSystemProgram.compress}.
 */
type CompressParams = {
    /**
     * The payer of the transaction.
     */
    payer: PublicKey;
    /**
     * address that the lamports are attached to. also defaults to the recipient owner
     */
    toAddress: PublicKey;
    /**
     * amount of lamports to compress.
     */
    lamports: number | BN;
    /**
     * The state tree that the tx output should be inserted into. Defaults to a
     * public state tree if unspecified.
     */
    outputStateTree?: PublicKey;
};
/**
 * Parameters for {@link LightSystemProgram.decompress}.
 */
type DecompressParams = {
    /**
     * The payer of the transaction.
     */
    payer: PublicKey;
    /**
     * The input state to be consumed.
     */
    inputCompressedAccounts: CompressedAccountWithMerkleContext[];
    /**
     * Recipient address of uncompressed lamports
     */
    toAddress: PublicKey;
    /**
     * amount of lamports to decompress.
     */
    lamports: number | BN;
    /**
     * The recent state root indices of the input state. The expiry is tied to
     * the proof.
     *
     * TODO: Add support for passing recent-values after instruction creation.
     */
    recentInputStateRootIndices: number[];
    /**
     * The recent validity proof for state inclusion of the input state. It
     * expires after n slots.
     */
    recentValidityProof: CompressedProof;
    /**
     * The state trees that the tx output should be inserted into. This can be a
     * single PublicKey or an array of PublicKey. Defaults to the 0th state tree
     * of input state.
     */
    outputStateTree?: PublicKey;
};
/** Seed used to derive the program's SOL pool PDA (see deriveCompressedSolPda). */
const SOL_POOL_PDA_SEED = Buffer.from('sol_pool_pda');
export class LightSystemProgram {
    /**
     * @internal
     */
    constructor() {}
    /**
     * Public key that identifies the CompressedPda program
     */
    static programId: PublicKey = new PublicKey(
        // TODO: can add check to ensure its consistent with the idl
        'SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7',
    );
    // Lazily-initialized anchor Program instance; access via the `program` getter.
    private static _program: Program<LightSystemProgramIDL> | null = null;
    /**
     * Anchor Program wrapper for the light system program IDL.
     * Initializes the shared instance on first access.
     */
    static get program(): Program<LightSystemProgramIDL> {
        if (!this._program) {
            this.initializeProgram();
        }
        return this._program!;
    }
    /**
     * @internal
     * Cwct1kQLwJm8Z3HetLu8m4SXkhD6FZ5fXbJQCxTxPnGY
     *
     * Derives the program's SOL pool PDA from the 'sol_pool_pda' seed.
     */
    static deriveCompressedSolPda(): PublicKey {
        const seeds = [SOL_POOL_PDA_SEED];
        const [address, _] = PublicKey.findProgramAddressSync(
            seeds,
            this.programId,
        );
        return address;
    }
    /**
     * Initializes the program statically if not already initialized.
     */
    private static initializeProgram() {
        if (!this._program) {
            // The provider below is a stand-in: this class only uses the
            // program instance for encoding types and building instructions,
            // so the mock keypair/connection are presumably never used to hit
            // the network — NOTE(review): confirm no method here sends RPCs.
            const mockKeypair = Keypair.generate();
            const mockConnection = new Connection(
                'http://127.0.0.1:8899',
                'confirmed',
            );
            const mockProvider = new AnchorProvider(
                mockConnection,
                useWallet(mockKeypair),
                {
                    commitment: 'confirmed',
                    preflightCommitment: 'confirmed',
                },
            );
            setProvider(mockProvider);
            this._program = new Program(IDL, this.programId, mockProvider);
        }
    }
    /**
     * Computes the output compressed accounts for a transfer: the recipient
     * account plus, when the inputs hold more lamports than the transfer
     * amount, a change account back to the owner of the inputs.
     *
     * Throws (via validateSufficientBalance) when the inputs hold fewer
     * lamports than `lamports`.
     */
    static createTransferOutputState(
        inputCompressedAccounts: CompressedAccountWithMerkleContext[],
        toAddress: PublicKey,
        lamports: number | BN,
    ): CompressedAccount[] {
        lamports = bn(lamports);
        const inputLamports = sumUpLamports(inputCompressedAccounts);
        const changeLamports = inputLamports.sub(lamports);
        validateSufficientBalance(changeLamports);
        // Exact spend: no change account needed.
        if (changeLamports.eq(bn(0))) {
            return [createCompressedAccount(toAddress, lamports)];
        }
        validateSameOwner(inputCompressedAccounts);
        const outputCompressedAccounts: CompressedAccount[] = [
            createCompressedAccount(
                inputCompressedAccounts[0].owner,
                changeLamports,
            ),
            createCompressedAccount(toAddress, lamports),
        ];
        return outputCompressedAccounts;
    }
    /**
     * Computes the output compressed accounts for a decompression: at most a
     * single change account back to the owner of the inputs; the decompressed
     * lamports leave compressed state entirely.
     *
     * Throws (via validateSufficientBalance) when the inputs hold fewer
     * lamports than `lamports`.
     */
    static createDecompressOutputState(
        inputCompressedAccounts: CompressedAccountWithMerkleContext[],
        lamports: number | BN,
    ): CompressedAccount[] {
        lamports = bn(lamports);
        const inputLamports = sumUpLamports(inputCompressedAccounts);
        const changeLamports = inputLamports.sub(lamports);
        validateSufficientBalance(changeLamports);
        /// lamports gets decompressed
        if (changeLamports.eq(bn(0))) {
            return [];
        }
        validateSameOwner(inputCompressedAccounts);
        const outputCompressedAccounts: CompressedAccount[] = [
            createCompressedAccount(
                inputCompressedAccounts[0].owner,
                changeLamports,
            ),
        ];
        return outputCompressedAccounts;
    }
    /**
     * No data by default
     *
     * Computes the output state for creating a new addressed account: the new
     * account itself plus, when inputs are supplied and not fully consumed, a
     * change account back to the owner of the inputs.
     */
    static createNewAddressOutputState(
        address: number[],
        owner: PublicKey,
        lamports?: BN | number,
        inputCompressedAccounts?: CompressedAccountWithMerkleContext[],
    ): CompressedAccount[] {
        lamports = bn(lamports ?? 0);
        const inputLamports = sumUpLamports(inputCompressedAccounts ?? []);
        const changeLamports = inputLamports.sub(lamports);
        validateSufficientBalance(changeLamports);
        // Inputs absent or fully consumed: only the new account is emitted.
        if (changeLamports.eq(bn(0)) || !inputCompressedAccounts) {
            return [
                createCompressedAccount(owner, lamports, undefined, address),
            ];
        }
        validateSameOwner(inputCompressedAccounts);
        const outputCompressedAccounts: CompressedAccount[] = [
            createCompressedAccount(
                inputCompressedAccounts[0].owner,
                changeLamports,
            ),
            createCompressedAccount(owner, lamports, undefined, address),
        ];
        return outputCompressedAccounts;
    }
    /**
     * Creates instruction to create compressed account with PDA.
     * Cannot write data.
     *
     * TODO: support transfer of lamports to the new account.
     *
     * NOTE(review): `programId` from CreateAccountWithSeedParams is not
     * destructured here — the new account's owner is set to `payer` below.
     * TODO confirm whether the owner should be `programId` when provided.
     */
    static async createAccount({
        payer,
        newAddressParams,
        newAddress,
        recentValidityProof,
        outputStateTree,
        inputCompressedAccounts,
        inputStateRootIndices,
        lamports,
    }: CreateAccountWithSeedParams): Promise<TransactionInstruction> {
        const outputCompressedAccounts = this.createNewAddressOutputState(
            newAddress,
            payer,
            lamports,
            inputCompressedAccounts,
        );
        /// Pack accounts
        const {
            packedInputCompressedAccounts,
            packedOutputCompressedAccounts,
            remainingAccounts: _remainingAccounts,
        } = packCompressedAccounts(
            inputCompressedAccounts ?? [],
            inputStateRootIndices ?? [],
            outputCompressedAccounts,
            outputStateTree,
        );
        const { newAddressParamsPacked, remainingAccounts } =
            packNewAddressParams([newAddressParams], _remainingAccounts);
        const rawData: InstructionDataInvoke = {
            proof: recentValidityProof,
            inputCompressedAccountsWithMerkleContext:
                packedInputCompressedAccounts,
            outputCompressedAccounts: packedOutputCompressedAccounts,
            relayFee: null,
            newAddressParams: newAddressParamsPacked,
            compressOrDecompressLamports: null,
            isCompress: false,
        };
        /// Encode instruction data
        const ixData = this.program.coder.types.encode(
            'InstructionDataInvoke',
            rawData,
        );
        /// Build anchor instruction
        const instruction = await this.program.methods
            .invoke(ixData)
            .accounts({
                ...defaultStaticAccountsStruct(),
                feePayer: payer,
                authority: payer,
                solPoolPda: null,
                decompressionRecipient: null,
                systemProgram: SystemProgram.programId,
            })
            .remainingAccounts(toAccountMetas(remainingAccounts))
            .instruction();
        return instruction;
    }
    /**
     * Creates a transaction instruction that transfers compressed lamports from
     * one owner to another.
     */
    static async transfer({
        payer,
        inputCompressedAccounts,
        toAddress,
        lamports,
        recentInputStateRootIndices,
        recentValidityProof,
        outputStateTrees,
    }: TransferParams): Promise<TransactionInstruction> {
        /// Create output state
        const outputCompressedAccounts = this.createTransferOutputState(
            inputCompressedAccounts,
            toAddress,
            lamports,
        );
        /// Pack accounts
        const {
            packedInputCompressedAccounts,
            packedOutputCompressedAccounts,
            remainingAccounts,
        } = packCompressedAccounts(
            inputCompressedAccounts,
            recentInputStateRootIndices,
            outputCompressedAccounts,
            outputStateTrees,
        );
        /// Encode instruction data
        const data = this.program.coder.types.encode('InstructionDataInvoke', {
            proof: recentValidityProof,
            inputCompressedAccountsWithMerkleContext:
                packedInputCompressedAccounts,
            outputCompressedAccounts: packedOutputCompressedAccounts,
            relayFee: null,
            /// TODO: here and on-chain: option<newAddressInputs> or similar.
            newAddressParams: [],
            compressOrDecompressLamports: null,
            isCompress: false,
        });
        /// Build anchor instruction
        const instruction = await this.program.methods
            .invoke(data)
            .accounts({
                ...defaultStaticAccountsStruct(),
                feePayer: payer,
                authority: payer,
                solPoolPda: null,
                decompressionRecipient: null,
                systemProgram: SystemProgram.programId,
            })
            .remainingAccounts(toAccountMetas(remainingAccounts))
            .instruction();
        return instruction;
    }
    /**
     * Creates a transaction instruction that compresses native lamports from
     * the fee payer into a compressed account owned by `toAddress` (no input
     * compressed state is consumed; lamports flow into the SOL pool PDA).
     */
    // TODO: add support for non-fee-payer owner
    static async compress({
        payer,
        toAddress,
        lamports,
        outputStateTree,
    }: CompressParams): Promise<TransactionInstruction> {
        /// Create output state
        lamports = bn(lamports);
        const outputCompressedAccount = createCompressedAccount(
            toAddress,
            lamports,
        );
        /// Pack accounts
        const {
            packedInputCompressedAccounts,
            packedOutputCompressedAccounts,
            remainingAccounts,
        } = packCompressedAccounts(
            [],
            [],
            [outputCompressedAccount],
            outputStateTree,
        );
        /// Encode instruction data
        const rawInputs: InstructionDataInvoke = {
            proof: null,
            inputCompressedAccountsWithMerkleContext:
                packedInputCompressedAccounts,
            outputCompressedAccounts: packedOutputCompressedAccounts,
            relayFee: null,
            /// TODO: here and on-chain: option<newAddressInputs> or similar.
            newAddressParams: [],
            compressOrDecompressLamports: lamports,
            isCompress: true,
        };
        const data = this.program.coder.types.encode(
            'InstructionDataInvoke',
            rawInputs,
        );
        /// Build anchor instruction
        const instruction = await this.program.methods
            .invoke(data)
            .accounts({
                ...defaultStaticAccountsStruct(),
                feePayer: payer,
                authority: payer,
                solPoolPda: this.deriveCompressedSolPda(),
                decompressionRecipient: null,
                systemProgram: SystemProgram.programId,
            })
            .remainingAccounts(toAccountMetas(remainingAccounts))
            .instruction();
        return instruction;
    }
    /**
     * Creates a transaction instruction that decompresses lamports from
     * compressed input accounts to the `toAddress` solana account (set as the
     * decompressionRecipient below).
     */
    static async decompress({
        payer,
        inputCompressedAccounts,
        toAddress,
        lamports,
        recentInputStateRootIndices,
        recentValidityProof,
        outputStateTree,
    }: DecompressParams): Promise<TransactionInstruction> {
        /// Create output state
        lamports = bn(lamports);
        const outputCompressedAccounts = this.createDecompressOutputState(
            inputCompressedAccounts,
            lamports,
        );
        /// Pack accounts
        const {
            packedInputCompressedAccounts,
            packedOutputCompressedAccounts,
            remainingAccounts,
        } = packCompressedAccounts(
            inputCompressedAccounts,
            recentInputStateRootIndices,
            outputCompressedAccounts,
            outputStateTree,
        );
        /// Encode instruction data
        const data = this.program.coder.types.encode('InstructionDataInvoke', {
            proof: recentValidityProof,
            inputCompressedAccountsWithMerkleContext:
                packedInputCompressedAccounts,
            outputCompressedAccounts: packedOutputCompressedAccounts,
            relayFee: null,
            /// TODO: here and on-chain: option<newAddressInputs> or similar.
            newAddressParams: [],
            compressOrDecompressLamports: lamports,
            isCompress: false,
        });
        /// Build anchor instruction
        const instruction = await this.program.methods
            .invoke(data)
            .accounts({
                ...defaultStaticAccountsStruct(),
                feePayer: payer,
                authority: payer,
                solPoolPda: this.deriveCompressedSolPda(),
                decompressionRecipient: toAddress,
                systemProgram: SystemProgram.programId,
            })
            .remainingAccounts(toAccountMetas(remainingAccounts))
            .instruction();
        return instruction;
    }
}
/**
 * Selects the minimal number of compressed SOL accounts for a transfer.
 *
 * 1. Sorts the accounts by amount in descending order
 * 2. Accumulates the amount until it is greater than or equal to the transfer
 *    amount
 *
 * Does not mutate the input `accounts` array.
 *
 * @param accounts         Candidate compressed accounts to select from.
 * @param transferLamports Amount of lamports the selection must cover.
 * @returns Tuple of the selected accounts and the total lamports they hold.
 * @throws Error when the accounts cannot cover `transferLamports`.
 */
export function selectMinCompressedSolAccountsForTransfer(
    accounts: CompressedAccountWithMerkleContext[],
    transferLamports: BN | number,
): [selectedAccounts: CompressedAccountWithMerkleContext[], total: BN] {
    // Normalize once up front instead of re-wrapping with bn() on every
    // comparison (the original wrapped the already-converted value twice).
    const target = bn(transferLamports);
    let accumulatedLamports = bn(0);
    const selectedAccounts: CompressedAccountWithMerkleContext[] = [];
    // Sort a copy so the caller's array order is left untouched; pick the
    // largest accounts first to minimize the number of inputs.
    const sorted = [...accounts].sort((a, b) => b.lamports.cmp(a.lamports));
    for (const account of sorted) {
        if (accumulatedLamports.gte(target)) break;
        accumulatedLamports = accumulatedLamports.add(account.lamports);
        selectedAccounts.push(account);
    }
    if (accumulatedLamports.lt(target)) {
        throw new Error(
            `Not enough balance for transfer. Required: ${target.toString()}, available: ${accumulatedLamports.toString()}`,
        );
    }
    return [selectedAccounts, accumulatedLamports];
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/actions/create-account.ts
|
import {
ComputeBudgetProgram,
ConfirmOptions,
PublicKey,
Signer,
TransactionSignature,
} from '@solana/web3.js';
import {
LightSystemProgram,
selectMinCompressedSolAccountsForTransfer,
} from '../programs';
import { Rpc } from '../rpc';
import {
NewAddressParams,
buildAndSignTx,
deriveAddress,
deriveAddressSeed,
sendAndConfirmTx,
} from '../utils';
import { defaultTestStateTreeAccounts } from '../constants';
import { bn } from '../state';
import { BN } from '@coral-xyz/anchor';
/**
* Create compressed account with address
*
* @param rpc RPC to use
* @param payer Payer of the transaction and initialization fees
* @param seeds Seeds to derive the new account address
* @param programId Owner of the new account
* @param addressTree Optional address tree. Defaults to a current shared
* address tree.
* @param addressQueue Optional address queue. Defaults to a current shared
* address queue.
* @param outputStateTree Optional output state tree. Defaults to a current
* shared state tree.
* @param confirmOptions Options for confirming the transaction
*
* @return Transaction signature
*/
export async function createAccount(
    rpc: Rpc,
    payer: Signer,
    seeds: Uint8Array[],
    programId: PublicKey,
    addressTree?: PublicKey,
    addressQueue?: PublicKey,
    outputStateTree?: PublicKey,
    confirmOptions?: ConfirmOptions,
): Promise<TransactionSignature> {
    const { blockhash } = await rpc.getLatestBlockhash();
    // Fall back to the shared test address tree/queue when none are provided.
    addressTree = addressTree ?? defaultTestStateTreeAccounts().addressTree;
    addressQueue = addressQueue ?? defaultTestStateTreeAccounts().addressQueue;
    // The account address is derived deterministically from (seeds, programId)
    // and the address tree it will be registered in.
    const seed = deriveAddressSeed(seeds, programId);
    const address = deriveAddress(seed, addressTree);
    // Validity proof showing no account is registered under this address yet.
    const proof = await rpc.getValidityProofV0(undefined, [
        {
            address: bn(address.toBytes()),
            tree: addressTree,
            queue: addressQueue,
        },
    ]);
    const params: NewAddressParams = {
        seed: seed,
        addressMerkleTreeRootIndex: proof.rootIndices[0],
        addressMerkleTreePubkey: proof.merkleTrees[0],
        addressQueuePubkey: proof.nullifierQueues[0],
    };
    const ix = await LightSystemProgram.createAccount({
        payer: payer.publicKey,
        newAddressParams: params,
        newAddress: Array.from(address.toBytes()),
        recentValidityProof: proof.compressedProof,
        programId,
        outputStateTree,
    });
    // Raise the compute budget; validity-proof verification is compute-heavy.
    const tx = buildAndSignTx(
        [ComputeBudgetProgram.setComputeUnitLimit({ units: 1_000_000 }), ix],
        payer,
        blockhash,
        [],
    );
    const txId = await sendAndConfirmTx(rpc, tx, confirmOptions);
    return txId;
}
/**
* Create compressed account with address and lamports
*
* @param rpc RPC to use
* @param payer Payer of the transaction and initialization fees
* @param seeds Seeds to derive the new account address
* @param lamports Number of compressed lamports to initialize the
* account with
* @param programId Owner of the new account
* @param addressTree Optional address tree. Defaults to a current shared
* address tree.
* @param addressQueue Optional address queue. Defaults to a current shared
* address queue.
* @param outputStateTree Optional output state tree. Defaults to a current
* shared state tree.
* @param confirmOptions Options for confirming the transaction
*
* @return Transaction signature
*/
// TODO: add support for payer != user owner
export async function createAccountWithLamports(
    rpc: Rpc,
    payer: Signer,
    seeds: Uint8Array[],
    lamports: number | BN,
    programId: PublicKey,
    addressTree?: PublicKey,
    addressQueue?: PublicKey,
    outputStateTree?: PublicKey,
    confirmOptions?: ConfirmOptions,
): Promise<TransactionSignature> {
    lamports = bn(lamports);
    // Pick the fewest of the payer's compressed accounts that cover `lamports`.
    const compressedAccounts = await rpc.getCompressedAccountsByOwner(
        payer.publicKey,
    );
    const [inputAccounts] = selectMinCompressedSolAccountsForTransfer(
        compressedAccounts.items,
        lamports,
    );
    const { blockhash } = await rpc.getLatestBlockhash();
    addressTree = addressTree ?? defaultTestStateTreeAccounts().addressTree;
    addressQueue = addressQueue ?? defaultTestStateTreeAccounts().addressQueue;
    // Derive the deterministic account address from (seeds, programId, tree).
    const seed = deriveAddressSeed(seeds, programId);
    const address = deriveAddress(seed, addressTree);
    // Combined proof: inclusion of the input accounts plus non-inclusion of
    // the new address.
    const proof = await rpc.getValidityProof(
        inputAccounts.map(account => bn(account.hash)),
        [bn(address.toBytes())],
    );
    /// TODO(crank): Adapt before supporting addresses in rpc / cranked address trees.
    /// Currently expects address roots to be consistent with one another and
    /// static. See test-rpc.ts for more details.
    const params: NewAddressParams = {
        seed: seed,
        // The last proof entries are taken for the new address — assumed from
        // the argument ordering of the getValidityProof call above.
        addressMerkleTreeRootIndex:
            proof.rootIndices[proof.rootIndices.length - 1],
        addressMerkleTreePubkey:
            proof.merkleTrees[proof.merkleTrees.length - 1],
        addressQueuePubkey:
            proof.nullifierQueues[proof.nullifierQueues.length - 1],
    };
    const ix = await LightSystemProgram.createAccount({
        payer: payer.publicKey,
        newAddressParams: params,
        newAddress: Array.from(address.toBytes()),
        recentValidityProof: proof.compressedProof,
        inputCompressedAccounts: inputAccounts,
        // NOTE(review): the full rootIndices array (including the trailing
        // address-root index) is forwarded here — confirm downstream ignores
        // the extra element.
        inputStateRootIndices: proof.rootIndices,
        programId,
        outputStateTree,
    });
    const tx = buildAndSignTx(
        [ComputeBudgetProgram.setComputeUnitLimit({ units: 1_000_000 }), ix],
        payer,
        blockhash,
        [],
    );
    const txId = await sendAndConfirmTx(rpc, tx, confirmOptions);
    return txId;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/actions/compress.ts
|
import {
ComputeBudgetProgram,
ConfirmOptions,
PublicKey,
Signer,
TransactionSignature,
} from '@solana/web3.js';
import { LightSystemProgram } from '../programs';
import { Rpc } from '../rpc';
import { buildAndSignTx, sendAndConfirmTx } from '../utils';
import { BN } from '@coral-xyz/anchor';
import { defaultTestStateTreeAccounts } from '../constants';
/**
* Compress lamports to a solana address
*
* @param rpc RPC to use
* @param payer Payer of the transaction and initialization fees
* @param lamports Amount of lamports to compress
* @param toAddress Address of the recipient compressed account
* @param outputStateTree Optional output state tree. Defaults to a current shared state tree.
* @param confirmOptions Options for confirming the transaction
*
* @return Transaction signature
*/
/// TODO: add multisig support
/// TODO: add support for payer != owner
export async function compress(
    rpc: Rpc,
    payer: Signer,
    lamports: number | BN,
    toAddress: PublicKey,
    outputStateTree?: PublicKey,
    confirmOptions?: ConfirmOptions,
): Promise<TransactionSignature> {
    // Build the compression instruction, then fetch a fresh blockhash for the
    // transaction envelope.
    const ix = await LightSystemProgram.compress({
        payer: payer.publicKey,
        toAddress,
        lamports,
        outputStateTree,
    });
    const { blockhash } = await rpc.getLatestBlockhash();
    // Validity-proof verification is compute-heavy, so raise the CU limit.
    const instructions = [
        ComputeBudgetProgram.setComputeUnitLimit({ units: 1_000_000 }),
        ix,
    ];
    const signedTx = buildAndSignTx(instructions, payer, blockhash, []);
    return await sendAndConfirmTx(rpc, signedTx, confirmOptions);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/actions/common.ts
|
import { Signer } from '@solana/web3.js';
/**
 * @internal Removes `signer` from `signers` (matched by public key) when the
 * exact `signer` reference is present in the list; otherwise returns the
 * list unchanged.
 */
export function dedupeSigner(signer: Signer, signers: Signer[]): Signer[] {
    if (!signers.includes(signer)) {
        return signers;
    }
    const signerKey = signer.publicKey.toString();
    return signers.filter(s => s.publicKey.toString() !== signerKey);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/actions/transfer.ts
|
import {
ComputeBudgetProgram,
ConfirmOptions,
PublicKey,
Signer,
TransactionSignature,
} from '@solana/web3.js';
import { BN } from '@coral-xyz/anchor';
import {
LightSystemProgram,
selectMinCompressedSolAccountsForTransfer,
} from '../programs';
import { Rpc } from '../rpc';
import { bn, CompressedAccountWithMerkleContext } from '../state';
import { buildAndSignTx, sendAndConfirmTx } from '../utils';
import { GetCompressedAccountsByOwnerConfig } from '../rpc-interface';
/**
* Transfer compressed lamports from one owner to another
*
* @param rpc Rpc to use
* @param payer Payer of transaction fees
* @param lamports Number of lamports to transfer
* @param owner Owner of the compressed lamports
* @param toAddress Destination address of the recipient
* @param merkleTree State tree account that the compressed lamports should be
* inserted into. Defaults to the default state tree account.
* @param confirmOptions Options for confirming the transaction
* @param config Configuration for fetching compressed accounts
*
*
* @return Signature of the confirmed transaction
*/
export async function transfer(
    rpc: Rpc,
    payer: Signer,
    lamports: number | BN,
    owner: Signer,
    toAddress: PublicKey,
    /// TODO: allow multiple
    merkleTree?: PublicKey,
    confirmOptions?: ConfirmOptions,
): Promise<TransactionSignature> {
    let accumulatedLamports = bn(0);
    const compressedAccounts: CompressedAccountWithMerkleContext[] = [];
    let cursor: string | undefined;
    const batchSize = 1000; // Maximum allowed by the API
    lamports = bn(lamports);
    // Page through the owner's compressed accounts until enough lamports are
    // gathered or the pages run out.
    while (accumulatedLamports.lt(lamports)) {
        const batchConfig: GetCompressedAccountsByOwnerConfig = {
            filters: undefined,
            dataSlice: undefined,
            cursor,
            limit: new BN(batchSize),
        };
        const batch = await rpc.getCompressedAccountsByOwner(
            owner.publicKey,
            batchConfig,
        );
        // Only accounts that actually hold lamports are transfer candidates.
        for (const account of batch.items) {
            if (account.lamports.gt(new BN(0))) {
                compressedAccounts.push(account);
                accumulatedLamports = accumulatedLamports.add(account.lamports);
            }
        }
        cursor = batch.cursor ?? undefined;
        // Stop on a short (final) page or once the target amount is covered.
        if (batch.items.length < batchSize || accumulatedLamports.gte(lamports))
            break;
    }
    if (accumulatedLamports.lt(lamports)) {
        throw new Error(
            `Not enough balance for transfer. Required: ${lamports.toString()}, available: ${accumulatedLamports.toString()}`,
        );
    }
    // Narrow the gathered set to the fewest accounts covering the amount.
    const [inputAccounts] = selectMinCompressedSolAccountsForTransfer(
        compressedAccounts,
        lamports,
    );
    const proof = await rpc.getValidityProof(
        inputAccounts.map(account => bn(account.hash)),
    );
    // NOTE(review): the transaction is signed by `payer` only and the
    // instruction authority is `payer`; `owner` is used solely to look up
    // accounts here. Confirm this is intended when owner != payer.
    const ix = await LightSystemProgram.transfer({
        payer: payer.publicKey,
        inputCompressedAccounts: inputAccounts,
        toAddress,
        lamports,
        recentInputStateRootIndices: proof.rootIndices,
        recentValidityProof: proof.compressedProof,
        outputStateTrees: merkleTree,
    });
    const { blockhash } = await rpc.getLatestBlockhash();
    const signedTx = buildAndSignTx(
        [ComputeBudgetProgram.setComputeUnitLimit({ units: 1_000_000 }), ix],
        payer,
        blockhash,
    );
    const txId = await sendAndConfirmTx(rpc, signedTx, confirmOptions);
    return txId;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/actions/index.ts
|
export * from './compress';
export * from './create-account';
export * from './decompress';
export * from './common';
export * from './transfer';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/actions/decompress.ts
|
import {
ComputeBudgetProgram,
ConfirmOptions,
PublicKey,
Signer,
TransactionSignature,
} from '@solana/web3.js';
import { LightSystemProgram, sumUpLamports } from '../programs';
import { Rpc } from '../rpc';
import { buildAndSignTx, sendAndConfirmTx } from '../utils';
import { BN } from '@coral-xyz/anchor';
import { CompressedAccountWithMerkleContext, bn } from '../state';
/**
 * Decompress lamports into a regular (uncompressed) Solana account
 *
 * @param rpc             RPC to use
 * @param payer           Payer of the transaction fees, and owner of the
 *                        compressed lamports being spent
 * @param lamports        Amount of lamports to decompress
 * @param recipient       Address of the uncompressed recipient account
 * @param outputStateTree Optional output state tree. Defaults to a current shared state tree.
 * @param confirmOptions  Options for confirming the transaction
 *
 * @return Transaction signature
 */
/// TODO: add multisig support
/// TODO: add support for payer != owner
export async function decompress(
    rpc: Rpc,
    payer: Signer,
    lamports: number | BN,
    recipient: PublicKey,
    outputStateTree?: PublicKey,
    confirmOptions?: ConfirmOptions,
): Promise<TransactionSignature> {
    /// TODO: use dynamic state tree and nullifier queue
    // Uses ALL of the payer's compressed accounts as inputs.
    // NOTE(review): presumably the program returns the remainder as a new
    // compressed account — confirm against LightSystemProgram.decompress.
    const userCompressedAccountsWithMerkleContext: CompressedAccountWithMerkleContext[] =
        (await rpc.getCompressedAccountsByOwner(payer.publicKey)).items;
    lamports = bn(lamports);
    const inputLamports = sumUpLamports(
        userCompressedAccountsWithMerkleContext,
    );
    if (lamports.gt(inputLamports)) {
        throw new Error(
            `Not enough compressed lamports. Expected ${lamports}, got ${inputLamports}`,
        );
    }
    // Validity proof covering every input account hash.
    const proof = await rpc.getValidityProof(
        userCompressedAccountsWithMerkleContext.map(x => bn(x.hash)),
    );
    const { blockhash } = await rpc.getLatestBlockhash();
    const ix = await LightSystemProgram.decompress({
        payer: payer.publicKey,
        toAddress: recipient,
        outputStateTree: outputStateTree,
        inputCompressedAccounts: userCompressedAccountsWithMerkleContext,
        recentValidityProof: proof.compressedProof,
        recentInputStateRootIndices: proof.rootIndices,
        lamports,
    });
    const tx = buildAndSignTx(
        [ComputeBudgetProgram.setComputeUnitLimit({ units: 1_000_000 }), ix],
        payer,
        blockhash,
        [],
    );
    const txId = await sendAndConfirmTx(rpc, tx, confirmOptions);
    return txId;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/wallet/interface.ts
|
/// TODO: extract wallet into its own npm package
import {
Commitment,
Connection,
Keypair,
VersionedTransaction,
sendAndConfirmTransaction,
} from '@solana/web3.js';
import { PublicKey, Transaction } from '@solana/web3.js';
import nacl from 'tweetnacl';
const { sign } = nacl;
// Public inputs of a single inclusion proof: the tree root and the leaf
// whose membership is being proven.
export type InclusionProofPublicInputs = {
    root: string;
    leaf: string;
};
// Private witness of a single inclusion proof: the sibling path from the
// leaf to the root, plus the leaf and its position in the tree.
export type InclusionProofPrivateInputs = {
    merkleProof: string[];
    leaf: string;
    leafIndex: string;
};
/// On the system level, we're proving simple inclusion proofs in a
/// state tree, for each utxo used as input into a transaction.
export type InclusionProofInputs = (InclusionProofPublicInputs &
    InclusionProofPrivateInputs)[];
/// Mock Solana web3 library
export class Wallet {
    // Public key of the wrapped keypair (exposed for wallet adapters).
    _publicKey: PublicKey;
    // Keypair used for all signing.
    _keypair: Keypair;
    // Connection built from `url`.
    _connection: Connection;
    _url: string;
    _commitment: Commitment;
    constructor(keypair: Keypair, url: string, commitment: Commitment) {
        this._publicKey = keypair.publicKey;
        this._keypair = keypair;
        this._connection = new Connection(url);
        this._url = url;
        this._commitment = commitment;
    }
    // NOTE: the methods below are arrow-function properties on purpose —
    // they stay bound to the instance when destructured (see use-wallet.ts).
    // Signs a (versioned) transaction in place and returns it.
    signTransaction = async (tx: any): Promise<any> => {
        await tx.sign([this._keypair!]);
        return tx;
    };
    // Sends an already-signed transaction; returns its signature.
    sendTransaction = async (
        transaction: VersionedTransaction,
    ): Promise<string> => {
        const signature = await this._connection.sendTransaction(transaction);
        return signature;
    };
    // Signs each transaction sequentially via signTransaction.
    signAllTransactions = async <T extends Transaction | VersionedTransaction>(
        transactions: T[],
    ): Promise<T[]> => {
        const signedTxs = await Promise.all(
            transactions.map(async tx => {
                return await this.signTransaction(tx);
            }),
        );
        return signedTxs;
    };
    // Detached ed25519 signature over an arbitrary message.
    signMessage = async (message: Uint8Array): Promise<Uint8Array> => {
        return sign.detached(message, this._keypair.secretKey);
    };
    // Signs with the wallet keypair plus any extra signers, sends, and
    // waits for confirmation at the configured commitment.
    sendAndConfirmTransaction = async (
        transaction: Transaction,
        signers = [],
    ): Promise<any> => {
        const response = await sendAndConfirmTransaction(
            this._connection,
            transaction,
            [this._keypair, ...signers],
            {
                commitment: this._commitment,
            },
        );
        return response;
    };
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/wallet/index.ts
|
export * from './use-wallet';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/wallet/use-wallet.ts
|
import { Keypair, Commitment } from '@solana/web3.js';
import { Wallet } from './interface';
// TODO consider adding isNodeWallet
// Builds a wallet-adapter-shaped object around a local Keypair.
export const useWallet = (
    keypair: Keypair,
    url: string = 'http://127.0.0.1:8899',
    commitment: Commitment = 'confirmed',
) => {
    // Map the sentinel 'mock' endpoint to the local validator URL.
    const endpoint = url === 'mock' ? 'http://127.0.0.1:8899' : url;
    const wallet = new Wallet(keypair, endpoint, commitment);
    // Wallet methods are arrow-bound, so destructuring is safe.
    const {
        sendAndConfirmTransaction,
        signMessage,
        signTransaction,
        signAllTransactions,
        sendTransaction,
    } = wallet;
    return {
        publicKey: wallet._publicKey,
        sendAndConfirmTransaction,
        signMessage,
        signTransaction,
        signAllTransactions,
        sendTransaction,
    };
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/instruction/index.ts
|
export * from './pack-compressed-accounts';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/instruction/pack-compressed-accounts.ts
|
import { AccountMeta, PublicKey } from '@solana/web3.js';
import {
CompressedAccount,
OutputCompressedAccountWithPackedContext,
PackedCompressedAccountWithMerkleContext,
} from '../state';
import { CompressedAccountWithMerkleContext } from '../state/compressed-account';
import { toArray } from '../utils/conversion';
import { defaultTestStateTreeAccounts } from '../constants';
/**
 * @internal Returns the position of `key` in `accountsArray`, appending it
 * first if it is not yet present.
 * */
export function getIndexOrAdd(
    accountsArray: PublicKey[],
    key: PublicKey,
): number {
    for (let i = 0; i < accountsArray.length; i++) {
        if (accountsArray[i].equals(key)) {
            return i;
        }
    }
    // Not found: append and return the new slot.
    accountsArray.push(key);
    return accountsArray.length - 1;
}
/** @internal */
export function padOutputStateMerkleTrees(
    outputStateMerkleTrees: PublicKey[] | PublicKey | undefined,
    numberOfOutputCompressedAccounts: number,
    inputCompressedAccountsWithMerkleContext: CompressedAccountWithMerkleContext[],
): PublicKey[] {
    const count = numberOfOutputCompressedAccounts;
    if (count <= 0) {
        return [];
    }
    /// Default: reuse the 0th input account's state tree for every output
    /// account, falling back to the default test tree when there is no
    /// input state.
    if (outputStateMerkleTrees === undefined) {
        const fallback =
            inputCompressedAccountsWithMerkleContext.length > 0
                ? inputCompressedAccountsWithMerkleContext[0].merkleTree
                : defaultTestStateTreeAccounts().merkleTree;
        return new Array(count).fill(fallback);
    }
    /// Trees supplied: truncate or pad with the 0th supplied tree so the
    /// result length matches the number of output accounts.
    const trees = toArray(outputStateMerkleTrees);
    const padded = trees.slice(0, count);
    while (padded.length < count) {
        padded.push(trees[0]);
    }
    return padded;
}
/** Converts a list of pubkeys into writable, non-signer account metas. */
export function toAccountMetas(remainingAccounts: PublicKey[]): AccountMeta[] {
    const metas: AccountMeta[] = [];
    for (const pubkey of remainingAccounts) {
        metas.push({
            pubkey,
            isWritable: true,
            isSigner: false,
        });
    }
    return metas;
}
// TODO: include owner and lamports in packing.
/**
* Packs Compressed Accounts.
*
* Replaces PublicKey with index pointer to remaining accounts.
*
* @param inputCompressedAccounts Ix input state to be consumed
* @param inputStateRootIndices The recent state root indices of the
* input state. The expiry is tied to
* the proof.
* @param outputCompressedAccounts Ix output state to be created
* @param outputStateMerkleTrees Optional output state trees to be
* inserted into the output state.
* Defaults to the 0th state tree of
* the input state. Gets padded to the
* length of outputCompressedAccounts.
*
* @param remainingAccounts Optional existing array of accounts
* to append to.
**/
export function packCompressedAccounts(
    inputCompressedAccounts: CompressedAccountWithMerkleContext[],
    inputStateRootIndices: number[],
    outputCompressedAccounts: CompressedAccount[],
    outputStateMerkleTrees?: PublicKey[] | PublicKey,
    remainingAccounts: PublicKey[] = [],
): {
    packedInputCompressedAccounts: PackedCompressedAccountWithMerkleContext[];
    packedOutputCompressedAccounts: OutputCompressedAccountWithPackedContext[];
    remainingAccounts: PublicKey[];
} {
    // Work on a copy so the caller's array is never mutated.
    const accounts = remainingAccounts.slice();
    const packedInputCompressedAccounts: PackedCompressedAccountWithMerkleContext[] =
        [];
    const packedOutputCompressedAccounts: OutputCompressedAccountWithPackedContext[] =
        [];
    /// Pack inputs: replace the tree and queue pubkeys with indices into
    /// the deduplicated remaining-accounts list.
    for (let i = 0; i < inputCompressedAccounts.length; i++) {
        const input = inputCompressedAccounts[i];
        const merkleTreePubkeyIndex = getIndexOrAdd(accounts, input.merkleTree);
        const nullifierQueuePubkeyIndex = getIndexOrAdd(
            accounts,
            input.nullifierQueue,
        );
        packedInputCompressedAccounts.push({
            compressedAccount: {
                owner: input.owner,
                lamports: input.lamports,
                address: input.address,
                data: input.data,
            },
            merkleContext: {
                merkleTreePubkeyIndex,
                nullifierQueuePubkeyIndex,
                leafIndex: input.leafIndex,
                queueIndex: null,
            },
            rootIndex: inputStateRootIndices[i],
            readOnly: false,
        });
    }
    /// Pack outputs against the padded output-tree list.
    const outputTrees = padOutputStateMerkleTrees(
        outputStateMerkleTrees,
        outputCompressedAccounts.length,
        inputCompressedAccounts,
    );
    for (let i = 0; i < outputCompressedAccounts.length; i++) {
        const output = outputCompressedAccounts[i];
        packedOutputCompressedAccounts.push({
            compressedAccount: {
                owner: output.owner,
                lamports: output.lamports,
                address: output.address,
                data: output.data,
            },
            merkleTreeIndex: getIndexOrAdd(accounts, outputTrees[i]),
        });
    }
    return {
        packedInputCompressedAccounts,
        packedOutputCompressedAccounts,
        remainingAccounts: accounts,
    };
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/index.ts
|
export * from './merkle-tree';
export * from './test-rpc';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/merkle-tree/merkle-tree.ts
|
import { LightWasm } from '../test-rpc/test-rpc';
export const DEFAULT_ZERO = '0';
/**
* @callback hashFunction
* @param left Left leaf
* @param right Right leaf
*/
/**
* Merkle tree
*/
export class MerkleTree {
    // Tree height; capacity = 2 ** levels leaves.
    levels: number;
    capacity: number;
    // Filler value used for non-existent leaves.
    zeroElement;
    // _zeros[i] is the hash of an empty subtree of height i.
    _zeros: string[];
    // _layers[0] holds the leaves; _layers[levels][0] is the root.
    _layers: string[][];
    // Poseidon hasher backend.
    _lightWasm: LightWasm;
    /**
     * Constructor
     * @param {number} levels Number of levels in the tree
     * @param lightWasm Poseidon hasher backend
     * @param {Array} [elements] Initial elements
     * @param {Object} options
     * @param [options.zeroElement] Value for non-existent leaves
     */
    constructor(
        levels: number,
        lightWasm: LightWasm,
        elements: string[] = [],
        { zeroElement = DEFAULT_ZERO } = {},
    ) {
        this.levels = levels;
        this.capacity = 2 ** levels;
        this.zeroElement = zeroElement;
        this._lightWasm = lightWasm;
        if (elements.length > this.capacity) {
            throw new Error('Tree is full');
        }
        this._zeros = [];
        this._layers = [];
        this._layers[0] = elements;
        this._zeros[0] = this.zeroElement;
        // Precompute the empty-subtree hash for every level.
        for (let i = 1; i <= levels; i++) {
            this._zeros[i] = this._lightWasm.poseidonHashString([
                this._zeros[i - 1],
                this._zeros[i - 1],
            ]);
        }
        this._rebuild();
    }
    /** Recomputes every internal layer from the current leaves. */
    _rebuild() {
        for (let level = 1; level <= this.levels; level++) {
            this._layers[level] = [];
            for (
                let i = 0;
                i < Math.ceil(this._layers[level - 1].length / 2);
                i++
            ) {
                // Missing right sibling is substituted by the zero hash.
                this._layers[level][i] = this._lightWasm.poseidonHashString([
                    this._layers[level - 1][i * 2],
                    i * 2 + 1 < this._layers[level - 1].length
                        ? this._layers[level - 1][i * 2 + 1]
                        : this._zeros[level - 1],
                ]);
            }
        }
    }
    /**
     * Get tree root
     * @returns {*}
     */
    root() {
        return this._layers[this.levels].length > 0
            ? this._layers[this.levels][0]
            : this._zeros[this.levels];
    }
    /**
     * Insert new element into the tree
     * @param element Element to insert
     */
    insert(element: string) {
        if (this._layers[0].length >= this.capacity) {
            throw new Error('Tree is full');
        }
        // Appending is an update at the first free leaf slot.
        this.update(this._layers[0].length, element);
    }
    /**
     * Insert multiple elements into the tree. Tree will be fully rebuilt during this operation.
     * @param {Array} elements Elements to insert
     */
    bulkInsert(elements: string[]) {
        if (this._layers[0].length + elements.length > this.capacity) {
            throw new Error('Tree is full');
        }
        this._layers[0].push(...elements);
        this._rebuild();
    }
    // TODO: update does not work debug
    /**
     * Change an element in the tree
     * @param {number} index Index of element to change
     * @param element Updated element value
     */
    update(index: number, element: string) {
        // index == length appends; anything beyond that is out of bounds.
        if (
            isNaN(Number(index)) ||
            index < 0 ||
            index > this._layers[0].length ||
            index >= this.capacity
        ) {
            throw new Error('Insert index out of bounds: ' + index);
        }
        this._layers[0][index] = element;
        // Re-hash only the path from the changed leaf up to the root.
        for (let level = 1; level <= this.levels; level++) {
            index >>= 1;
            this._layers[level][index] = this._lightWasm.poseidonHashString([
                this._layers[level - 1][index * 2],
                index * 2 + 1 < this._layers[level - 1].length
                    ? this._layers[level - 1][index * 2 + 1]
                    : this._zeros[level - 1],
            ]);
        }
    }
    /**
     * Get merkle path to a leaf
     * @param {number} index Leaf index to generate path for
     * @returns {{pathElements: number[], pathIndex: number[]}} An object containing adjacent elements and left-right index
     */
    path(index: number) {
        if (
            isNaN(Number(index)) ||
            index < 0 ||
            index >= this._layers[0].length
        ) {
            throw new Error('Index out of bounds: ' + index);
        }
        const pathElements: string[] = [];
        const pathIndices: number[] = [];
        for (let level = 0; level < this.levels; level++) {
            pathIndices[level] = index % 2;
            // Sibling is (index ^ 1); missing siblings are zero hashes.
            pathElements[level] =
                (index ^ 1) < this._layers[level].length
                    ? this._layers[level][index ^ 1]
                    : this._zeros[level];
            index >>= 1;
        }
        return {
            pathElements,
            pathIndices,
        };
    }
    /**
     * Find an element in the tree
     * @param element An element to find
     * @param comparator A function that checks leaf value equality
     * @returns {number} Index if element is found, otherwise -1
     */
    indexOf(
        element: string,
        comparator: ((element: string, el: string) => boolean) | null = null,
    ) {
        if (comparator) {
            return this._layers[0].findIndex((el: string) =>
                comparator(element, el),
            );
        } else {
            return this._layers[0].indexOf(element);
        }
    }
    /**
     * Returns a copy of non-zero tree elements
     * @returns {Object[]}
     */
    elements() {
        return this._layers[0].slice();
    }
    /**
     * Serialize entire tree state including intermediate layers into a plain object
     * Deserializing it back will not require to recompute any hashes
     * Elements are not converted to a plain type, this is responsibility of the caller
     */
    serialize() {
        return {
            levels: this.levels,
            _zeros: this._zeros,
            _layers: this._layers,
        };
    }
    /**
     * Deserialize data into a MerkleTree instance
     * Make sure to provide the same hashFunction as was used in the source tree,
     * otherwise the tree state will be invalid
     *
     * @param data
     * @param hashFunction
     * @returns {MerkleTree}
     */
    static deserialize(
        data: any,
        hashFunction: (left: string, right: string) => string,
    ) {
        const instance = Object.assign(Object.create(this.prototype), data);
        // The serialized state carries no hasher. Wrap the provided
        // two-input hash function in a minimal LightWasm-shaped shim so
        // every method that calls `_lightWasm.poseidonHashString` works on
        // the deserialized instance. (Previously this assigned an unused
        // `_hash` property and left `_lightWasm` undefined, breaking
        // insert/update/bulkInsert after deserialization.)
        instance._lightWasm = {
            poseidonHashString: ([left, right]: string[]) =>
                hashFunction(left, right),
        } as unknown as LightWasm;
        instance.capacity = 2 ** instance.levels;
        instance.zeroElement = instance._zeros[0];
        return instance;
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/merkle-tree/indexed-array.ts
|
import { LightWasm } from '../test-rpc/test-rpc';
import { BN } from '@coral-xyz/anchor';
import { bn } from '../../state';
import { HIGHEST_ADDRESS_PLUS_ONE } from '../../constants';
export class IndexedElement {
    // Position of this element within the indexed array.
    public index: number;
    // Value stored by this element.
    public value: BN;
    // Index of the element holding the next-greater value (0 = none).
    public nextIndex: number;
    constructor(index: number, value: BN, nextIndex: number) {
        this.index = index;
        this.value = value;
        this.nextIndex = nextIndex;
    }
    /** Value equality (BN comparison); indices are ignored. */
    public equals(other: IndexedElement): boolean {
        return this.value.eq(other.value);
    }
    /** Orders elements by value: -1, 0, or 1. */
    public compareTo(other: IndexedElement): number {
        return this.value.cmp(other.value);
    }
    /**
     * Poseidon hash over (value, nextIndex, nextValue) — the leaf layout
     * of the indexed merkle tree.
     */
    public hash(lightWasm: LightWasm, nextValue: BN): Uint8Array {
        try {
            const hash = lightWasm.poseidonHash([
                bn(this.value.toArray('be', 32)).toString(),
                bn(this.nextIndex).toString(),
                bn(nextValue.toArray('be', 32)).toString(),
            ]);
            return hash;
        } catch (error) {
            // Preserve the underlying failure instead of swallowing it.
            throw new Error(`Hashing failed: ${error}`);
        }
    }
}
/**
 * Result of inserting a value into an indexed array: the updated low
 * element, the freshly created element, and the value the new element's
 * `nextIndex` points at.
 */
export class IndexedElementBundle {
    constructor(
        public newLowElement: IndexedElement,
        public newElement: IndexedElement,
        public newElementNextValue: BN,
    ) {}
}
/**
 * This indexed array implementation mirrors the rust implementation of the
 * indexed merkle tree. It stores the elements of the indexed merkle tree.
 */
export class IndexedArray {
    public elements: Array<IndexedElement>;
    // Index of the most recently appended element.
    public currentNodeIndex: number;
    // Index of the element with the greatest value (its nextIndex is 0).
    public highestElementIndex: number;
    constructor(
        elements: Array<IndexedElement>,
        currentNodeIndex: number,
        highestElementIndex: number,
    ) {
        this.elements = elements;
        this.currentNodeIndex = currentNodeIndex;
        this.highestElementIndex = highestElementIndex;
    }
    /** An array holding only the zero element, pointing at itself. */
    public static default(): IndexedArray {
        return new IndexedArray([new IndexedElement(0, bn(0), 0)], 0, 0);
    }
    public get(index: number): IndexedElement | undefined {
        return this.elements[index];
    }
    /** Index of the last appended element (not an element count). */
    public length(): number {
        return Number(this.currentNodeIndex);
    }
    public isEmpty(): boolean {
        return this.currentNodeIndex === 0;
    }
    /** Returns the element holding exactly `value`, if present. */
    public findElement(value: BN): IndexedElement | undefined {
        // BN instances must be compared with `.eq`; the previous `===`
        // compared object identity and practically always missed.
        return this.elements
            .slice(0, this.length() + 1)
            .find(node => node.value.eq(value));
    }
    /** Appends the highest possible address; used to seed address trees. */
    public init(): IndexedElementBundle {
        try {
            const init_value = HIGHEST_ADDRESS_PLUS_ONE;
            return this.append(init_value);
        } catch (error) {
            throw new Error(`Failed to initialize IndexedArray: ${error}`);
        }
    }
    /**
     * Finds the index of the low element for the given `value` which should not be part of the array.
     * Low element is the greatest element which still has a lower value than the provided one.
     * Low elements are used in non-membership proofs.
     */
    public findLowElementIndex(value: BN): number | undefined {
        // Try to find element whose next element is higher than the provided value.
        for (let i = 0; i <= this.length(); i++) {
            const node = this.elements[i];
            if (
                this.elements[node.nextIndex].value.gt(value) &&
                node.value.lt(value)
            ) {
                return i;
            } else if (node.value.eq(value)) {
                throw new Error('Element already exists in the array');
            }
        }
        // If no such element was found, it means that our value is going to be the greatest in the array.
        // This means that the currently greatest element is going to be the low element of our value.
        return this.highestElementIndex;
    }
    /**
     * Returns the low element for the given value and the next value for that low element.
     * Low element is the greatest element which still has lower value than the provided one.
     * Low elements are used in non-membership proofs.
     */
    public findLowElement(
        value: BN,
    ): [IndexedElement | undefined, BN | undefined] {
        const lowElementIndex = this.findLowElementIndex(value);
        if (lowElementIndex === undefined) return [undefined, undefined];
        const lowElement = this.elements[lowElementIndex];
        return [lowElement, this.elements[lowElement.nextIndex].value];
    }
    /**
     * Returns the hash of the given element. That hash consists of:
     * - The value of the given element.
     * - The `nextIndex` of the given element.
     * - The value of the element pointed by `nextIndex`.
     */
    public hashElement(
        lightWasm: LightWasm,
        index: number,
    ): Uint8Array | undefined {
        const element = this.elements[index];
        if (!element) return undefined;
        const nextElement = this.elements[element.nextIndex];
        if (!nextElement) return undefined;
        const hash = lightWasm.poseidonHash([
            bn(element.value.toArray('be', 32)).toString(),
            bn(element.nextIndex).toString(),
            bn(nextElement.value.toArray('be', 32)).toString(),
        ]);
        return hash;
    }
    /**
     * Appends a new element with the given value to the indexed array.
     * It finds the low element index and uses it to append the new element correctly.
     * @param value The value of the new element to append.
     * @returns The new element and its low element after insertion.
     */
    public append(value: BN): IndexedElementBundle {
        const lowElementIndex = this.findLowElementIndex(value);
        if (lowElementIndex === undefined) {
            throw new Error('Low element index not found.');
        }
        return this.appendWithLowElementIndex(lowElementIndex, value);
    }
    /**
     * Appends a new element with the given value to the indexed array using a specific low element index.
     * This method ensures the new element is placed correctly relative to the low element.
     * @param lowElementIndex The index of the low element.
     * @param value The value of the new element to append.
     * @returns The new element and its updated low element.
     */
    public appendWithLowElementIndex(
        lowElementIndex: number,
        value: BN,
    ): IndexedElementBundle {
        const lowElement = this.elements[lowElementIndex];
        // Validate ordering: value must lie strictly between the low
        // element and (when it exists) the low element's successor.
        if (lowElement.nextIndex === 0) {
            if (value.lte(lowElement.value)) {
                throw new Error(
                    'New element value must be greater than the low element value.',
                );
            }
        } else {
            const nextElement = this.elements[lowElement.nextIndex];
            if (value.lte(lowElement.value)) {
                throw new Error(
                    'New element value must be greater than the low element value.',
                );
            }
            if (value.gte(nextElement.value)) {
                throw new Error(
                    'New element value must be less than the next element value.',
                );
            }
        }
        const newElementBundle = this.newElementWithLowElementIndex(
            lowElementIndex,
            value,
        );
        // If the old low element wasn't pointing to any element, it means that:
        //
        // * It used to be the highest element.
        // * Our new element, which we are appending, is going the be the
        //   highest element.
        //
        // Therefore, we need to save the new element index as the highest
        // index.
        if (lowElement.nextIndex === 0) {
            this.highestElementIndex = newElementBundle.newElement.index;
        }
        // Insert new node.
        this.currentNodeIndex = newElementBundle.newElement.index;
        this.elements[this.length()] = newElementBundle.newElement;
        // Update low element.
        this.elements[lowElementIndex] = newElementBundle.newLowElement;
        return newElementBundle;
    }
    /**
     * Finds the lowest element in the array.
     * @returns The lowest element or undefined if the array is empty.
     */
    public lowest(): IndexedElement | undefined {
        return this.elements.length > 0 ? this.elements[0] : undefined;
    }
    /**
     * Creates a new element with the specified value and updates the low element index accordingly.
     * @param lowElementIndex The index of the low element.
     * @param value The value for the new element.
     * @returns A bundle containing the new element, the updated low element, and the value of the next element.
     */
    public newElementWithLowElementIndex(
        lowElementIndex: number,
        value: BN,
    ): IndexedElementBundle {
        const newLowElement = this.elements[lowElementIndex];
        const newElementIndex = this.currentNodeIndex + 1;
        // The new element inherits the low element's successor; the low
        // element is rewired to point at the new element.
        const newElement = new IndexedElement(
            newElementIndex,
            value,
            newLowElement.nextIndex,
        );
        newLowElement.nextIndex = newElementIndex;
        const newElementNextValue = this.elements[newElement.nextIndex].value;
        return new IndexedElementBundle(
            newLowElement,
            newElement,
            newElementNextValue,
        );
    }
    /**
     * Creates a new element with the specified value by first finding the appropriate low element index.
     * @param value The value for the new element.
     * @returns A bundle containing the new element, the updated low element, and the value of the next element.
     */
    public newElement(value: BN): IndexedElementBundle {
        const lowElementIndex = this.findLowElementIndex(value);
        if (lowElementIndex === undefined) {
            throw new Error('Low element index not found.');
        }
        return this.newElementWithLowElementIndex(lowElementIndex, value);
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/merkle-tree/index.ts
|
export * from './indexed-array';
export * from './merkle-tree';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/test-rpc/get-compressed-accounts.ts
|
import { PublicKey } from '@solana/web3.js';
import { BN } from '@coral-xyz/anchor';
import { getParsedEvents } from './get-parsed-events';
import { defaultTestStateTreeAccounts } from '../../constants';
import { Rpc } from '../../rpc';
import {
CompressedAccountWithMerkleContext,
bn,
MerkleContext,
createCompressedAccountWithMerkleContext,
} from '../../state';
/** All unspent compressed accounts, narrowed down to the given owner. */
export async function getCompressedAccountsByOwnerTest(
    rpc: Rpc,
    owner: PublicKey,
) {
    const unspentAccounts = await getCompressedAccountsForTest(rpc);
    return unspentAccounts.filter(account => account.owner.equals(owner));
}
/** The unspent compressed account with the given hash, if any. */
export async function getCompressedAccountByHashTest(
    rpc: Rpc,
    hash: BN,
): Promise<CompressedAccountWithMerkleContext | undefined> {
    const unspentAccounts = await getCompressedAccountsForTest(rpc);
    for (const account of unspentAccounts) {
        if (bn(account.hash).eq(hash)) {
            return account;
        }
    }
    return undefined;
}
/** Unspent accounts matching any requested hash, newest leaf first. */
export async function getMultipleCompressedAccountsByHashTest(
    rpc: Rpc,
    hashes: BN[],
): Promise<CompressedAccountWithMerkleContext[]> {
    const unspentAccounts = await getCompressedAccountsForTest(rpc);
    const matches = unspentAccounts.filter(account =>
        hashes.some(hash => hash.eq(bn(account.hash))),
    );
    matches.sort((a, b) => b.leafIndex - a.leafIndex);
    return matches;
}
/// Returns all unspent compressed accounts
async function getCompressedAccountsForTest(rpc: Rpc) {
    // Replay events oldest-first, tracking every created (output) account
    // and every consumed (input) account hash.
    const events = (await getParsedEvents(rpc)).reverse();
    const outputAccounts: CompressedAccountWithMerkleContext[] = [];
    const spentHashes: BN[] = [];
    for (const event of events) {
        event.outputCompressedAccounts.forEach((account, index) => {
            const merkleContext: MerkleContext = {
                merkleTree: defaultTestStateTreeAccounts().merkleTree,
                nullifierQueue: defaultTestStateTreeAccounts().nullifierQueue,
                hash: event.outputCompressedAccountHashes[index],
                leafIndex: event.outputLeafIndices[index],
            };
            outputAccounts.push(
                createCompressedAccountWithMerkleContext(
                    merkleContext,
                    account.compressedAccount.owner,
                    account.compressedAccount.lamports,
                    account.compressedAccount.data ?? undefined,
                    account.compressedAccount.address ?? undefined,
                ),
            );
        });
        for (const hash of event.inputCompressedAccountHashes) {
            spentHashes.push(bn(hash));
        }
    }
    // An output is unspent iff its hash never shows up as an input.
    const unspentAccounts = outputAccounts.filter(
        account => !spentHashes.some(hash => hash.eq(bn(account.hash))),
    );
    unspentAccounts.sort((a, b) => b.leafIndex - a.leafIndex);
    return unspentAccounts;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/test-rpc/get-compressed-token-accounts.ts
|
import { PublicKey } from '@solana/web3.js';
import { getParsedEvents } from './get-parsed-events';
import { BN, BorshCoder } from '@coral-xyz/anchor';
import { IDL } from '../../idls/light_compressed_token';
import { defaultTestStateTreeAccounts } from '../../constants';
import { Rpc } from '../../rpc';
import { ParsedTokenAccount, WithCursor } from '../../rpc-interface';
import {
CompressedAccount,
PublicTransactionEvent,
MerkleContext,
createCompressedAccountWithMerkleContext,
bn,
} from '../../state';
// Program id of the compressed-token program; used as the expected owner
// when decoding token layouts.
const tokenProgramId: PublicKey = new PublicKey(
    // TODO: can add check to ensure its consistent with the idl
    'cTokenmWW8bLPjZEBAUgYy3zKxQZW6VKi7bqNFEVv3m',
);
// Borsh-decoded token layout of a compressed token account.
// NOTE(review): field semantics mirror the `TokenData` type of the
// light_compressed_token IDL — confirm against the IDL when it changes.
type TokenData = {
    mint: PublicKey;
    owner: PublicKey;
    amount: BN;
    delegate: PublicKey | null;
    state: number;
    tlv: Buffer | null;
};
// A transaction event with its output accounts parsed as token accounts.
export type EventWithParsedTokenTlvData = {
    inputCompressedAccountHashes: number[][];
    outputCompressedAccounts: ParsedTokenAccount[];
};
/**
* Manually parse the compressed token layout for a given compressed account.
* @param compressedAccount - The compressed account
* @returns The parsed token data
*/
export function parseTokenLayoutWithIdl(
    compressedAccount: CompressedAccount,
    programId: PublicKey = tokenProgramId,
): TokenData | null {
    // Accounts without data (or with empty data) carry no token layout.
    if (compressedAccount.data === null) return null;
    const bytes = compressedAccount.data.data;
    if (bytes.length === 0) return null;
    // Only accounts owned by the compressed-token program are parseable.
    if (compressedAccount.owner.toBase58() !== programId.toBase58()) {
        throw new Error(
            `Invalid owner ${compressedAccount.owner.toBase58()} for token layout`,
        );
    }
    const coder = new BorshCoder(IDL);
    return coder.types.decode('TokenData', Buffer.from(bytes));
}
/**
* parse compressed accounts of an event with token layout
* @internal
* TODO: refactor
*/
async function parseEventWithTokenTlvData(
    event: PublicTransactionEvent,
): Promise<EventWithParsedTokenTlvData> {
    const { pubkeyArray, outputCompressedAccountHashes } = event;
    const parsedOutputs: ParsedTokenAccount[] = [];
    event.outputCompressedAccounts.forEach((output, i) => {
        const merkleContext: MerkleContext = {
            merkleTree: pubkeyArray[output.merkleTreeIndex],
            nullifierQueue:
                // FIXME: fix make dynamic
                defaultTestStateTreeAccounts().nullifierQueue,
            hash: outputCompressedAccountHashes[i],
            leafIndex: event.outputLeafIndices[i],
        };
        if (!output.compressedAccount.data) throw new Error('No data');
        const parsedData = parseTokenLayoutWithIdl(output.compressedAccount);
        if (!parsedData) throw new Error('Invalid token data');
        parsedOutputs.push({
            compressedAccount: createCompressedAccountWithMerkleContext(
                merkleContext,
                output.compressedAccount.owner,
                output.compressedAccount.lamports,
                output.compressedAccount.data,
                output.compressedAccount.address ?? undefined,
            ),
            parsed: parsedData,
        });
    });
    return {
        inputCompressedAccountHashes: event.inputCompressedAccountHashes,
        outputCompressedAccounts: parsedOutputs,
    };
}
/**
 * Retrieves all unspent compressed token accounts contained in the given
 * events.
 *
 * Note: This function is intended for testing purposes only. For production, use rpc.getCompressedTokenAccounts.
 *
 * @param events Public transaction events to index
 * @returns Parsed token accounts that have not been spent
 */
export async function getCompressedTokenAccounts(
    events: PublicTransactionEvent[],
): Promise<ParsedTokenAccount[]> {
    const eventsWithParsedTokenTlvData: EventWithParsedTokenTlvData[] =
        await Promise.all(
            events.map(event => parseEventWithTokenTlvData(event)),
        );

    /// strip spent compressed accounts if an output compressed account of tx n is
    /// an input compressed account of tx n+m, it is spent
    const allOutCompressedAccounts = eventsWithParsedTokenTlvData.flatMap(
        event => event.outputCompressedAccounts,
    );
    // Stringify each input hash exactly once and keep the results in a Set,
    // so the spent-check below is O(outputs + inputs) instead of the previous
    // O(outputs * inputs) with a JSON.stringify per comparison.
    const spentHashes = new Set(
        eventsWithParsedTokenTlvData.flatMap(event =>
            event.inputCompressedAccountHashes.map(hash =>
                JSON.stringify(hash),
            ),
        ),
    );
    return allOutCompressedAccounts.filter(
        outputCompressedAccount =>
            !spentHashes.has(
                JSON.stringify(
                    outputCompressedAccount.compressedAccount.hash,
                ),
            ),
    );
}
/** @internal */
export async function getCompressedTokenAccountsByOwnerTest(
rpc: Rpc,
owner: PublicKey,
mint: PublicKey,
): Promise<WithCursor<ParsedTokenAccount[]>> {
const events = await getParsedEvents(rpc);
const compressedTokenAccounts = await getCompressedTokenAccounts(events);
const accounts = compressedTokenAccounts.filter(
acc => acc.parsed.owner.equals(owner) && acc.parsed.mint.equals(mint),
);
return {
items: accounts.sort(
(a, b) =>
b.compressedAccount.leafIndex - a.compressedAccount.leafIndex,
),
cursor: null,
};
}
export async function getCompressedTokenAccountsByDelegateTest(
rpc: Rpc,
delegate: PublicKey,
mint: PublicKey,
): Promise<WithCursor<ParsedTokenAccount[]>> {
const events = await getParsedEvents(rpc);
const compressedTokenAccounts = await getCompressedTokenAccounts(events);
return {
items: compressedTokenAccounts.filter(
acc =>
acc.parsed.delegate?.equals(delegate) &&
acc.parsed.mint.equals(mint),
),
cursor: null,
};
}
/**
 * @internal
 * Fetch the compressed token account with the given account hash.
 *
 * @param rpc Test RPC connection used to fetch parsed events
 * @param hash Account hash to look up
 * @throws If no compressed account with that hash exists
 */
export async function getCompressedTokenAccountByHashTest(
    rpc: Rpc,
    hash: BN,
): Promise<ParsedTokenAccount> {
    const events = await getParsedEvents(rpc);
    const compressedTokenAccounts = await getCompressedTokenAccounts(events);
    // `find` short-circuits on the first match instead of scanning every
    // account as filter(...)[0] did; behavior is otherwise identical.
    const account = compressedTokenAccounts.find(acc =>
        bn(acc.compressedAccount.hash).eq(hash),
    );
    if (!account) {
        throw new Error('No compressed account found');
    }
    return account;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/test-rpc/test-rpc.ts
|
import { Connection, ConnectionConfig, PublicKey } from '@solana/web3.js';
import { BN } from '@coral-xyz/anchor';
import {
getCompressedAccountByHashTest,
getCompressedAccountsByOwnerTest,
getMultipleCompressedAccountsByHashTest,
} from './get-compressed-accounts';
import {
getCompressedTokenAccountByHashTest,
getCompressedTokenAccountsByDelegateTest,
getCompressedTokenAccountsByOwnerTest,
} from './get-compressed-token-accounts';
import { MerkleTree } from '../merkle-tree/merkle-tree';
import { getParsedEvents } from './get-parsed-events';
import { defaultTestStateTreeAccounts } from '../../constants';
import {
AddressWithTree,
CompressedMintTokenHolders,
CompressedTransaction,
GetCompressedAccountsByOwnerConfig,
PaginatedOptions,
HashWithTree,
LatestNonVotingSignatures,
LatestNonVotingSignaturesPaginated,
SignatureWithMetadata,
WithContext,
WithCursor,
} from '../../rpc-interface';
import {
CompressedProofWithContext,
CompressionApiInterface,
GetCompressedTokenAccountsByOwnerOrDelegateOptions,
ParsedTokenAccount,
TokenBalance,
} from '../../rpc-interface';
import {
BN254,
CompressedAccountWithMerkleContext,
MerkleContextWithMerkleProof,
PublicTransactionEvent,
bn,
} from '../../state';
import { IndexedArray } from '../merkle-tree';
import {
MerkleContextWithNewAddressProof,
convertMerkleProofsWithContextToHex,
convertNonInclusionMerkleProofInputsToHex,
proverRequest,
} from '../../rpc';
/** Configuration options for the {@link TestRpc} mock connection. */
export interface TestRpcConfig {
    /**
     * Address of the state tree to index. Default: public default test state
     * tree.
     */
    merkleTreeAddress?: PublicKey;
    /**
     * Nullifier queue associated with merkleTreeAddress
     */
    nullifierQueueAddress?: PublicKey;
    /**
     * Depth of state tree. Defaults to the public default test state tree depth
     */
    depth?: number;
    /**
     * Log proof generation time
     */
    log?: boolean;
    /**
     * Address of the address tree to index. Default: public default test
     * address tree.
     */
    addressTreeAddress?: PublicKey;
    /**
     * Address queue associated with addressTreeAddress
     */
    addressQueueAddress?: PublicKey;
}
/** Minimal wasm hasher interface required by {@link TestRpc}. */
export interface LightWasm {
    // Blake hash of the input. NOTE(review): hashLength is presumably the
    // digest length in bytes — confirm against the wasm implementation.
    blakeHash(input: string | Uint8Array, hashLength: number): Uint8Array;
    // Poseidon hash returned as raw bytes.
    poseidonHash(input: string[] | BN[]): Uint8Array;
    // Poseidon hash returned as a string.
    poseidonHashString(input: string[] | BN[]): string;
    // Poseidon hash returned as a BN.
    poseidonHashBN(input: string[] | BN[]): BN;
}
/**
 * Returns a mock RPC instance for use in unit tests.
 *
 * @param lightWasm Wasm hasher instance.
 * @param endpoint RPC endpoint URL. Defaults to
 * 'http://127.0.0.1:8899'.
 * @param compressionApiEndpoint Compression API endpoint URL. Defaults to
 * 'http://127.0.0.1:8784'.
 * @param proverEndpoint Prover server endpoint URL. Defaults to
 * 'http://127.0.0.1:3001'.
 * @param merkleTreeAddress Address of the merkle tree to index. Defaults
 * to the public default test state tree.
 * @param nullifierQueueAddress Optional address of the associated nullifier
 * queue.
 * @param depth Depth of the merkle tree.
 * @param log Log proof generation time.
 */
export async function getTestRpc(
    lightWasm: LightWasm,
    endpoint: string = 'http://127.0.0.1:8899',
    compressionApiEndpoint: string = 'http://127.0.0.1:8784',
    proverEndpoint: string = 'http://127.0.0.1:3001',
    merkleTreeAddress?: PublicKey,
    nullifierQueueAddress?: PublicKey,
    depth?: number,
    log = false,
) {
    const defaultAccounts = defaultTestStateTreeAccounts();
    return new TestRpc(
        endpoint,
        lightWasm,
        compressionApiEndpoint,
        proverEndpoint,
        undefined,
        {
            merkleTreeAddress: merkleTreeAddress || defaultAccounts.merkleTree,
            nullifierQueueAddress:
                nullifierQueueAddress || defaultAccounts.nullifierQueue,
            depth: depth || defaultAccounts.merkleTreeHeight,
            log,
        },
    );
}
/**
 * Simple mock rpc for unit tests that simulates the compression rpc interface.
 * Fetches, parses events and builds merkletree on-demand, i.e. it does not persist state.
 * Constraints:
 * - Can only index 1 merkletree
 * - Can only index up to 1000 transactions
 *
 * For advanced testing use photon: https://github.com/helius-labs/photon
 */
export class TestRpc extends Connection implements CompressionApiInterface {
    compressionApiEndpoint: string;
    proverEndpoint: string;
    merkleTreeAddress: PublicKey;
    nullifierQueueAddress: PublicKey;
    addressTreeAddress: PublicKey;
    addressQueueAddress: PublicKey;
    lightWasm: LightWasm;
    depth: number;
    log = false;

    /**
     * Establish a Compression-compatible JSON RPC mock-connection
     *
     * @param endpoint                  endpoint to the solana cluster (use for
     *                                  localnet only)
     * @param hasher                    light wasm hasher instance
     * @param compressionApiEndpoint    Endpoint to the compression server.
     * @param proverEndpoint            Endpoint to the prover server. defaults
     *                                  to endpoint
     * @param connectionConfig          Optional connection config
     * @param testRpcConfig             Config for the mock rpc
     */
    constructor(
        endpoint: string,
        hasher: LightWasm,
        compressionApiEndpoint: string,
        proverEndpoint: string,
        connectionConfig?: ConnectionConfig,
        testRpcConfig?: TestRpcConfig,
    ) {
        super(endpoint, connectionConfig || 'confirmed');
        this.compressionApiEndpoint = compressionApiEndpoint;
        this.proverEndpoint = proverEndpoint;

        const {
            merkleTreeAddress,
            nullifierQueueAddress,
            depth,
            log,
            addressTreeAddress,
            addressQueueAddress,
        } = testRpcConfig ?? {};

        // Fall back to the public default test accounts for anything not
        // provided via testRpcConfig.
        const {
            merkleTree,
            nullifierQueue,
            merkleTreeHeight,
            addressQueue,
            addressTree,
        } = defaultTestStateTreeAccounts();

        this.lightWasm = hasher;
        this.merkleTreeAddress = merkleTreeAddress ?? merkleTree;
        this.nullifierQueueAddress = nullifierQueueAddress ?? nullifierQueue;
        this.addressTreeAddress = addressTreeAddress ?? addressTree;
        this.addressQueueAddress = addressQueueAddress ?? addressQueue;
        this.depth = depth ?? merkleTreeHeight;
        this.log = log ?? false;
    }

    /**
     * Fetch the compressed account for the specified account hash
     */
    async getCompressedAccount(
        address?: BN254,
        hash?: BN254,
    ): Promise<CompressedAccountWithMerkleContext | null> {
        if (address) {
            throw new Error('address is not supported in test-rpc');
        }
        if (!hash) {
            throw new Error('hash is required');
        }

        const account = await getCompressedAccountByHashTest(this, hash);
        return account ?? null;
    }

    /**
     * Fetch the compressed balance for the specified account hash
     */
    async getCompressedBalance(address?: BN254, hash?: BN254): Promise<BN> {
        if (address) {
            throw new Error('address is not supported in test-rpc');
        }
        if (!hash) {
            throw new Error('hash is required');
        }
        const account = await getCompressedAccountByHashTest(this, hash);
        if (!account) {
            throw new Error('Account not found');
        }
        return bn(account.lamports);
    }

    /**
     * Fetch the total compressed balance for the specified owner public key
     */
    async getCompressedBalanceByOwner(owner: PublicKey): Promise<BN> {
        const accounts = await this.getCompressedAccountsByOwner(owner);
        return accounts.items.reduce(
            (acc, account) => acc.add(account.lamports),
            bn(0),
        );
    }

    /**
     * Fetch the latest merkle proof for the specified account hash from the
     * cluster
     */
    async getCompressedAccountProof(
        hash: BN254,
    ): Promise<MerkleContextWithMerkleProof> {
        const proofs = await this.getMultipleCompressedAccountProofs([hash]);
        return proofs[0];
    }

    /**
     * Fetch all the account info for multiple compressed accounts specified by
     * an array of account hashes
     */
    async getMultipleCompressedAccounts(
        hashes: BN254[],
    ): Promise<CompressedAccountWithMerkleContext[]> {
        return await getMultipleCompressedAccountsByHashTest(this, hashes);
    }

    /**
     * Ensure that the Compression Indexer has already indexed the transaction
     */
    async confirmTransactionIndexed(_slot: number): Promise<boolean> {
        return true;
    }

    /**
     * Fetch the latest merkle proofs for multiple compressed accounts specified
     * by an array account hashes
     */
    async getMultipleCompressedAccountProofs(
        hashes: BN254[],
    ): Promise<MerkleContextWithMerkleProof[]> {
        /// Build tree
        const events: PublicTransactionEvent[] = await getParsedEvents(
            this,
        ).then(events => events.reverse());
        const allLeaves: number[][] = [];
        const allLeafIndices: number[] = [];
        for (const event of events) {
            for (
                let index = 0;
                index < event.outputCompressedAccounts.length;
                index++
            ) {
                const hash = event.outputCompressedAccountHashes[index];

                allLeaves.push(hash);
                allLeafIndices.push(event.outputLeafIndices[index]);
            }
        }
        const tree = new MerkleTree(
            this.depth,
            this.lightWasm,
            allLeaves.map(leaf => bn(leaf).toString()),
        );

        /// create merkle proofs and assemble return type
        const merkleProofs: MerkleContextWithMerkleProof[] = [];
        for (let i = 0; i < hashes.length; i++) {
            const leafIndex = tree.indexOf(hashes[i].toString());
            const pathElements = tree.path(leafIndex).pathElements;
            const bnPathElements = pathElements.map(value => bn(value));
            const root = bn(tree.root());
            const merkleProof: MerkleContextWithMerkleProof = {
                hash: hashes[i].toArray('be', 32),
                merkleTree: this.merkleTreeAddress,
                leafIndex: leafIndex,
                merkleProof: bnPathElements,
                nullifierQueue: this.nullifierQueueAddress,
                // test-rpc uses the total number of appended leaves as the
                // root index.
                rootIndex: allLeaves.length,
                root: root,
            };
            merkleProofs.push(merkleProof);
        }

        /// Validate
        merkleProofs.forEach((proof, index) => {
            const leafIndex = proof.leafIndex;
            const computedHash = tree.elements()[leafIndex];
            const hashArr = bn(computedHash).toArray('be', 32);
            if (!hashArr.every((val, index) => val === proof.hash[index])) {
                throw new Error(
                    `Mismatch at index ${index}: expected ${proof.hash.toString()}, got ${hashArr.toString()}`,
                );
            }
        });

        return merkleProofs;
    }

    /**
     * Fetch all the compressed accounts owned by the specified public key.
     * Owner can be a program or user account
     */
    async getCompressedAccountsByOwner(
        owner: PublicKey,
        _config?: GetCompressedAccountsByOwnerConfig,
    ): Promise<WithCursor<CompressedAccountWithMerkleContext[]>> {
        // TODO(swen): revisit
        // if (_config) {
        //     throw new Error(
        //         'dataSlice or filters are not supported in test-rpc. Please use rpc.ts instead.',
        //     );
        // }
        const accounts = await getCompressedAccountsByOwnerTest(this, owner);
        return {
            items: accounts,
            cursor: null,
        };
    }

    /**
     * Fetch the latest compression signatures on the cluster. Results are
     * paginated.
     */
    async getLatestCompressionSignatures(
        _cursor?: string,
        _limit?: number,
    ): Promise<LatestNonVotingSignaturesPaginated> {
        throw new Error(
            'getLatestNonVotingSignaturesWithContext not supported in test-rpc',
        );
    }

    /**
     * Fetch the latest non-voting signatures on the cluster. Results are
     * not paginated.
     */
    async getLatestNonVotingSignatures(
        _limit?: number,
    ): Promise<LatestNonVotingSignatures> {
        throw new Error(
            'getLatestNonVotingSignaturesWithContext not supported in test-rpc',
        );
    }

    /**
     * Fetch all the compressed token accounts owned by the specified public
     * key. Owner can be a program or user account
     */
    async getCompressedTokenAccountsByOwner(
        owner: PublicKey,
        options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    ): Promise<WithCursor<ParsedTokenAccount[]>> {
        return await getCompressedTokenAccountsByOwnerTest(
            this,
            owner,
            options!.mint!,
        );
    }

    /**
     * Fetch all the compressed accounts delegated to the specified public key.
     */
    async getCompressedTokenAccountsByDelegate(
        delegate: PublicKey,
        options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    ): Promise<WithCursor<ParsedTokenAccount[]>> {
        return await getCompressedTokenAccountsByDelegateTest(
            this,
            delegate,
            options.mint!,
        );
    }

    /**
     * Fetch the compressed token balance for the specified account hash
     */
    async getCompressedTokenAccountBalance(
        hash: BN254,
    ): Promise<{ amount: BN }> {
        const account = await getCompressedTokenAccountByHashTest(this, hash);
        return { amount: bn(account.parsed.amount) };
    }

    /**
     * @deprecated use {@link getCompressedTokenBalancesByOwnerV2}.
     * Fetch all the compressed token balances owned by the specified public
     * key. Can filter by mint.
     */
    async getCompressedTokenBalancesByOwner(
        publicKey: PublicKey,
        options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    ): Promise<WithCursor<{ balance: BN; mint: PublicKey }[]>> {
        const accounts = await getCompressedTokenAccountsByOwnerTest(
            this,
            publicKey,
            options.mint!,
        );
        return {
            items: accounts.items.map(account => ({
                balance: bn(account.parsed.amount),
                mint: account.parsed.mint,
            })),
            cursor: null,
        };
    }

    /**
     * Fetch all the compressed token balances owned by the specified public
     * key. Can filter by mint. Uses context.
     */
    async getCompressedTokenBalancesByOwnerV2(
        publicKey: PublicKey,
        options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    ): Promise<WithContext<WithCursor<TokenBalance[]>>> {
        const accounts = await getCompressedTokenAccountsByOwnerTest(
            this,
            publicKey,
            options.mint!,
        );
        return {
            context: { slot: 1 },
            value: {
                items: accounts.items.map(account => ({
                    balance: bn(account.parsed.amount),
                    mint: account.parsed.mint,
                })),
                cursor: null,
            },
        };
    }

    /**
     * Returns confirmed signatures for transactions involving the specified
     * account hash forward in time from genesis to the most recent confirmed
     * block
     *
     * @param hash queried account hash
     */
    async getCompressionSignaturesForAccount(
        _hash: BN254,
    ): Promise<SignatureWithMetadata[]> {
        throw new Error(
            'getCompressionSignaturesForAccount not implemented in test-rpc',
        );
    }

    /**
     * Fetch a confirmed or finalized transaction from the cluster. Return with
     * CompressionInfo
     */
    async getTransactionWithCompressionInfo(
        _signature: string,
    ): Promise<CompressedTransaction | null> {
        throw new Error('getCompressedTransaction not implemented in test-rpc');
    }

    /**
     * Returns confirmed signatures for transactions involving the specified
     * address forward in time from genesis to the most recent confirmed
     * block
     *
     * @param address queried compressed account address
     */
    async getCompressionSignaturesForAddress(
        _address: PublicKey,
        _options?: PaginatedOptions,
    ): Promise<WithCursor<SignatureWithMetadata[]>> {
        throw new Error('getSignaturesForAddress3 not implemented');
    }

    /**
     * Returns confirmed signatures for compression transactions involving the
     * specified account owner forward in time from genesis to the
     * most recent confirmed block
     *
     * @param owner queried owner public key
     */
    async getCompressionSignaturesForOwner(
        _owner: PublicKey,
        _options?: PaginatedOptions,
    ): Promise<WithCursor<SignatureWithMetadata[]>> {
        throw new Error('getSignaturesForOwner not implemented');
    }

    /**
     * Returns confirmed signatures for compression transactions involving the
     * specified token account owner forward in time from genesis to the most
     * recent confirmed block
     */
    async getCompressionSignaturesForTokenOwner(
        _owner: PublicKey,
        _options?: PaginatedOptions,
    ): Promise<WithCursor<SignatureWithMetadata[]>> {
        throw new Error('getSignaturesForTokenOwner not implemented');
    }

    /**
     * Fetch the current indexer health status
     */
    async getIndexerHealth(): Promise<string> {
        return 'ok';
    }

    /**
     * Fetch the current slot that the node is processing
     */
    async getIndexerSlot(): Promise<number> {
        return 1;
    }

    /**
     * Fetch the latest address proofs for new unique addresses specified by an
     * array of addresses.
     *
     * the proof states that said address have not yet been created in respective address tree.
     * @param addresses Array of BN254 new addresses
     * @returns Array of validity proofs for new addresses
     */
    async getMultipleNewAddressProofs(addresses: BN254[]) {
        /// Build tree
        const indexedArray = IndexedArray.default();
        const allAddresses: BN[] = [];
        indexedArray.init();
        const hashes: BN[] = [];
        // TODO(crank): add support for cranked address tree in 'allAddresses'.
        // The Merkle tree root doesnt actually advance beyond init() unless we
        // start emptying the address queue.
        for (let i = 0; i < allAddresses.length; i++) {
            indexedArray.append(bn(allAddresses[i]));
        }
        for (let i = 0; i < indexedArray.elements.length; i++) {
            const hash = indexedArray.hashElement(this.lightWasm, i);
            hashes.push(bn(hash!));
        }
        const tree = new MerkleTree(
            this.depth,
            this.lightWasm,
            hashes.map(hash => bn(hash).toString()),
        );

        /// Creates proof for each address
        const newAddressProofs: MerkleContextWithNewAddressProof[] = [];
        for (let i = 0; i < addresses.length; i++) {
            const [lowElement] = indexedArray.findLowElement(addresses[i]);
            if (!lowElement) throw new Error('Address not found');

            const leafIndex = lowElement.index;

            const pathElements: string[] = tree.path(leafIndex).pathElements;
            const bnPathElements = pathElements.map(value => bn(value));

            const higherRangeValue = indexedArray.get(
                lowElement.nextIndex,
            )!.value;
            const root = bn(tree.root());

            const proof: MerkleContextWithNewAddressProof = {
                root,
                rootIndex: 3,
                value: addresses[i],
                leafLowerRangeValue: lowElement.value,
                leafHigherRangeValue: higherRangeValue,
                nextIndex: bn(lowElement.nextIndex),
                merkleProofHashedIndexedElementLeaf: bnPathElements,
                indexHashedIndexedElementLeaf: bn(lowElement.index),
                merkleTree: this.addressTreeAddress,
                nullifierQueue: this.addressQueueAddress,
            };
            newAddressProofs.push(proof);
        }
        return newAddressProofs;
    }

    async getCompressedMintTokenHolders(
        _mint: PublicKey,
        _options?: PaginatedOptions,
    ): Promise<WithContext<WithCursor<CompressedMintTokenHolders[]>>> {
        throw new Error(
            'getCompressedMintTokenHolders not implemented in test-rpc',
        );
    }

    /**
     * Advanced usage of getValidityProof: fetches ZKP directly from a custom
     * non-rpcprover. Note: This uses the proverEndpoint specified in the
     * constructor. For normal usage, please use {@link getValidityProof}
     * instead.
     *
     * Note: Use RPC class for forested trees. TestRpc is only for custom
     * testing purposes.
     */
    async getValidityProofDirect(
        hashes: BN254[] = [],
        newAddresses: BN254[] = [],
    ): Promise<CompressedProofWithContext> {
        return this.getValidityProof(hashes, newAddresses);
    }

    /**
     * @deprecated This method is not available for TestRpc. Please use
     * {@link getValidityProof} instead.
     */
    async getValidityProofAndRpcContext(
        hashes: HashWithTree[] = [],
        newAddresses: AddressWithTree[] = [],
    ): Promise<WithContext<CompressedProofWithContext>> {
        // NOTE(review): AddressWithTree entries are plain objects, not BN
        // instances, so any non-empty newAddresses array throws here —
        // presumably intentional since test-rpc does not support custom
        // address trees; confirm.
        if (newAddresses.some(address => !(address instanceof BN))) {
            throw new Error('AddressWithTree is not supported in test-rpc');
        }
        return {
            value: await this.getValidityProofV0(hashes, newAddresses),
            context: { slot: 1 },
        };
    }

    /**
     * Fetch the latest validity proof for (1) compressed accounts specified by
     * an array of account hashes. (2) new unique addresses specified by an
     * array of addresses.
     *
     * Validity proofs prove the presence of compressed accounts in state trees
     * and the non-existence of addresses in address trees, respectively. They
     * enable verification without recomputing the merkle proof path, thus
     * lowering verification and data costs.
     *
     * @param hashes        Array of BN254 hashes.
     * @param newAddresses  Array of BN254 new addresses.
     * @returns validity proof with context
     */
    async getValidityProof(
        hashes: BN254[] = [],
        newAddresses: BN254[] = [],
    ): Promise<CompressedProofWithContext> {
        if (newAddresses.some(address => !(address instanceof BN))) {
            throw new Error('AddressWithTree is not supported in test-rpc');
        }

        let validityProof: CompressedProofWithContext;

        if (hashes.length === 0 && newAddresses.length === 0) {
            throw new Error(
                'Empty input. Provide hashes and/or new addresses.',
            );
        } else if (hashes.length > 0 && newAddresses.length === 0) {
            /// inclusion
            const merkleProofsWithContext =
                await this.getMultipleCompressedAccountProofs(hashes);
            const inputs = convertMerkleProofsWithContextToHex(
                merkleProofsWithContext,
            );
            const compressedProof = await proverRequest(
                this.proverEndpoint,
                'inclusion',
                inputs,
                this.log,
            );
            validityProof = {
                compressedProof,
                roots: merkleProofsWithContext.map(proof => proof.root),
                rootIndices: merkleProofsWithContext.map(
                    proof => proof.rootIndex,
                ),
                leafIndices: merkleProofsWithContext.map(
                    proof => proof.leafIndex,
                ),
                leaves: merkleProofsWithContext.map(proof => bn(proof.hash)),
                merkleTrees: merkleProofsWithContext.map(
                    proof => proof.merkleTree,
                ),
                nullifierQueues: merkleProofsWithContext.map(
                    proof => proof.nullifierQueue,
                ),
            };
        } else if (hashes.length === 0 && newAddresses.length > 0) {
            /// new-address
            const newAddressProofs: MerkleContextWithNewAddressProof[] =
                await this.getMultipleNewAddressProofs(newAddresses);

            const inputs =
                convertNonInclusionMerkleProofInputsToHex(newAddressProofs);

            const compressedProof = await proverRequest(
                this.proverEndpoint,
                'new-address',
                inputs,
                this.log,
            );

            validityProof = {
                compressedProof,
                roots: newAddressProofs.map(proof => proof.root),
                // TODO(crank): make dynamic to enable forester support in
                // test-rpc.ts. Currently this is a static root because the
                // address tree doesn't advance.
                rootIndices: newAddressProofs.map(_ => 3),
                leafIndices: newAddressProofs.map(
                    proof => proof.indexHashedIndexedElementLeaf.toNumber(), // TODO: support >32bit
                ),
                leaves: newAddressProofs.map(proof => bn(proof.value)),
                merkleTrees: newAddressProofs.map(proof => proof.merkleTree),
                nullifierQueues: newAddressProofs.map(
                    proof => proof.nullifierQueue,
                ),
            };
        } else if (hashes.length > 0 && newAddresses.length > 0) {
            /// combined
            const merkleProofsWithContext =
                await this.getMultipleCompressedAccountProofs(hashes);
            const inputs = convertMerkleProofsWithContextToHex(
                merkleProofsWithContext,
            );

            const newAddressProofs: MerkleContextWithNewAddressProof[] =
                await this.getMultipleNewAddressProofs(newAddresses);

            const newAddressInputs =
                convertNonInclusionMerkleProofInputsToHex(newAddressProofs);

            const compressedProof = await proverRequest(
                this.proverEndpoint,
                'combined',
                [inputs, newAddressInputs],
                this.log,
            );

            validityProof = {
                compressedProof,
                roots: merkleProofsWithContext
                    .map(proof => proof.root)
                    .concat(newAddressProofs.map(proof => proof.root)),
                rootIndices: merkleProofsWithContext
                    .map(proof => proof.rootIndex)
                    // TODO(crank): make dynamic to enable forester support in
                    // test-rpc.ts. Currently this is a static root because the
                    // address tree doesn't advance.
                    .concat(newAddressProofs.map(_ => 3)),
                leafIndices: merkleProofsWithContext
                    .map(proof => proof.leafIndex)
                    .concat(
                        newAddressProofs.map(
                            proof =>
                                proof.indexHashedIndexedElementLeaf.toNumber(), // TODO: support >32bit
                        ),
                    ),
                leaves: merkleProofsWithContext
                    .map(proof => bn(proof.hash))
                    .concat(newAddressProofs.map(proof => bn(proof.value))),
                merkleTrees: merkleProofsWithContext
                    .map(proof => proof.merkleTree)
                    .concat(newAddressProofs.map(proof => proof.merkleTree)),
                nullifierQueues: merkleProofsWithContext
                    .map(proof => proof.nullifierQueue)
                    .concat(
                        newAddressProofs.map(proof => proof.nullifierQueue),
                    ),
            };
        } else throw new Error('Invalid input');

        return validityProof;
    }

    async getValidityProofV0(
        hashes: HashWithTree[] = [],
        newAddresses: AddressWithTree[] = [],
    ): Promise<CompressedProofWithContext> {
        /// TODO(swen): add support for custom trees
        return this.getValidityProof(
            hashes.map(hash => hash.hash),
            newAddresses.map(address => address.address),
        );
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/test-rpc/index.ts
|
export * from './test-rpc';
export * from './get-parsed-events';
export * from './get-compressed-token-accounts';
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers
|
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/test-helpers/test-rpc/get-parsed-events.ts
|
import {
ParsedMessageAccount,
ParsedTransactionWithMeta,
} from '@solana/web3.js';
import { bs58 } from '@coral-xyz/anchor/dist/cjs/utils/bytes';
import { defaultStaticAccountsStruct } from '../../constants';
import { LightSystemProgram } from '../../programs';
import { Rpc } from '../../rpc';
import { PublicTransactionEvent } from '../../state';
type Deserializer<T> = (data: Buffer, tx: ParsedTransactionWithMeta) => T;
/**
* @internal
* Returns newest first.
*
* */
export async function getParsedEvents(
rpc: Rpc,
): Promise<PublicTransactionEvent[]> {
const { noopProgram, accountCompressionProgram } =
defaultStaticAccountsStruct();
/// Get raw transactions
const signatures = (
await rpc.getConfirmedSignaturesForAddress2(
accountCompressionProgram,
undefined,
'confirmed',
)
).map(s => s.signature);
const txs = await rpc.getParsedTransactions(signatures, {
maxSupportedTransactionVersion: 0,
commitment: 'confirmed',
});
/// Filter by NOOP program
const transactionEvents = txs.filter(
(tx: ParsedTransactionWithMeta | null) => {
if (!tx) {
return false;
}
const accountKeys = tx.transaction.message.accountKeys;
const hasSplNoopAddress = accountKeys.some(
(item: ParsedMessageAccount) => {
const itemStr =
typeof item === 'string'
? item
: item.pubkey.toBase58();
return itemStr === noopProgram.toBase58();
},
);
return hasSplNoopAddress;
},
);
/// Parse events
const parsedEvents = parseEvents(
transactionEvents,
parsePublicTransactionEventWithIdl,
);
return parsedEvents;
}
// Deserialize events from the given parsed transactions using deserializeFn,
// skipping failed or eventless transactions.
export const parseEvents = <T>(
    indexerEventsTransactions: (ParsedTransactionWithMeta | null)[],
    deserializeFn: Deserializer<T>,
): NonNullable<T>[] => {
    const { noopProgram } = defaultStaticAccountsStruct();
    const noopBase58 = noopProgram.toBase58();
    const transactions: NonNullable<T>[] = [];

    for (const tx of indexerEventsTransactions) {
        // Skip null, failed, or inner-instruction-less transactions.
        if (
            !tx ||
            !tx.meta ||
            tx.meta.err ||
            !tx.meta.innerInstructions ||
            tx.meta.innerInstructions.length <= 0
        ) {
            continue;
        }

        /// We only care about the very last inner instruction as it contains the
        /// PublicTransactionEvent
        for (const ix of tx.meta.innerInstructions) {
            if (ix.instructions.length === 0) continue;
            const ixInner = ix.instructions[ix.instructions.length - 1];

            // Type guard for partially parsed web3js types.
            if (
                !('data' in ixInner) ||
                !ixInner.data ||
                ixInner.programId.toBase58() !== noopBase58
            ) {
                continue;
            }

            const raw = bs58.decode(ixInner.data);
            const decodedEvent = deserializeFn(Buffer.from(raw), tx);
            if (decodedEvent !== null && decodedEvent !== undefined) {
                transactions.push(decodedEvent as NonNullable<T>);
            }
        }
    }

    return transactions;
};
// TODO: make it type safe. have to reimplement the types from the IDL.
/**
 * Decode a `PublicTransactionEvent` from raw noop-instruction bytes.
 * Returns null (and logs) when decoding fails.
 */
export const parsePublicTransactionEventWithIdl = (
    data: Buffer,
): PublicTransactionEvent | null => {
    // Decode from a defensive copy so the caller's buffer is never aliased.
    // Buffer.from(buffer) copies; the previous per-byte map() round trip
    // produced the same copy with extra work.
    const numericData = Buffer.from(data);
    try {
        return LightSystemProgram.program.coder.types.decode(
            'PublicTransactionEvent',
            numericData,
        );
    } catch (error) {
        console.error('Error deserializing event:', error);
        return null;
    }
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/compressed-token/rollup.config.js
|
import typescript from '@rollup/plugin-typescript';
import nodePolyfills from 'rollup-plugin-polyfill-node';
import dts from 'rollup-plugin-dts';
import resolve from '@rollup/plugin-node-resolve';
import commonjs from '@rollup/plugin-commonjs';
import alias from '@rollup/plugin-alias';
import json from '@rollup/plugin-json';
// Build one rollup config for a (module format, environment) pair.
// fmt: 'cjs' | 'es'; env: 'browser' | 'node'. Plugin order is significant.
const rolls = (fmt, env) => ({
    input: 'src/index.ts',
    output: {
        dir: `dist/${fmt}/${env}`,
        format: fmt,
        entryFileNames: `[name].${fmt === 'cjs' ? 'cjs' : 'js'}`,
        sourcemap: true,
    },
    // Peer dependencies left unbundled; consumers provide them.
    external: [
        '@solana/web3.js',
        '@coral-xyz/anchor',
        '@solana/spl-token',
        '@lightprotocol/stateless.js',
        'tweetnacl',
    ],
    plugins: [
        json(),
        typescript({
            target: fmt === 'es' ? 'ES2022' : 'ES2017',
            outDir: `dist/${fmt}/${env}`,
            rootDir: 'src',
        }),
        commonjs(),
        resolve({
            browser: env === 'browser',
            preferBuiltins: env === 'node',
            extensions: ['.mjs', '.js', '.json', '.ts'],
            mainFields: ['module', 'main', 'browser'],
        }),
        // Map the 'crypto' built-in to a browser polyfill in browser builds.
        alias({
            entries: [
                {
                    find: 'crypto',
                    replacement:
                        env === 'browser' ? 'crypto-browserify' : 'crypto',
                },
            ],
        }),
        // Node built-in polyfills are only needed for browser bundles.
        env === 'browser' ? nodePolyfills() : undefined,
    ].filter(Boolean),
    // Suppress circular-dependency noise; all other warnings surface.
    onwarn(warning, warn) {
        if (warning.code !== 'CIRCULAR_DEPENDENCY') {
            warn(warning);
        }
    },
});
// Bundle all type declarations into a single dist/types/index.d.ts.
const typesConfig = {
    input: 'src/index.ts',
    output: [{ file: 'dist/types/index.d.ts', format: 'es' }],
    plugins: [dts()],
};
// NOTE(review): no es/node build is emitted — presumably intentional since
// the package maps only cjs for node; confirm against package.json exports.
export default [
    rolls('cjs', 'browser'),
    rolls('es', 'browser'),
    rolls('cjs', 'node'),
    typesConfig,
];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/compressed-token/tsconfig.test.json
|
{
"compilerOptions": {
"esModuleInterop": true,
"rootDirs": ["src", "tests"]
},
"extends": "./tsconfig.json",
"include": ["./tests/**/*.ts", "vitest.config.ts"]
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/compressed-token/package.json
|
{
"name": "@lightprotocol/compressed-token",
"version": "0.16.0",
"description": "JS client to interact with the compressed-token program",
"sideEffects": false,
"main": "dist/cjs/node/index.cjs",
"type": "module",
"exports": {
".": {
"require": "./dist/cjs/node/index.cjs",
"types": "./dist/types/index.d.ts",
"default": "./dist/cjs/node/index.cjs"
},
"./browser": {
"import": "./dist/es/browser/index.js",
"require": "./dist/cjs/browser/index.cjs",
"types": "./dist/types/index.d.ts"
}
},
"types": "./dist/types/index.d.ts",
"files": [
"dist"
],
"scripts": {
"test": "pnpm test:e2e:all",
"test-all": "vitest run",
"test:unit:all": "EXCLUDE_E2E=true vitest run",
"test-all:verbose": "vitest run --reporter=verbose",
"test-validator": "./../../cli/test_bin/run test-validator --prover-run-mode rpc",
"test:e2e:create-mint": "pnpm test-validator && vitest run tests/e2e/create-mint.test.ts",
"test:e2e:create-token-pool": "pnpm test-validator && vitest run tests/e2e/create-token-pool.test.ts",
"test:e2e:mint-to": "pnpm test-validator && vitest run tests/e2e/mint-to.test.ts --reporter=verbose",
"test:e2e:approve-and-mint-to": "pnpm test-validator && vitest run tests/e2e/approve-and-mint-to.test.ts --reporter=verbose",
"test:e2e:merge-token-accounts": "pnpm test-validator && vitest run tests/e2e/merge-token-accounts.test.ts --reporter=verbose",
"test:e2e:transfer": "pnpm test-validator && vitest run tests/e2e/transfer.test.ts --reporter=verbose",
"test:e2e:compress": "pnpm test-validator && vitest run tests/e2e/compress.test.ts --reporter=verbose",
"test:e2e:compress-spl-token-account": "pnpm test-validator && vitest run tests/e2e/compress-spl-token-account.test.ts --reporter=verbose",
"test:e2e:decompress": "pnpm test-validator && vitest run tests/e2e/decompress.test.ts --reporter=verbose",
"test:e2e:rpc-token-interop": "pnpm test-validator && vitest run tests/e2e/rpc-token-interop.test.ts --reporter=verbose",
"test:e2e:custom-program-id": "vitest run tests/e2e/custom-program-id.test.ts --reporter=verbose",
"test:e2e:all": "pnpm test-validator && vitest run tests/e2e/create-mint.test.ts && vitest run tests/e2e/mint-to.test.ts && vitest run tests/e2e/transfer.test.ts && vitest run tests/e2e/compress.test.ts && vitest run tests/e2e/compress-spl-token-account.test.ts && vitest run tests/e2e/decompress.test.ts && vitest run tests/e2e/create-token-pool.test.ts && vitest run tests/e2e/approve-and-mint-to.test.ts && vitest run tests/e2e/rpc-token-interop.test.ts && vitest run tests/e2e/custom-program-id.test.ts",
"pull-idl": "../../scripts/push-compressed-token-idl.sh",
"build": "rimraf dist && pnpm pull-idl && pnpm build:bundle",
"build:bundle": "rollup -c",
"format": "prettier --write .",
"lint": "eslint ."
},
"keywords": [
"zk",
"compression",
"light",
"stateless",
"solana"
],
"maintainers": [
{
"name": "Light Protocol Maintainers",
"email": "friends@lightprotocol.com"
}
],
"license": "Apache-2.0",
"peerDependencies": {
"@lightprotocol/stateless.js": "workspace:*"
},
"dependencies": {
"@coral-xyz/anchor": "0.29.0",
"@solana/web3.js": "1.95.3",
"@solana/spl-token": "0.4.8",
"buffer": "6.0.3",
"tweetnacl": "1.0.3"
},
"devDependencies": {
"@esbuild-plugins/node-globals-polyfill": "^0.2.3",
"@lightprotocol/hasher.rs": "workspace:*",
"@lightprotocol/programs": "workspace:*",
"@rollup/plugin-alias": "^5.1.0",
"@rollup/plugin-babel": "^6.0.4",
"@rollup/plugin-commonjs": "^26.0.1",
"@rollup/plugin-json": "^6.1.0",
"@rollup/plugin-node-resolve": "^15.2.3",
"@rollup/plugin-replace": "^5.0.7",
"@rollup/plugin-terser": "^0.4.4",
"@rollup/plugin-typescript": "^11.1.6",
"@types/node": "^22.5.5",
"@typescript-eslint/eslint-plugin": "^7.13.1",
"@typescript-eslint/parser": "^7.13.1",
"add": "^2.0.6",
"crypto-browserify": "^3.12.0",
"eslint": "^8.56.0",
"eslint-plugin-import": "^2.30.0",
"eslint-plugin-n": "^17.10.2",
"eslint-plugin-promise": "^7.1.0",
"eslint-plugin-vitest": "^0.5.4",
"prettier": "^3.3.3",
"rimraf": "^6.0.1",
"rollup": "^4.21.3",
"rollup-plugin-copy": "^3.5.0",
"rollup-plugin-dts": "^6.1.1",
"rollup-plugin-polyfill-node": "^0.13.0",
"rollup-plugin-visualizer": "^5.12.0",
"ts-node": "^10.9.2",
"tslib": "^2.7.0",
"typescript": "^5.6.2",
"vitest": "^2.1.1"
},
"nx": {
"targets": {
"build": {
"inputs": [
"{workspaceRoot}/cli",
"{workspaceRoot}/target/idl",
"{workspaceRoot}/target/types"
]
}
}
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/compressed-token/.prettierrc
|
{
"semi": true,
"trailingComma": "all",
"singleQuote": true,
"printWidth": 80,
"useTabs": false,
"tabWidth": 4,
"bracketSpacing": true,
"arrowParens": "avoid"
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/compressed-token/tsconfig.json
|
{
"$schema": "https://json.schemastore.org/tsconfig",
"compilerOptions": {
"importHelpers": true,
"outDir": "./dist",
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"strict": true,
"declaration": false,
"target": "ESNext",
"module": "ESNext",
"moduleResolution": "Node",
"lib": ["ESNext", "DOM"],
"types": ["node"],
"skipLibCheck": false
},
"include": ["./src/**/*.ts", "rollup.config.js"]
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/compressed-token/.eslintignore
|
node_modules
lib
dist
| 0
|
solana_public_repos/Lightprotocol/light-protocol/js
|
solana_public_repos/Lightprotocol/light-protocol/js/compressed-token/vitest.config.ts
|
// Vitest configuration for this package.
import { defineConfig } from 'vitest/config';
import { resolve } from 'path';
export default defineConfig({
    logLevel: 'info',
    test: {
        // EXCLUDE_E2E=true runs no test files at all (see the package's
        // test:unit:all script, which sets this env var).
        include: process.env.EXCLUDE_E2E
            ? []
            : ['src/**/__tests__/*.test.ts', 'tests/**/*.test.ts'],
        // Enable in-source tests guarded by `import.meta.vitest`.
        includeSource: ['src/**/*.{js,ts}'],
        exclude: ['src/program.ts'],
        // Generous timeout: e2e suites run against a local test validator.
        testTimeout: 350000,
        hookTimeout: 30000,
    },
    define: {
        // Strip in-source test blocks from production builds.
        'import.meta.vitest': false,
    },
    build: {
        lib: {
            formats: ['es', 'cjs'],
            entry: resolve(__dirname, 'src/index.ts'),
            fileName: 'index',
        },
    },
});
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.