repo_id
stringclasses
279 values
file_path
stringlengths
43
179
content
stringlengths
1
4.18M
__index_level_0__
int64
0
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_balance_by_owner_post_request.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetCompressedBalanceByOwnerPostRequest { /// An ID to identify the request. #[serde(rename = "id")] pub id: Id, /// The version of the JSON-RPC protocol. #[serde(rename = "jsonrpc")] pub jsonrpc: Jsonrpc, /// The name of the method to invoke. #[serde(rename = "method")] pub method: Method, #[serde(rename = "params")] pub params: Box<models::GetCompressedBalanceByOwnerPostRequestParams>, } impl GetCompressedBalanceByOwnerPostRequest { pub fn new( id: Id, jsonrpc: Jsonrpc, method: Method, params: models::GetCompressedBalanceByOwnerPostRequestParams, ) -> GetCompressedBalanceByOwnerPostRequest { GetCompressedBalanceByOwnerPostRequest { id, jsonrpc, method, params: Box::new(params), } } } /// An ID to identify the request. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Id { #[serde(rename = "test-account")] TestAccount, } impl Default for Id { fn default() -> Id { Self::TestAccount } } /// The version of the JSON-RPC protocol. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Jsonrpc { #[serde(rename = "2.0")] Variant2Period0, } impl Default for Jsonrpc { fn default() -> Jsonrpc { Self::Variant2Period0 } } /// The name of the method to invoke. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Method { #[serde(rename = "getCompressedBalanceByOwner")] GetCompressedBalanceByOwner, } impl Default for Method { fn default() -> Method { Self::GetCompressedBalanceByOwner } }
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_balances_by_owner_post_request.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetCompressedTokenBalancesByOwnerPostRequest { /// An ID to identify the request. #[serde(rename = "id")] pub id: Id, /// The version of the JSON-RPC protocol. #[serde(rename = "jsonrpc")] pub jsonrpc: Jsonrpc, /// The name of the method to invoke. #[serde(rename = "method")] pub method: Method, #[serde(rename = "params")] pub params: Box<models::GetCompressedTokenAccountsByOwnerPostRequestParams>, } impl GetCompressedTokenBalancesByOwnerPostRequest { pub fn new( id: Id, jsonrpc: Jsonrpc, method: Method, params: models::GetCompressedTokenAccountsByOwnerPostRequestParams, ) -> GetCompressedTokenBalancesByOwnerPostRequest { GetCompressedTokenBalancesByOwnerPostRequest { id, jsonrpc, method, params: Box::new(params), } } } /// An ID to identify the request. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Id { #[serde(rename = "test-account")] TestAccount, } impl Default for Id { fn default() -> Id { Self::TestAccount } } /// The version of the JSON-RPC protocol. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Jsonrpc { #[serde(rename = "2.0")] Variant2Period0, } impl Default for Jsonrpc { fn default() -> Jsonrpc { Self::Variant2Period0 } } /// The name of the method to invoke. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Method { #[serde(rename = "getCompressedTokenBalancesByOwner")] GetCompressedTokenBalancesByOwner, } impl Default for Method { fn default() -> Method { Self::GetCompressedTokenBalancesByOwner } }
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_multiple_new_address_proofs_post_200_response_result.rs
/*
 * photon-indexer
 *
 * Solana indexer for general compression
 *
 * The version of the OpenAPI document: 0.45.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;

/// Result payload of `getMultipleNewAddressProofs`: the slot context plus
/// one Merkle proof per requested new address.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetMultipleNewAddressProofsPost200ResponseResult {
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    #[serde(rename = "value")]
    pub value: Vec<models::MerkleContextWithNewAddressProof>,
}

impl GetMultipleNewAddressProofsPost200ResponseResult {
    /// Wraps `context` in a `Box` and pairs it with the proof list.
    pub fn new(
        context: models::Context,
        value: Vec<models::MerkleContextWithNewAddressProof>,
    ) -> Self {
        Self {
            context: Box::new(context),
            value,
        }
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/account_with_optional_token_data.rs
/*
 * photon-indexer
 *
 * Solana indexer for general compression
 *
 * The version of the OpenAPI document: 0.45.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;

/// A compressed account, optionally accompanied by parsed SPL token data
/// when the account holds a token balance.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountWithOptionalTokenData {
    #[serde(rename = "account")]
    pub account: Box<models::Account>,
    // Omitted from the JSON output entirely when `None`.
    #[serde(rename = "optionalTokenData", skip_serializing_if = "Option::is_none")]
    pub optional_token_data: Option<Box<models::TokenData>>,
}

impl AccountWithOptionalTokenData {
    /// Builds the wrapper with no token data attached.
    pub fn new(account: models::Account) -> Self {
        Self {
            account: Box::new(account),
            optional_token_data: None,
        }
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_account_proof_post_request.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetCompressedAccountProofPostRequest { /// An ID to identify the request. #[serde(rename = "id")] pub id: Id, /// The version of the JSON-RPC protocol. #[serde(rename = "jsonrpc")] pub jsonrpc: Jsonrpc, /// The name of the method to invoke. #[serde(rename = "method")] pub method: Method, #[serde(rename = "params")] pub params: Box<models::GetCompressedAccountProofPostRequestParams>, } impl GetCompressedAccountProofPostRequest { pub fn new( id: Id, jsonrpc: Jsonrpc, method: Method, params: models::GetCompressedAccountProofPostRequestParams, ) -> GetCompressedAccountProofPostRequest { GetCompressedAccountProofPostRequest { id, jsonrpc, method, params: Box::new(params), } } } /// An ID to identify the request. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Id { #[serde(rename = "test-account")] TestAccount, } impl Default for Id { fn default() -> Id { Self::TestAccount } } /// The version of the JSON-RPC protocol. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Jsonrpc { #[serde(rename = "2.0")] Variant2Period0, } impl Default for Jsonrpc { fn default() -> Jsonrpc { Self::Variant2Period0 } } /// The name of the method to invoke. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Method { #[serde(rename = "getCompressedAccountProof")] GetCompressedAccountProof, } impl Default for Method { fn default() -> Method { Self::GetCompressedAccountProof } }
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_multiple_new_address_proofs_post_request.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetMultipleNewAddressProofsPostRequest { /// An ID to identify the request. #[serde(rename = "id")] pub id: Id, /// The version of the JSON-RPC protocol. #[serde(rename = "jsonrpc")] pub jsonrpc: Jsonrpc, /// The name of the method to invoke. #[serde(rename = "method")] pub method: Method, #[serde(rename = "params")] pub params: Vec<String>, } impl GetMultipleNewAddressProofsPostRequest { pub fn new( id: Id, jsonrpc: Jsonrpc, method: Method, params: Vec<String>, ) -> GetMultipleNewAddressProofsPostRequest { GetMultipleNewAddressProofsPostRequest { id, jsonrpc, method, params, } } } /// An ID to identify the request. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Id { #[serde(rename = "test-account")] TestAccount, } impl Default for Id { fn default() -> Id { Self::TestAccount } } /// The version of the JSON-RPC protocol. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Jsonrpc { #[serde(rename = "2.0")] Variant2Period0, } impl Default for Jsonrpc { fn default() -> Jsonrpc { Self::Variant2Period0 } } /// The name of the method to invoke. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Method { #[serde(rename = "getMultipleNewAddressProofs")] GetMultipleNewAddressProofs, } impl Default for Method { fn default() -> Method { Self::GetMultipleNewAddressProofs } }
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_multiple_compressed_accounts_post_request_params.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; /// GetMultipleCompressedAccountsPostRequestParams : Request for compressed account data #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetMultipleCompressedAccountsPostRequestParams { #[serde( rename = "addresses", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub addresses: Option<Option<Vec<String>>>, #[serde( rename = "hashes", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none" )] pub hashes: Option<Option<Vec<String>>>, } impl GetMultipleCompressedAccountsPostRequestParams { /// Request for compressed account data pub fn new() -> GetMultipleCompressedAccountsPostRequestParams { GetMultipleCompressedAccountsPostRequestParams { addresses: None, hashes: None, } } }
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/compressed_proof_with_context.rs
/*
 * photon-indexer
 *
 * Solana indexer for general compression
 *
 * The version of the OpenAPI document: 0.45.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;

/// A validity proof bundled with the Merkle-tree context it was generated
/// against (leaves, their indices, the trees, and the roots used).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct CompressedProofWithContext {
    #[serde(rename = "compressedProof")]
    pub compressed_proof: Box<models::CompressedProof>,
    #[serde(rename = "leafIndices")]
    pub leaf_indices: Vec<i32>,
    #[serde(rename = "leaves")]
    pub leaves: Vec<String>,
    #[serde(rename = "merkleTrees")]
    pub merkle_trees: Vec<String>,
    #[serde(rename = "rootIndices")]
    pub root_indices: Vec<i32>,
    #[serde(rename = "roots")]
    pub roots: Vec<String>,
}

impl CompressedProofWithContext {
    /// Builds the bundle; the proof itself is boxed to keep the struct
    /// small when embedded in larger responses.
    pub fn new(
        compressed_proof: models::CompressedProof,
        leaf_indices: Vec<i32>,
        leaves: Vec<String>,
        merkle_trees: Vec<String>,
        root_indices: Vec<i32>,
        roots: Vec<String>,
    ) -> Self {
        Self {
            compressed_proof: Box::new(compressed_proof),
            leaf_indices,
            leaves,
            merkle_trees,
            root_indices,
            roots,
        }
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/context.rs
/*
 * photon-indexer
 *
 * Solana indexer for general compression
 *
 * The version of the OpenAPI document: 0.45.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;

/// Response context: the slot at which the indexer served the data.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct Context {
    #[serde(rename = "slot")]
    pub slot: i32,
}

impl Context {
    /// Wraps a slot number in a `Context`.
    pub fn new(slot: i32) -> Self {
        Self { slot }
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_multiple_new_address_proofs_post_200_response.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetMultipleNewAddressProofsPost200Response { #[serde(rename = "error", skip_serializing_if = "Option::is_none")] pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>, /// An ID to identify the response. #[serde(rename = "id")] pub id: Id, /// The version of the JSON-RPC protocol. #[serde(rename = "jsonrpc")] pub jsonrpc: Jsonrpc, #[serde(rename = "result", skip_serializing_if = "Option::is_none")] pub result: Option<Box<models::GetMultipleNewAddressProofsPost200ResponseResult>>, } impl GetMultipleNewAddressProofsPost200Response { pub fn new(id: Id, jsonrpc: Jsonrpc) -> GetMultipleNewAddressProofsPost200Response { GetMultipleNewAddressProofsPost200Response { error: None, id, jsonrpc, result: None, } } } /// An ID to identify the response. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Id { #[serde(rename = "test-account")] TestAccount, } impl Default for Id { fn default() -> Id { Self::TestAccount } } /// The version of the JSON-RPC protocol. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Jsonrpc { #[serde(rename = "2.0")] Variant2Period0, } impl Default for Jsonrpc { fn default() -> Jsonrpc { Self::Variant2Period0 } }
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_latest_non_voting_signatures_post_request.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetLatestNonVotingSignaturesPostRequest { /// An ID to identify the request. #[serde(rename = "id")] pub id: Id, /// The version of the JSON-RPC protocol. #[serde(rename = "jsonrpc")] pub jsonrpc: Jsonrpc, /// The name of the method to invoke. #[serde(rename = "method")] pub method: Method, #[serde(rename = "params")] pub params: Box<models::GetLatestCompressionSignaturesPostRequestParams>, } impl GetLatestNonVotingSignaturesPostRequest { pub fn new( id: Id, jsonrpc: Jsonrpc, method: Method, params: models::GetLatestCompressionSignaturesPostRequestParams, ) -> GetLatestNonVotingSignaturesPostRequest { GetLatestNonVotingSignaturesPostRequest { id, jsonrpc, method, params: Box::new(params), } } } /// An ID to identify the request. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Id { #[serde(rename = "test-account")] TestAccount, } impl Default for Id { fn default() -> Id { Self::TestAccount } } /// The version of the JSON-RPC protocol. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Jsonrpc { #[serde(rename = "2.0")] Variant2Period0, } impl Default for Jsonrpc { fn default() -> Jsonrpc { Self::Variant2Period0 } } /// The name of the method to invoke. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Method { #[serde(rename = "getLatestNonVotingSignatures")] GetLatestNonVotingSignatures, } impl Default for Method { fn default() -> Method { Self::GetLatestNonVotingSignatures } }
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_account_post_request.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetCompressedAccountPostRequest { /// An ID to identify the request. #[serde(rename = "id")] pub id: Id, /// The version of the JSON-RPC protocol. #[serde(rename = "jsonrpc")] pub jsonrpc: Jsonrpc, /// The name of the method to invoke. #[serde(rename = "method")] pub method: Method, #[serde(rename = "params")] pub params: Box<models::GetCompressedAccountPostRequestParams>, } impl GetCompressedAccountPostRequest { pub fn new( id: Id, jsonrpc: Jsonrpc, method: Method, params: models::GetCompressedAccountPostRequestParams, ) -> GetCompressedAccountPostRequest { GetCompressedAccountPostRequest { id, jsonrpc, method, params: Box::new(params), } } } /// An ID to identify the request. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Id { #[serde(rename = "test-account")] TestAccount, } impl Default for Id { fn default() -> Id { Self::TestAccount } } /// The version of the JSON-RPC protocol. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Jsonrpc { #[serde(rename = "2.0")] Variant2Period0, } impl Default for Jsonrpc { fn default() -> Jsonrpc { Self::Variant2Period0 } } /// The name of the method to invoke. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Method { #[serde(rename = "getCompressedAccount")] GetCompressedAccount, } impl Default for Method { fn default() -> Method { Self::GetCompressedAccount } }
0
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_transaction_with_compression_info_post_request.rs
/* * photon-indexer * * Solana indexer for general compression * * The version of the OpenAPI document: 0.45.0 * * Generated by: https://openapi-generator.tech */ use crate::models; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub struct GetTransactionWithCompressionInfoPostRequest { /// An ID to identify the request. #[serde(rename = "id")] pub id: Id, /// The version of the JSON-RPC protocol. #[serde(rename = "jsonrpc")] pub jsonrpc: Jsonrpc, /// The name of the method to invoke. #[serde(rename = "method")] pub method: Method, #[serde(rename = "params")] pub params: Box<models::GetTransactionWithCompressionInfoPostRequestParams>, } impl GetTransactionWithCompressionInfoPostRequest { pub fn new( id: Id, jsonrpc: Jsonrpc, method: Method, params: models::GetTransactionWithCompressionInfoPostRequestParams, ) -> GetTransactionWithCompressionInfoPostRequest { GetTransactionWithCompressionInfoPostRequest { id, jsonrpc, method, params: Box::new(params), } } } /// An ID to identify the request. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Id { #[serde(rename = "test-account")] TestAccount, } impl Default for Id { fn default() -> Id { Self::TestAccount } } /// The version of the JSON-RPC protocol. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Jsonrpc { #[serde(rename = "2.0")] Variant2Period0, } impl Default for Jsonrpc { fn default() -> Jsonrpc { Self::Variant2Period0 } } /// The name of the method to invoke. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Method { #[serde(rename = "getTransactionWithCompressionInfo")] GetTransactionWithCompressionInfo, } impl Default for Method { fn default() -> Method { Self::GetTransactionWithCompressionInfo } }
0
solana_public_repos/Lightprotocol/light-protocol
solana_public_repos/Lightprotocol/light-protocol/test-utils/Cargo.toml
[package] name = "light-test-utils" version = "1.2.1" description = "Utilities used in Light Protocol program tests" repository = "https://github.com/Lightprotocol/light-protocol" license = "Apache-2.0" edition = "2021" [features] default = [] devenv = [] [dependencies] anchor-lang = { workspace = true } anchor-spl = { workspace = true } anyhow = "1.0" ark-ff = "0.4" light-hash-set = { workspace=true } num-bigint = "0.4" num-traits = "0.2" solana-program-test = { workspace = true } solana-sdk = { workspace = true } solana-client = { workspace = true } thiserror = "1.0" light-macros = { path = "../macros/light", version = "1.1.0" } account-compression = { workspace = true } light-compressed-token = { workspace = true } light-system-program = { workspace = true } light-registry = { workspace = true } spl-token = { workspace = true, features = ["no-entrypoint"] } solana-transaction-status = { workspace = true } tokio = { workspace = true } light-prover-client = { path = "../circuit-lib/light-prover-client", version = "1.2.0" } reqwest = "0.11.26" light-hasher = { version = "1.1.0", path = "../merkle-tree/hasher" } light-merkle-tree-reference = { version = "1.1.0", path = "../merkle-tree/reference" } light-concurrent-merkle-tree = { version = "1.1.0", path = "../merkle-tree/concurrent" } light-indexed-merkle-tree = { path = "../merkle-tree/indexed/", version = "1.1.0" } light-verifier = { path = "../circuit-lib/verifier", version = "1.1.0" } light-utils = { path = "../utils", version = "1.1.0" } light-program-test = { workspace = true } forester-utils = { workspace = true } memoffset = "0.9.1" rand = "0.8" photon-api = { workspace = true } log = "0.4" serde = { version = "1.0.197", features = ["derive"] } async-trait = "0.1.82" light-client = { workspace = true } spl-token-2022 = { workspace = true } [dev-dependencies] rand = "0.8"
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_compressed_tx.rs
use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; use forester_utils::indexer::{Indexer, StateMerkleTreeAccounts}; use forester_utils::{get_concurrent_merkle_tree, get_hash_set, AccountZeroCopy}; use light_client::rpc::RpcConnection; use light_hasher::Poseidon; use light_system_program::sdk::event::MerkleTreeSequenceNumber; use light_system_program::sdk::{ compressed_account::{CompressedAccount, CompressedAccountWithMerkleContext}, event::PublicTransactionEvent, invoke::get_sol_pool_pda, }; use log::debug; use num_bigint::BigUint; use num_traits::FromBytes; use solana_sdk::account::ReadableAccount; use solana_sdk::pubkey::Pubkey; pub struct AssertCompressedTransactionInputs<'a, R: RpcConnection, I: Indexer<R>> { pub rpc: &'a mut R, pub test_indexer: &'a mut I, pub output_compressed_accounts: &'a [CompressedAccount], pub created_output_compressed_accounts: &'a [CompressedAccountWithMerkleContext], pub input_compressed_account_hashes: &'a [[u8; 32]], pub output_merkle_tree_snapshots: &'a [MerkleTreeTestSnapShot], pub input_merkle_tree_snapshots: &'a [MerkleTreeTestSnapShot], pub created_addresses: &'a [[u8; 32]], pub address_queue_pubkeys: &'a [Pubkey], pub event: &'a PublicTransactionEvent, pub sorted_output_accounts: bool, pub compress_or_decompress_lamports: Option<u64>, pub is_compress: bool, pub relay_fee: Option<u64>, pub compression_recipient: Option<Pubkey>, pub recipient_balance_pre: u64, pub compressed_sol_pda_balance_pre: u64, } /// General tx assert: /// 1. outputs created /// 2. inputs nullified /// 3. addressed inserted into address queue /// 4. Public Transaction event emitted correctly /// 5. Merkle tree was updated correctly /// 6. TODO: Fees have been paid (after fee refactor) /// 7. 
Check compression amount was transferred pub async fn assert_compressed_transaction<R: RpcConnection, I: Indexer<R>>( input: AssertCompressedTransactionInputs<'_, R, I>, ) { // CHECK 1 assert_created_compressed_accounts( input.output_compressed_accounts, input .output_merkle_tree_snapshots .iter() .map(|x| x.accounts.merkle_tree) .collect::<Vec<_>>() .as_slice(), input.created_output_compressed_accounts, input.sorted_output_accounts, ); // CHECK 2 assert_nullifiers_exist_in_hash_sets( input.rpc, input.input_merkle_tree_snapshots, input.input_compressed_account_hashes, ) .await; // CHECK 3 assert_addresses_exist_in_hash_sets( input.rpc, input.address_queue_pubkeys, input.created_addresses, ) .await; // CHECK 5 let sequence_numbers = assert_merkle_tree_after_tx( input.rpc, input.output_merkle_tree_snapshots, input.test_indexer, ) .await; // CHECK 4 assert_public_transaction_event( input.event, Some(&input.input_compressed_account_hashes.to_vec()), input .output_merkle_tree_snapshots .iter() .map(|x| x.accounts) .collect::<Vec<_>>() .as_slice(), &input .created_output_compressed_accounts .iter() .map(|x| x.merkle_context.leaf_index) .collect::<Vec<_>>(), input.compress_or_decompress_lamports, input.is_compress, input.relay_fee, sequence_numbers, ); // CHECK 7 if let Some(compress_or_decompress_lamports) = input.compress_or_decompress_lamports { assert_compression( input.rpc, compress_or_decompress_lamports, input.compressed_sol_pda_balance_pre, input.recipient_balance_pre, &input.compression_recipient.unwrap_or_default(), input.is_compress, ) .await; } } pub async fn assert_nullifiers_exist_in_hash_sets<R: RpcConnection>( rpc: &mut R, snapshots: &[MerkleTreeTestSnapShot], input_compressed_account_hashes: &[[u8; 32]], ) { for (i, hash) in input_compressed_account_hashes.iter().enumerate() { let nullifier_queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, snapshots[i].accounts.nullifier_queue).await }; assert!(nullifier_queue 
.contains(&BigUint::from_be_bytes(hash.as_slice()), None) .unwrap()); } } pub async fn assert_addresses_exist_in_hash_sets<R: RpcConnection>( rpc: &mut R, address_queue_pubkeys: &[Pubkey], created_addresses: &[[u8; 32]], ) { for (address, pubkey) in created_addresses.iter().zip(address_queue_pubkeys) { let address_queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, *pubkey).await }; assert!(address_queue .contains(&BigUint::from_be_bytes(address), None) .unwrap()); } } pub fn assert_created_compressed_accounts( output_compressed_accounts: &[CompressedAccount], output_merkle_tree_pubkeys: &[Pubkey], created_out_compressed_accounts: &[CompressedAccountWithMerkleContext], _sorted: bool, ) { for output_account in created_out_compressed_accounts.iter() { assert!(output_compressed_accounts.iter().any(|x| x.lamports == output_account.compressed_account.lamports && x.owner == output_account.compressed_account.owner && x.data == output_account.compressed_account.data && x.address == output_account.compressed_account.address),); assert!(output_merkle_tree_pubkeys .iter() .any(|x| *x == output_account.merkle_context.merkle_tree_pubkey),); } } #[allow(clippy::too_many_arguments)] pub fn assert_public_transaction_event( event: &PublicTransactionEvent, input_compressed_account_hashes: Option<&Vec<[u8; 32]>>, output_merkle_tree_accounts: &[StateMerkleTreeAccounts], output_leaf_indices: &Vec<u32>, compress_or_decompress_lamports: Option<u64>, is_compress: bool, relay_fee: Option<u64>, sequence_numbers: Vec<MerkleTreeSequenceNumber>, ) { assert_eq!( event.input_compressed_account_hashes, *input_compressed_account_hashes.unwrap_or(&Vec::<[u8; 32]>::new()), "assert_public_transaction_event: input compressed account hashes mismatch" ); for account in event.output_compressed_accounts.iter() { assert!( output_merkle_tree_accounts .iter() .any(|x| x.merkle_tree == event.pubkey_array[account.merkle_tree_index as usize]), // output_merkle_tree_accounts[account.merkle_tree_index as 
usize].merkle_tree, "assert_public_transaction_event: output state merkle tree account index mismatch" ); } assert_eq!( event.output_leaf_indices, *output_leaf_indices, "assert_public_transaction_event: output leaf indices mismatch" ); assert_eq!( event.compress_or_decompress_lamports, compress_or_decompress_lamports, "assert_public_transaction_event: compression lamports mismatch" ); assert_eq!( event.is_compress, is_compress, "assert_public_transaction_event: is_compress mismatch" ); assert_eq!( event.relay_fee, relay_fee, "assert_public_transaction_event: relay fee mismatch" ); let mut updated_sequence_numbers = event.sequence_numbers.clone(); for account in event.output_compressed_accounts.iter() { let merkle_tree_pubkey = event.pubkey_array[account.merkle_tree_index as usize]; let index = &mut updated_sequence_numbers .iter_mut() .find(|x| x.pubkey == merkle_tree_pubkey); if index.is_none() { debug!("reference sequence numbers: {:?}", sequence_numbers); debug!("event: {:?}", event); panic!( "merkle tree pubkey not found in sequence numbers : {:?}", merkle_tree_pubkey ); } else { index.as_mut().unwrap().seq += 1; } } for sequence_number in updated_sequence_numbers.iter() { sequence_numbers.iter().any(|x| x == sequence_number); } } #[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] pub struct MerkleTreeTestSnapShot { pub accounts: StateMerkleTreeAccounts, pub root: [u8; 32], pub next_index: usize, pub num_added_accounts: usize, pub merkle_tree_account_lamports: u64, pub queue_account_lamports: u64, pub cpi_context_account_lamports: u64, } // TODO: add assert that changelog, seq number is updated correctly /// Asserts that the merkle tree account has been updated correctly, /// by comparing the merkle tree account with the test indexer merkle tree. /// Asserts: /// 1. The root has been updated /// 2. 
The next index has been updated pub async fn assert_merkle_tree_after_tx<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, snapshots: &[MerkleTreeTestSnapShot], test_indexer: &mut I, ) -> Vec<MerkleTreeSequenceNumber> { let mut deduped_snapshots = snapshots.to_vec(); deduped_snapshots.sort(); deduped_snapshots.dedup(); let mut sequence_numbers = Vec::new(); for (i, snapshot) in deduped_snapshots.iter().enumerate() { let merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>( rpc, snapshot.accounts.merkle_tree, ) .await; debug!("sequence number: {:?}", merkle_tree.next_index() as u64); debug!("next index: {:?}", snapshot.next_index); debug!("prev sequence number: {:?}", snapshot.num_added_accounts); sequence_numbers.push(MerkleTreeSequenceNumber { pubkey: snapshot.accounts.merkle_tree, seq: merkle_tree.sequence_number() as u64, }); if merkle_tree.root() == snapshot.root { debug!("deduped_snapshots: {:?}", deduped_snapshots); debug!("i: {:?}", i); panic!("merkle tree root update failed, it should have updated but didn't"); } assert_eq!( merkle_tree.next_index(), snapshot.next_index + snapshot.num_added_accounts ); let test_indexer_merkle_tree = test_indexer .get_state_merkle_trees_mut() .iter_mut() .find(|x| x.accounts.merkle_tree == snapshot.accounts.merkle_tree) .expect("merkle tree not found in test indexer"); if merkle_tree.root() != test_indexer_merkle_tree.merkle_tree.root() { // The following lines are just debug prints debug!("Merkle tree pubkey {:?}", snapshot.accounts.merkle_tree); for (i, leaf) in test_indexer_merkle_tree.merkle_tree.layers[0] .iter() .enumerate() { debug!("test_indexer_merkle_tree index {} leaf: {:?}", i, leaf); } for i in 0..16 { debug!("root {} {:?}", i, merkle_tree.roots.get(i)); } panic!("merkle tree root update failed"); } } sequence_numbers } /// Takes a snapshot of the provided the onchain Merkle trees. /// Snapshot data: /// 1. root /// 2. next_index /// 3. 
num_added_accounts // so that we can assert the expected next index after tx /// 4. lamports of all bundle accounts pub async fn get_merkle_tree_snapshots<R: RpcConnection>( rpc: &mut R, accounts: &[StateMerkleTreeAccounts], ) -> Vec<MerkleTreeTestSnapShot> { let mut snapshots = Vec::new(); for account_bundle in accounts.iter() { let merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>( rpc, account_bundle.merkle_tree, ) .await; let merkle_tree_account = AccountZeroCopy::<StateMerkleTreeAccount>::new(rpc, account_bundle.merkle_tree).await; let queue_account_lamports = match rpc .get_account(account_bundle.nullifier_queue) .await .unwrap() { Some(x) => x.lamports, None => 0, }; let cpi_context_account_lamports = match rpc.get_account(account_bundle.cpi_context).await.unwrap() { Some(x) => x.lamports, None => 0, }; snapshots.push(MerkleTreeTestSnapShot { accounts: *account_bundle, root: merkle_tree.root(), next_index: merkle_tree.next_index(), num_added_accounts: accounts .iter() .filter(|x| x.merkle_tree == account_bundle.merkle_tree) .count(), merkle_tree_account_lamports: merkle_tree_account.account.lamports(), queue_account_lamports, cpi_context_account_lamports, }); } snapshots } pub async fn assert_compression<R: RpcConnection>( context: &mut R, compress_amount: u64, compressed_sol_pda_balance_pre: u64, recipient_balance_pre: u64, recipient: &Pubkey, is_compress: bool, ) { if is_compress { let compressed_sol_pda_balance = match context.get_account(get_sol_pool_pda()).await { Ok(Some(account)) => account.lamports, _ => 0, }; assert_eq!( compressed_sol_pda_balance, compressed_sol_pda_balance_pre + compress_amount, "assert_compression: balance of compressed sol pda insufficient, compress sol failed" ); } else { let compressed_sol_pda_balance = match context.get_account(get_sol_pool_pda()).await.unwrap() { Some(account) => account.lamports, None => 0, }; assert_eq!( compressed_sol_pda_balance, compressed_sol_pda_balance_pre - 
compress_amount, "assert_compression: balance of compressed sol pda incorrect, decompress sol failed" ); let recipient_balance = context .get_account(*recipient) .await .unwrap() .unwrap() .lamports; assert_eq!( recipient_balance, recipient_balance_pre + compress_amount, "assert_compression: balance of recipient insufficient, decompress sol failed" ); } }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_epoch.rs
use light_client::rpc::RpcConnection; use light_registry::{ protocol_config::state::ProtocolConfigPda, utils::{get_epoch_pda_address, get_forester_pda, get_protocol_config_pda_address}, EpochPda, ForesterEpochPda, ForesterPda, }; use solana_sdk::pubkey::Pubkey; pub async fn assert_finalized_epoch_registration<R: RpcConnection>( rpc: &mut R, forester_epoch_pda_pubkey: &Pubkey, epoch_pda_pubkey: &Pubkey, ) { let epoch_pda = rpc .get_anchor_account::<EpochPda>(epoch_pda_pubkey) .await .unwrap() .unwrap(); let expected_total_epoch_weight = epoch_pda.registered_weight; let forester_epoch_pda = rpc .get_anchor_account::<ForesterEpochPda>(forester_epoch_pda_pubkey) .await .unwrap() .unwrap(); assert!(forester_epoch_pda.total_epoch_weight.is_some()); assert_eq!( forester_epoch_pda.total_epoch_weight.unwrap(), expected_total_epoch_weight ); } pub async fn assert_epoch_pda<R: RpcConnection>( rpc: &mut R, epoch: u64, expected_registered_weight: u64, ) { let epoch_pda_pubkey = get_epoch_pda_address(epoch); let epoch_pda = rpc .get_anchor_account::<EpochPda>(&epoch_pda_pubkey) .await .unwrap() .unwrap(); let protocol_config_pda_pubkey = get_protocol_config_pda_address().0; let protocol_config_pda = rpc .get_anchor_account::<ProtocolConfigPda>(&protocol_config_pda_pubkey) .await .unwrap() .unwrap(); assert_eq!(epoch_pda.registered_weight, expected_registered_weight); assert_eq!(epoch_pda.total_work, 0); assert_eq!(epoch_pda.protocol_config, protocol_config_pda.config); assert_eq!(epoch_pda.epoch, epoch); } /// Helper function to fetch the forester epoch and epoch account to assert diff /// after transaction. 
pub async fn fetch_epoch_and_forester_pdas<R: RpcConnection>( rpc: &mut R, forester_epoch_pda: &Pubkey, epoch_pda: &Pubkey, ) -> (ForesterEpochPda, EpochPda) { let forester_epoch_pda = rpc .get_anchor_account::<ForesterEpochPda>(forester_epoch_pda) .await .unwrap() .unwrap(); println!("forester_epoch_pda: {:?}", forester_epoch_pda); let epoch_pda = rpc .get_anchor_account::<EpochPda>(epoch_pda) .await .unwrap() .unwrap(); println!("epoch_pda: {:?}", epoch_pda); (forester_epoch_pda, epoch_pda) } /// Asserts: /// 1. ForesterEpochPda has reported work /// 2. EpochPda has updated total work by forester work counter pub async fn assert_report_work<R: RpcConnection>( rpc: &mut R, forester_epoch_pda_pubkey: &Pubkey, epoch_pda_pubkey: &Pubkey, mut pre_forester_epoch_pda: ForesterEpochPda, mut pre_epoch_pda: EpochPda, ) { let forester_epoch_pda = rpc .get_anchor_account::<ForesterEpochPda>(forester_epoch_pda_pubkey) .await .unwrap() .unwrap(); pre_forester_epoch_pda.has_reported_work = true; assert_eq!(forester_epoch_pda, pre_forester_epoch_pda); let epoch_pda = rpc .get_anchor_account::<EpochPda>(epoch_pda_pubkey) .await .unwrap() .unwrap(); pre_epoch_pda.total_work += forester_epoch_pda.work_counter; assert_eq!(epoch_pda, pre_epoch_pda); } /// Asserts the correct creation of a ForesterEpochPda. 
pub async fn assert_registered_forester_pda<R: RpcConnection>( rpc: &mut R, forester_epoch_pda_pubkey: &Pubkey, forester_derivation_pubkey: &Pubkey, epoch: u64, ) { let (forester_pda_pubkey, _) = get_forester_pda(forester_derivation_pubkey); let epoch_pda_pubkey = get_epoch_pda_address(epoch); let epoch_pda = rpc .get_anchor_account::<EpochPda>(&epoch_pda_pubkey) .await .unwrap() .unwrap(); let forester_pda = rpc .get_anchor_account::<ForesterPda>(&forester_pda_pubkey) .await .unwrap() .unwrap(); let epoch_active_phase_start_slot = epoch_pda.protocol_config.genesis_slot + epoch_pda.protocol_config.registration_phase_length + epoch_pda.epoch * epoch_pda.protocol_config.active_phase_length; let expected_forester_epoch_pda = ForesterEpochPda { authority: forester_pda.authority, config: forester_pda.config, epoch: epoch_pda.epoch, weight: forester_pda.active_weight, work_counter: 0, has_reported_work: false, forester_index: epoch_pda.registered_weight - forester_pda.active_weight, total_epoch_weight: None, epoch_active_phase_start_slot, protocol_config: epoch_pda.protocol_config, finalize_counter: 0, }; let forester_epoch_pda = rpc .get_anchor_account::<ForesterEpochPda>(forester_epoch_pda_pubkey) .await .unwrap() .unwrap(); assert_eq!(forester_epoch_pda, expected_forester_epoch_pda); }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/test_forester.rs
use account_compression::instruction::UpdateAddressMerkleTree; use account_compression::state::QueueAccount; use account_compression::utils::constants::{ ADDRESS_MERKLE_TREE_HEIGHT, ADDRESS_MERKLE_TREE_ROOTS, }; use account_compression::{instruction::InsertAddresses, StateMerkleTreeAccount, ID}; use account_compression::{AddressMerkleTreeAccount, SAFETY_MARGIN}; use anchor_lang::system_program; use anchor_lang::{InstructionData, ToAccountMetas}; use light_client::rpc::errors::RpcError; use light_client::rpc::RpcConnection; use light_concurrent_merkle_tree::event::MerkleTreeEvent; use light_hasher::Poseidon; use light_indexed_merkle_tree::copy::IndexedMerkleTreeCopy; use forester_utils::indexer::{AddressMerkleTreeBundle, StateMerkleTreeBundle}; use forester_utils::{get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree}; use light_program_test::test_env::NOOP_PROGRAM_ID; use light_registry::account_compression_cpi::sdk::{ create_nullify_instruction, create_update_address_merkle_tree_instruction, CreateNullifyInstructionInputs, UpdateAddressMerkleTreeInstructionInputs, }; use light_registry::utils::get_forester_epoch_pda_from_authority; use light_registry::{ForesterEpochPda, RegisterForester}; use light_utils::bigint::bigint_to_be_bytes_array; use log::debug; use solana_sdk::signature::Signature; use solana_sdk::{ instruction::{AccountMeta, Instruction}, pubkey::Pubkey, signature::{Keypair, Signer}, transaction::Transaction, }; use thiserror::Error; // doesn't keep its own Merkle tree but gets it from the indexer // can also get all the state and Address Merkle trees from the indexer // the lightweight version is just a function // we should have a random option that shuffles the order in which to nullify transactions // we should have a parameters how many to nullify // in the test we should nullify everything once the queue is 60% full /// Check compressed_accounts in the queue array which are not nullified yet /// Iterate over these compressed_accounts 
and nullify them /// /// Checks: /// 1. Value in hashset is marked /// 2. State tree root is updated /// 3. TODO: add event is emitted (after rebase) /// optional: assert that the Merkle tree doesn't change except the updated leaf pub async fn nullify_compressed_accounts<R: RpcConnection>( rpc: &mut R, forester: &Keypair, state_tree_bundle: &mut StateMerkleTreeBundle, epoch: u64, is_metadata_forester: bool, ) -> Result<(), RpcError> { let nullifier_queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, state_tree_bundle.accounts.nullifier_queue).await }; let pre_forester_counter = if is_metadata_forester { 0 } else { rpc.get_anchor_account::<ForesterEpochPda>( &get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0, ) .await .unwrap() .unwrap() .work_counter }; let onchain_merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>( rpc, state_tree_bundle.accounts.merkle_tree, ) .await; assert_eq!( onchain_merkle_tree.root(), state_tree_bundle.merkle_tree.root() ); let pre_root = onchain_merkle_tree.root(); let change_log_index = onchain_merkle_tree.changelog_index() as u64; let mut compressed_account_to_nullify = Vec::new(); let first = nullifier_queue.first_no_seq().unwrap(); for i in 0..nullifier_queue.get_capacity() { let bucket = nullifier_queue.get_bucket(i).unwrap(); if let Some(bucket) = bucket { if bucket.sequence_number.is_none() { debug!("element to nullify: {:?}", bucket.value_bytes()); let leaf_index: usize = state_tree_bundle .merkle_tree .get_leaf_index(&bucket.value_bytes()) .unwrap(); debug!("leaf_index: {:?}", leaf_index); compressed_account_to_nullify.push((i, bucket.value_bytes())); } } } debug!( "nullifying {:?} accounts ", compressed_account_to_nullify.len() ); for (i, (index_in_nullifier_queue, compressed_account)) in compressed_account_to_nullify.iter().enumerate() { let leaf_index: usize = state_tree_bundle .merkle_tree .get_leaf_index(compressed_account) .unwrap(); debug!("nullifying leaf: {:?}", 
leaf_index); let proof: Vec<[u8; 32]> = state_tree_bundle .merkle_tree .get_proof_of_leaf(leaf_index, false) .unwrap() .to_array::<16>() .unwrap() .to_vec(); let ix = create_nullify_instruction( CreateNullifyInstructionInputs { authority: forester.pubkey(), nullifier_queue: state_tree_bundle.accounts.nullifier_queue, merkle_tree: state_tree_bundle.accounts.merkle_tree, change_log_indices: vec![change_log_index], leaves_queue_indices: vec![*index_in_nullifier_queue as u16], indices: vec![leaf_index as u64], proofs: vec![proof], derivation: forester.pubkey(), is_metadata_forester, }, epoch, ); let instructions = [ix]; let event = rpc .create_and_send_transaction_with_event::<MerkleTreeEvent>( &instructions, &forester.pubkey(), &[forester], None, ) .await? .unwrap(); match event.0 { MerkleTreeEvent::V2(event) => { assert_eq!(event.id, state_tree_bundle.accounts.merkle_tree.to_bytes()); assert_eq!( event.seq, onchain_merkle_tree.sequence_number() as u64 + 1 + i as u64 ); assert_eq!(event.nullified_leaves_indices.len(), 1); assert_eq!(event.nullified_leaves_indices[0], leaf_index as u64); } _ => { panic!("Wrong event type."); } } assert_value_is_marked_in_queue( rpc, state_tree_bundle, index_in_nullifier_queue, compressed_account, ) .await; } let num_nullified = compressed_account_to_nullify.len() as u64; // Locally nullify all leaves for (_, compressed_account) in compressed_account_to_nullify.iter() { let leaf_index = state_tree_bundle .merkle_tree .get_leaf_index(compressed_account) .unwrap(); debug!("locally nullifying leaf_index {}", leaf_index); debug!("compressed_account {:?}", compressed_account); debug!( "merkle tree pubkey {:?}", state_tree_bundle.accounts.merkle_tree ); state_tree_bundle .merkle_tree .update(&[0u8; 32], leaf_index) .unwrap(); } let onchain_merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>( rpc, state_tree_bundle.accounts.merkle_tree, ) .await; assert_eq!( onchain_merkle_tree.root(), 
state_tree_bundle.merkle_tree.root() ); if !is_metadata_forester { assert_forester_counter( rpc, &get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0, pre_forester_counter, num_nullified, ) .await .unwrap(); } // SAFEGUARD: check that the root changed if there was at least one element to nullify if first.is_some() { assert_ne!(pre_root, onchain_merkle_tree.root()); } Ok(()) } async fn assert_value_is_marked_in_queue<'a, R: RpcConnection>( rpc: &mut R, state_tree_bundle: &mut StateMerkleTreeBundle, index_in_nullifier_queue: &usize, compressed_account: &[u8; 32], ) { let nullifier_queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, state_tree_bundle.accounts.nullifier_queue).await }; let array_element = nullifier_queue .get_bucket(*index_in_nullifier_queue) .unwrap() .unwrap(); assert_eq!(&array_element.value_bytes(), compressed_account); let onchain_merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>( rpc, state_tree_bundle.accounts.merkle_tree, ) .await; assert_eq!( array_element.sequence_number(), Some( onchain_merkle_tree.sequence_number() + onchain_merkle_tree.roots.capacity() + SAFETY_MARGIN as usize ) ); } pub async fn assert_forester_counter<R: RpcConnection>( rpc: &mut R, pubkey: &Pubkey, pre: u64, num_nullified: u64, ) -> Result<(), RpcError> { let account = rpc .get_anchor_account::<ForesterEpochPda>(pubkey) .await? .unwrap(); if account.work_counter != pre + num_nullified { debug!("account.work_counter: {}", account.work_counter); debug!("pre: {}", pre); debug!("num_nullified: {}", num_nullified); debug!("forester pubkey: {:?}", pubkey); return Err(RpcError::CustomError( "ForesterEpochPda counter not updated correctly".to_string(), )); } Ok(()) } #[derive(Error, Debug)] pub enum RelayerUpdateError { #[error("Error in relayer update")] RpcError, } /// Mocks the address insert logic of a forester. /// Gets addresses from the AddressQueue and inserts them into the AddressMerkleTree. /// /// Checks: /// 1. 
Element has been marked correctly /// 2. Merkle tree has been updated correctly /// /// TODO: Event has been emitted, event doesn't exist yet pub async fn empty_address_queue_test<R: RpcConnection>( forester: &Keypair, rpc: &mut R, address_tree_bundle: &mut AddressMerkleTreeBundle, signer_is_owner: bool, epoch: u64, is_metadata_forester: bool, ) -> Result<(), RelayerUpdateError> { let address_merkle_tree_pubkey = address_tree_bundle.accounts.merkle_tree; let address_queue_pubkey = address_tree_bundle.accounts.queue; let initial_merkle_tree_state = address_tree_bundle.merkle_tree.clone(); let initial_indexed_array_state = address_tree_bundle.indexed_array.clone(); let relayer_merkle_tree = &mut address_tree_bundle.merkle_tree; let relayer_indexing_array = &mut address_tree_bundle.indexed_array; let mut update_errors: Vec<RpcError> = Vec::new(); let address_merkle_tree = get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>( rpc, address_merkle_tree_pubkey, ) .await; let indexed_changelog_index = address_merkle_tree.indexed_changelog_index() as u16; let changelog_index = address_merkle_tree.changelog_index() as u16; let mut counter = 0; loop { let pre_forester_counter = if !signer_is_owner { rpc.get_anchor_account::<ForesterEpochPda>( &get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0, ) .await .map_err(|e| RelayerUpdateError::RpcError)? .unwrap() .work_counter } else { 0 }; let address_merkle_tree = get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>( rpc, address_merkle_tree_pubkey, ) .await; assert_eq!(relayer_merkle_tree.root(), address_merkle_tree.root()); let address_queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, address_queue_pubkey).await }; let address = address_queue.first_no_seq().unwrap(); if address.is_none() { break; } let (address, address_hashset_index) = address.unwrap(); // Create new element from the dequeued value. 
let (old_low_address, old_low_address_next_value) = initial_indexed_array_state .find_low_element_for_nonexistent(&address.value_biguint()) .unwrap(); let address_bundle = initial_indexed_array_state .new_element_with_low_element_index(old_low_address.index, &address.value_biguint()) .unwrap(); // Get the Merkle proof for updating low element. let low_address_proof = initial_merkle_tree_state .get_proof_of_leaf(old_low_address.index, false) .unwrap(); let old_sequence_number = address_merkle_tree.sequence_number(); let old_root = address_merkle_tree.root(); // Update on-chain tree. let update_successful = match update_merkle_tree( rpc, forester, address_queue_pubkey, address_merkle_tree_pubkey, address_hashset_index, old_low_address.index as u64, bigint_to_be_bytes_array(&old_low_address.value).unwrap(), old_low_address.next_index as u64, bigint_to_be_bytes_array(&old_low_address_next_value).unwrap(), low_address_proof.to_array().unwrap(), Some(changelog_index), Some(indexed_changelog_index), signer_is_owner, epoch, is_metadata_forester, ) .await { Ok(event) => { let event = event.unwrap(); match event.0 { MerkleTreeEvent::V3(event) => { // Only assert for the first update since the other updates might be patched // the asserts are likely to fail if counter == 0 { assert_eq!(event.id, address_merkle_tree_pubkey.to_bytes()); assert_eq!(event.seq, old_sequence_number as u64 + 1); assert_eq!(event.updates.len(), 1); let event = &event.updates[0]; assert_eq!( event.new_low_element.index, address_bundle.new_low_element.index, "Empty Address Queue Test: invalid new_low_element.index" ); assert_eq!( event.new_low_element.next_index, address_bundle.new_low_element.next_index, "Empty Address Queue Test: invalid new_low_element.next_index" ); assert_eq!( event.new_low_element.value, bigint_to_be_bytes_array::<32>( &address_bundle.new_low_element.value ) .unwrap(), "Empty Address Queue Test: invalid new_low_element.value" ); assert_eq!( event.new_low_element.next_value, 
bigint_to_be_bytes_array::<32>(&address_bundle.new_element.value) .unwrap(), "Empty Address Queue Test: invalid new_low_element.next_value" ); let leaf_hash = address_bundle .new_low_element .hash::<Poseidon>(&address_bundle.new_element.value) .unwrap(); assert_eq!( event.new_low_element_hash, leaf_hash, "Empty Address Queue Test: invalid new_low_element_hash" ); let leaf_hash = address_bundle .new_element .hash::<Poseidon>(&address_bundle.new_element_next_value) .unwrap(); assert_eq!( event.new_high_element_hash, leaf_hash, "Empty Address Queue Test: invalid new_high_element_hash" ); assert_eq!( event.new_high_element.index, address_bundle.new_element.index, "Empty Address Queue Test: invalid new_high_element.index" ); assert_eq!( event.new_high_element.next_index, address_bundle.new_element.next_index, "Empty Address Queue Test: invalid new_high_element.next_index" ); assert_eq!( event.new_high_element.value, bigint_to_be_bytes_array::<32>(&address_bundle.new_element.value) .unwrap(), "Empty Address Queue Test: invalid new_high_element.value" ); assert_eq!( event.new_high_element.next_value, bigint_to_be_bytes_array::<32>( &address_bundle.new_element_next_value ) .unwrap(), "Empty Address Queue Test: invalid new_high_element.next_value" ); } } _ => { panic!("Wrong event type."); } } counter += 1; true } Err(e) => { update_errors.push(e); break; } }; if update_successful { if !signer_is_owner { assert_forester_counter( rpc, &get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0, pre_forester_counter, 1, ) .await .unwrap(); } let merkle_tree = get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>( rpc, address_merkle_tree_pubkey, ) .await; let (old_low_address, _) = relayer_indexing_array .find_low_element_for_nonexistent(&address.value_biguint()) .unwrap(); let address_bundle = relayer_indexing_array .new_element_with_low_element_index(old_low_address.index, &address.value_biguint()) .unwrap(); let address_queue = unsafe { 
get_hash_set::<QueueAccount, R>(rpc, address_queue_pubkey).await }; assert_eq!( address_queue .get_bucket(address_hashset_index as usize) .unwrap() .unwrap() .sequence_number() .unwrap(), old_sequence_number + address_queue.sequence_threshold + 2 // We are doing two Merkle tree operations ); relayer_merkle_tree .update( &address_bundle.new_low_element, &address_bundle.new_element, &address_bundle.new_element_next_value, ) .unwrap(); relayer_indexing_array .append_with_low_element_index( address_bundle.new_low_element.index, &address_bundle.new_element.value, ) .unwrap(); assert_eq!(merkle_tree.sequence_number(), old_sequence_number + 2); assert_ne!(old_root, merkle_tree.root(), "Root did not change."); assert_eq!( relayer_merkle_tree.root(), merkle_tree.root(), "Root off-chain onchain inconsistent." ); let changelog_entry = merkle_tree .changelog .get(merkle_tree.changelog_index()) .unwrap(); let path = relayer_merkle_tree .get_path_of_leaf(merkle_tree.current_index(), true) .unwrap(); for i in 0..ADDRESS_MERKLE_TREE_HEIGHT as usize { let changelog_node = changelog_entry.path[i].unwrap(); let path_node = path[i]; assert_eq!(changelog_node, path_node); } let indexed_changelog_entry = merkle_tree .indexed_changelog .get(merkle_tree.indexed_changelog_index()) .unwrap(); let proof = relayer_merkle_tree .get_proof_of_leaf(merkle_tree.current_index(), false) .unwrap(); assert_eq!( address_bundle.new_element, indexed_changelog_entry.element.into(), ); assert_eq!(indexed_changelog_entry.proof.as_slice(), proof.as_slice()); assert_eq!( indexed_changelog_entry.changelog_index, merkle_tree.changelog_index() ); } } if update_errors.is_empty() { Ok(()) } else { panic!("Errors: {:?}", update_errors); } } #[allow(clippy::too_many_arguments)] pub async fn update_merkle_tree<R: RpcConnection>( rpc: &mut R, forester: &Keypair, address_queue_pubkey: Pubkey, address_merkle_tree_pubkey: Pubkey, value: u16, low_address_index: u64, low_address_value: [u8; 32], low_address_next_index: 
u64, low_address_next_value: [u8; 32], low_address_proof: [[u8; 32]; 16], changelog_index: Option<u16>, indexed_changelog_index: Option<u16>, signer_is_owner: bool, epoch: u64, is_metadata_forester: bool, ) -> Result<Option<(MerkleTreeEvent, Signature, u64)>, RpcError> { let changelog_index = match changelog_index { Some(changelog_index) => changelog_index, None => { let address_merkle_tree = get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>( rpc, address_merkle_tree_pubkey, ) .await; address_merkle_tree.changelog_index() as u16 } }; let indexed_changelog_index = match indexed_changelog_index { Some(indexed_changelog_index) => indexed_changelog_index, None => { let address_merkle_tree = get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>( rpc, address_merkle_tree_pubkey, ) .await; address_merkle_tree.indexed_changelog_index() as u16 } }; let update_ix = if !signer_is_owner { create_update_address_merkle_tree_instruction( UpdateAddressMerkleTreeInstructionInputs { authority: forester.pubkey(), derivation: forester.pubkey(), address_merkle_tree: address_merkle_tree_pubkey, address_queue: address_queue_pubkey, changelog_index, indexed_changelog_index, value, low_address_index, low_address_value, low_address_next_index, low_address_next_value, low_address_proof, is_metadata_forester, }, epoch, ) } else { let instruction_data = UpdateAddressMerkleTree { changelog_index, indexed_changelog_index, value, low_address_index, low_address_value, low_address_next_index, low_address_next_value, low_address_proof, }; Instruction { program_id: ID, accounts: vec![ AccountMeta::new(forester.pubkey(), true), AccountMeta::new(ID, false), AccountMeta::new(address_queue_pubkey, false), AccountMeta::new(address_merkle_tree_pubkey, false), AccountMeta::new(NOOP_PROGRAM_ID, false), ], data: instruction_data.data(), } }; rpc.create_and_send_transaction_with_event::<MerkleTreeEvent>( &[update_ix], &forester.pubkey(), &[forester], 
None, ) .await } pub async fn insert_addresses<R: RpcConnection>( context: &mut R, address_queue_pubkey: Pubkey, address_merkle_tree_pubkey: Pubkey, addresses: Vec<[u8; 32]>, ) -> Result<Signature, RpcError> { let num_addresses = addresses.len(); let instruction_data = InsertAddresses { addresses }; let accounts = account_compression::accounts::InsertIntoQueues { fee_payer: context.get_payer().pubkey(), authority: context.get_payer().pubkey(), registered_program_pda: None, system_program: system_program::ID, }; let insert_ix = Instruction { program_id: ID, accounts: [ accounts.to_account_metas(Some(true)), vec![ vec![ AccountMeta::new(address_queue_pubkey, false), AccountMeta::new(address_merkle_tree_pubkey, false) ]; num_addresses ] .iter() .flat_map(|x| x.to_vec()) .collect::<Vec<AccountMeta>>(), ] .concat(), data: instruction_data.data(), }; let latest_blockhash = context.get_latest_blockhash().await.unwrap(); let transaction = Transaction::new_signed_with_payer( &[insert_ix], Some(&context.get_payer().pubkey()), &[&context.get_payer()], latest_blockhash, ); context.process_transaction(transaction).await }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/lib.rs
use account_compression::initialize_address_merkle_tree::Pubkey;
use account_compression::{
    AddressMerkleTreeConfig, AddressQueueConfig, QueueType, RegisteredProgram,
};
use solana_sdk::signature::{Keypair, Signature, Signer};
use solana_sdk::{instruction::InstructionError, transaction};
use std::cmp;

pub mod address_tree_rollover;
pub mod assert_address_merkle_tree;
pub mod assert_compressed_tx;
pub mod assert_epoch;
pub mod assert_merkle_tree;
pub mod assert_queue;
pub mod assert_rollover;
pub mod assert_token_tx;
pub mod e2e_test_env;
#[allow(unused)]
pub mod indexer;
pub mod spl;
pub mod state_tree_rollover;
pub mod system_program;
#[allow(unused)]
pub mod test_forester;

use crate::assert_address_merkle_tree::assert_address_merkle_tree_initialized;
use crate::assert_queue::assert_address_queue_initialized;
pub use forester_utils::{
    airdrop_lamports, create_account_instruction,
    forester_epoch::{Epoch, TreeAccounts, TreeType},
    get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree,
    indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, TokenDataWithContext},
    registry::{
        create_rollover_address_merkle_tree_instructions,
        create_rollover_state_merkle_tree_instructions, register_test_forester,
        update_test_forester,
    },
    AccountZeroCopy,
};
pub use light_client::{
    rpc::{
        assert_rpc_error, solana_rpc::SolanaRpcUrl, RpcConnection, RpcError, SolanaRpcConnection,
    },
    transaction_params::{FeeConfig, TransactionParams},
};
use light_hasher::Poseidon;
use light_program_test::test_env::create_address_merkle_tree_and_queue_account;
use light_registry::account_compression_cpi::sdk::get_registered_program_pda;

/// Creates an address Merkle tree + queue account pair and then asserts that
/// both accounts were initialized with the expected metadata and initial
/// indexed-tree state (computed from a local reference tree).
///
/// Returns the creation transaction's signature on success; panics on any
/// failed assertion.
#[allow(clippy::too_many_arguments)]
#[inline(never)]
pub async fn create_address_merkle_tree_and_queue_account_with_assert<R: RpcConnection>(
    payer: &Keypair,
    registry: bool,
    context: &mut R,
    address_merkle_tree_keypair: &Keypair,
    address_queue_keypair: &Keypair,
    program_owner: Option<Pubkey>,
    forester: Option<Pubkey>,
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
    index: u64,
) -> Result<Signature, RpcError> {
    let result = create_address_merkle_tree_and_queue_account(
        payer,
        registry,
        context,
        address_merkle_tree_keypair,
        address_queue_keypair,
        program_owner,
        forester,
        merkle_tree_config,
        queue_config,
        index,
    )
    .await;

    // To initialize the indexed tree we do 4 operations:
    // 1. insert 0 append 0 and update 0
    // 2. insert 1 append BN254_FIELD_SIZE -1 and update 0
    // we appended two values, thus the expected next index is 2;
    // The right most leaf is the hash of the indexed array element with value FIELD_SIZE - 1
    // index 1, next_index: 0
    let expected_change_log_length = cmp::min(4, merkle_tree_config.changelog_size as usize);
    let expected_roots_length = cmp::min(4, merkle_tree_config.roots_size as usize);
    let expected_next_index = 2;
    let expected_indexed_change_log_length =
        cmp::min(4, merkle_tree_config.address_changelog_size as usize);
    // Build a local reference tree to derive the expected rightmost leaf.
    let mut reference_tree =
        light_indexed_merkle_tree::reference::IndexedMerkleTree::<Poseidon, usize>::new(
            account_compression::utils::constants::ADDRESS_MERKLE_TREE_HEIGHT as usize,
            account_compression::utils::constants::ADDRESS_MERKLE_TREE_CANOPY_DEPTH as usize,
        )
        .unwrap();
    reference_tree.init().unwrap();
    let expected_right_most_leaf = reference_tree
        .merkle_tree
        .get_leaf(reference_tree.merkle_tree.rightmost_index - 1);
    // Hard-coded value cross-checks the reference tree computation itself.
    let _expected_right_most_leaf = [
        30, 164, 22, 238, 180, 2, 24, 181, 64, 193, 207, 184, 219, 233, 31, 109, 84, 232, 162,
        158, 220, 48, 163, 158, 50, 107, 64, 87, 167, 217, 99, 245,
    ];
    assert_eq!(expected_right_most_leaf, _expected_right_most_leaf);
    // Registry-created trees are owned by the group authority PDA; otherwise
    // the payer is the owner.
    let owner = if registry {
        let registered_program = get_registered_program_pda(&light_registry::ID);
        let registered_program_account = context
            .get_anchor_account::<RegisteredProgram>(&registered_program)
            .await
            .unwrap()
            .unwrap();
        registered_program_account.group_authority_pda
    } else {
        payer.pubkey()
    };
    assert_address_merkle_tree_initialized(
        context,
        &address_merkle_tree_keypair.pubkey(),
        &address_queue_keypair.pubkey(),
        merkle_tree_config,
        index,
        program_owner,
        forester,
        expected_change_log_length,
        expected_roots_length,
        expected_next_index,
        &expected_right_most_leaf,
        &owner,
        expected_indexed_change_log_length,
    )
    .await;
    assert_address_queue_initialized(
        context,
        &address_queue_keypair.pubkey(),
        queue_config,
        &address_merkle_tree_keypair.pubkey(),
        merkle_tree_config,
        QueueType::AddressQueue,
        index,
        program_owner,
        forester,
        &owner,
    )
    .await;

    result
}

/// Asserts that the given `BanksTransactionResultWithMetadata` is an error with a custom error code
/// or a program error.
/// Unfortunately BanksTransactionResultWithMetadata does not reliably expose the custom error code, so
/// we allow program error as well.
// TODO: unify with assert_rpc_error
pub fn assert_custom_error_or_program_error(
    result: Result<solana_sdk::signature::Signature, RpcError>,
    error_code: u32,
) -> Result<(), RpcError> {
    // Accept either outcome at instruction index 0: a generic program failure
    // or the exact custom error code.
    let accepted_errors = [
        (0, InstructionError::ProgramFailedToComplete),
        (0, InstructionError::Custom(error_code)),
    ];

    let is_accepted = accepted_errors.iter().any(|(index, error)| {
        matches!(result, Err(RpcError::TransactionError(transaction::TransactionError::InstructionError(i, ref e))) if i == (*index as u8) && e == error)
    });

    if !is_accepted {
        println!("result {:?}", result);
        println!("error_code {:?}", error_code);
        return Err(RpcError::AssertRpcError(format!(
            "Expected error code {} or program error, got {:?}",
            error_code, result
        )));
    }

    Ok(())
}
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_address_merkle_tree.rs
use forester_utils::{get_indexed_merkle_tree, AccountZeroCopy};
use light_client::rpc::RpcConnection;
use light_hasher::Poseidon;
use solana_sdk::pubkey::Pubkey;

/// Asserts that a freshly created address Merkle tree account matches the
/// supplied config and expected initial indexed-tree state: rollover
/// metadata, access metadata, associated queue, changelog/root buffer sizes
/// and fill levels, next index, and rightmost leaf.
///
/// Panics on the first mismatch.
#[allow(clippy::too_many_arguments)]
pub async fn assert_address_merkle_tree_initialized<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    queue_pubkey: &Pubkey,
    merkle_tree_config: &account_compression::AddressMerkleTreeConfig,
    index: u64,
    program_owner: Option<Pubkey>,
    forester: Option<Pubkey>,
    expected_changelog_length: usize,
    expected_roots_length: usize,
    expected_next_index: usize,
    expected_rightmost_leaf: &[u8; 32],
    owner_pubkey: &Pubkey,
    expected_indexed_changelog_length: usize,
) {
    let merkle_tree = AccountZeroCopy::<account_compression::AddressMerkleTreeAccount>::new(
        rpc,
        *merkle_tree_pubkey,
    )
    .await;
    let merkle_tree_account = merkle_tree.deserialized();

    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .rollover_threshold,
        merkle_tree_config.rollover_threshold.unwrap_or_default()
    );
    assert_eq!(
        merkle_tree_account.metadata.rollover_metadata.network_fee,
        merkle_tree_config.network_fee.unwrap_or_default()
    );

    // The address Merkle tree is never directly called by the user.
    // The whole rollover fees are collected by the address queue.
    let expected_rollover_fee = 0;
    assert_eq!(
        merkle_tree_account.metadata.rollover_metadata.rollover_fee,
        expected_rollover_fee
    );
    assert_eq!(merkle_tree_account.metadata.rollover_metadata.index, index);
    // u64::MAX marks the tree as not rolled over.
    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .rolledover_slot,
        u64::MAX
    );
    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .close_threshold,
        merkle_tree_config.close_threshold.unwrap_or(u64::MAX)
    );
    assert_eq!(
        merkle_tree_account.metadata.next_merkle_tree,
        Pubkey::default()
    );
    let expected_access_meta_data = account_compression::AccessMetadata {
        owner: *owner_pubkey,
        program_owner: program_owner.unwrap_or_default(),
        forester: forester.unwrap_or_default(),
    };
    assert_eq!(
        merkle_tree_account.metadata.access_metadata,
        expected_access_meta_data
    );
    assert_eq!(merkle_tree_account.metadata.associated_queue, *queue_pubkey);

    // Now check the zero-copy tree structure itself.
    let merkle_tree = get_indexed_merkle_tree::<
        account_compression::AddressMerkleTreeAccount,
        R,
        Poseidon,
        usize,
        26,
        16,
    >(rpc, *merkle_tree_pubkey)
    .await;
    assert_eq!(merkle_tree.height, merkle_tree_config.height as usize);
    assert_eq!(
        merkle_tree.merkle_tree.changelog.capacity(),
        merkle_tree_config.changelog_size as usize
    );
    assert_eq!(
        merkle_tree.merkle_tree.changelog.len(),
        expected_changelog_length
    );
    assert_eq!(
        merkle_tree.merkle_tree.changelog_index(),
        expected_changelog_length.saturating_sub(1)
    );
    assert_eq!(
        merkle_tree.roots.capacity(),
        merkle_tree_config.roots_size as usize
    );
    assert_eq!(merkle_tree.roots.len(), expected_roots_length);
    assert_eq!(
        merkle_tree.root_index(),
        expected_roots_length.saturating_sub(1)
    );
    assert_eq!(
        merkle_tree.canopy_depth,
        merkle_tree_config.canopy_depth as usize
    );
    assert_eq!(merkle_tree.next_index(), expected_next_index);
    // Sequence number wraps with the root buffer, hence the modulo.
    assert_eq!(
        merkle_tree.sequence_number() % merkle_tree_config.roots_size as usize,
        expected_roots_length.saturating_sub(1)
    );
    assert_eq!(&merkle_tree.rightmost_leaf(), expected_rightmost_leaf);
    // TODO: complete asserts
    assert_eq!(
        merkle_tree.indexed_changelog_index(),
        expected_indexed_changelog_length.saturating_sub(1)
    );
}
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_merkle_tree.rs
use account_compression::StateMerkleTreeAccount;
use forester_utils::{get_concurrent_merkle_tree, AccountZeroCopy};
use light_client::rpc::RpcConnection;
use light_hasher::Poseidon;
use light_utils::fee::compute_rollover_fee;
use solana_sdk::pubkey::Pubkey;

/// Asserts that a freshly created state Merkle tree account matches the
/// supplied parameters: rollover metadata (including the rollover fee
/// recomputed from the live account balances), access metadata, associated
/// queue, and the concurrent tree's structural fields.
///
/// Panics on the first mismatch.
#[allow(clippy::too_many_arguments)]
pub async fn assert_merkle_tree_initialized<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    queue_pubkey: &Pubkey,
    height: usize,
    changelog_capacity: usize,
    roots_capacity: usize,
    canopy_depth: usize,
    expected_changelog_length: usize,
    expected_roots_length: usize,
    expected_next_index: usize,
    expected_rightmost_leaf: &[u8; 32],
    rollover_threshold: Option<u64>,
    close_threshold: Option<u64>,
    network_fee: u64,
    payer_pubkey: &Pubkey,
) {
    let merkle_tree_account = AccountZeroCopy::<account_compression::StateMerkleTreeAccount>::new(
        rpc,
        *merkle_tree_pubkey,
    )
    .await;
    let merkle_tree_account = merkle_tree_account.deserialized();

    // Balances are needed to recompute the expected rollover fee below.
    let balance_merkle_tree = rpc
        .get_account(*merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports;
    let balance_nullifier_queue = rpc
        .get_account(*queue_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports;

    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .rollover_threshold,
        rollover_threshold.unwrap_or_default()
    );
    assert_eq!(
        merkle_tree_account.metadata.rollover_metadata.network_fee,
        network_fee
    );
    // The state tree's rollover fee covers rolling over both the tree and its
    // nullifier queue; it is only charged when a rollover threshold is set.
    let expected_rollover_fee = match rollover_threshold {
        Some(rollover_threshold) => {
            compute_rollover_fee(rollover_threshold, height as u32, balance_merkle_tree).unwrap()
                + compute_rollover_fee(rollover_threshold, height as u32, balance_nullifier_queue)
                    .unwrap()
        }
        None => 0,
    };
    assert_eq!(
        merkle_tree_account.metadata.rollover_metadata.rollover_fee,
        expected_rollover_fee
    );
    assert_eq!(merkle_tree_account.metadata.rollover_metadata.index, 1);
    // u64::MAX marks the tree as not rolled over.
    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .rolledover_slot,
        u64::MAX
    );
    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .close_threshold,
        close_threshold.unwrap_or(u64::MAX)
    );
    assert_eq!(
        merkle_tree_account.metadata.next_merkle_tree,
        Pubkey::default()
    );
    assert_eq!(
        merkle_tree_account.metadata.access_metadata.owner,
        *payer_pubkey
    );
    assert_eq!(
        merkle_tree_account.metadata.access_metadata.program_owner,
        Pubkey::default()
    );
    assert_eq!(merkle_tree_account.metadata.associated_queue, *queue_pubkey);

    // Now check the zero-copy concurrent tree structure itself.
    let merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
        rpc,
        *merkle_tree_pubkey,
    )
    .await;
    assert_eq!(merkle_tree.height, height);
    assert_eq!(merkle_tree.changelog.capacity(), changelog_capacity);
    assert_eq!(merkle_tree.changelog.len(), expected_changelog_length);
    assert_eq!(
        merkle_tree.changelog_index(),
        expected_changelog_length.saturating_sub(1)
    );
    assert_eq!(merkle_tree.roots.capacity(), roots_capacity);
    assert_eq!(merkle_tree.roots.len(), expected_roots_length);
    assert_eq!(
        merkle_tree.root_index(),
        expected_roots_length.saturating_sub(1)
    );
    assert_eq!(merkle_tree.canopy_depth, canopy_depth);
    assert_eq!(merkle_tree.next_index(), expected_next_index);
    assert_eq!(
        merkle_tree.sequence_number(),
        expected_roots_length.saturating_sub(1)
    );
    assert_eq!(&merkle_tree.rightmost_leaf(), expected_rightmost_leaf);
}
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/address_tree_rollover.rs
#![allow(clippy::await_holding_refcell_ref)]
use anchor_lang::{InstructionData, Key, Lamports, ToAccountInfo, ToAccountMetas};
use solana_sdk::clock::Slot;
use solana_sdk::{
    account::{AccountSharedData, WritableAccount},
    account_info::AccountInfo,
    instruction::Instruction,
    pubkey::Pubkey,
    signature::Keypair,
    signer::Signer,
    transaction::Transaction,
};

use crate::assert_rollover::{
    assert_rolledover_merkle_trees, assert_rolledover_merkle_trees_metadata,
    assert_rolledover_queues_metadata,
};
use account_compression::{
    accounts, initialize_address_merkle_tree::AccountLoader, instruction, state::QueueAccount,
    AddressMerkleTreeAccount,
};
use account_compression::{AddressMerkleTreeConfig, AddressQueueConfig};
use forester_utils::registry::{
    create_rollover_address_merkle_tree_instructions,
    create_rollover_state_merkle_tree_instructions,
};
use forester_utils::{create_account_instruction, get_hash_set, get_indexed_merkle_tree};
use light_client::rpc::{RpcConnection, RpcError};
use light_hasher::Poseidon;
use light_indexed_merkle_tree::zero_copy::IndexedMerkleTreeZeroCopyMut;

/// Test helper: overwrites the on-chain address Merkle tree's `next_index`
/// (and the account's lamports) by patching the raw account data directly,
/// then reads the account back to verify the write took effect.
/// Used to simulate a tree close to its rollover threshold.
pub async fn set_address_merkle_tree_next_index<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    next_index: u64,
    lamports: u64,
) {
    // The zero-copy tree starts after the 8-byte discriminator and the
    // account struct header.
    let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap();
    let merkle_tree_deserialized =
        &mut IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, 26, 16>::from_bytes_zero_copy_mut(
            &mut merkle_tree.data[8 + std::mem::size_of::<AddressMerkleTreeAccount>()..],
        )
        .unwrap();
    // SAFETY: writes through the zero-copy view's raw pointer; the view was
    // just constructed over this account's data buffer.
    unsafe {
        *merkle_tree_deserialized.next_index = next_index as usize;
    }
    let mut account_share_data = AccountSharedData::from(merkle_tree);
    account_share_data.set_lamports(lamports);
    rpc.set_account(merkle_tree_pubkey, &account_share_data);
    // Re-fetch and verify the patched value round-tripped.
    let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap();
    let merkle_tree_deserialized =
        IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, 26, 16>::from_bytes_zero_copy_mut(
            &mut merkle_tree.data[8 + std::mem::size_of::<AddressMerkleTreeAccount>()..],
        )
        .unwrap();
    assert_eq!(merkle_tree_deserialized.next_index() as u64, next_index);
}

/// Creates the new queue and tree accounts and sends a
/// `RolloverAddressMerkleTreeAndQueue` instruction (direct program call,
/// payer is the authority). Returns the transaction signature.
pub async fn perform_address_merkle_tree_roll_over<R: RpcConnection>(
    context: &mut R,
    new_queue_keypair: &Keypair,
    new_address_merkle_tree_keypair: &Keypair,
    old_merkle_tree_pubkey: &Pubkey,
    old_queue_pubkey: &Pubkey,
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
) -> Result<solana_sdk::signature::Signature, RpcError> {
    let payer = context.get_payer().insecure_clone();
    // New accounts are sized from the same configs as the old ones.
    let size = QueueAccount::size(queue_config.capacity as usize).unwrap();
    let account_create_ix = create_account_instruction(
        &payer.pubkey(),
        size,
        context
            .get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &account_compression::ID,
        Some(new_queue_keypair),
    );
    let size = AddressMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
        merkle_tree_config.address_changelog_size as usize,
    );
    let mt_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        size,
        context
            .get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &account_compression::ID,
        Some(new_address_merkle_tree_keypair),
    );
    let instruction_data = instruction::RolloverAddressMerkleTreeAndQueue {};
    let accounts = accounts::RolloverAddressMerkleTreeAndQueue {
        fee_payer: context.get_payer().pubkey(),
        authority: context.get_payer().pubkey(),
        registered_program_pda: None,
        new_address_merkle_tree: new_address_merkle_tree_keypair.pubkey(),
        new_queue: new_queue_keypair.pubkey(),
        old_address_merkle_tree: *old_merkle_tree_pubkey,
        old_queue: *old_queue_pubkey,
    };
    let instruction = Instruction {
        program_id: account_compression::ID,
        accounts: [accounts.to_account_metas(Some(true))].concat(),
        data: instruction_data.data(),
    };
    let blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[account_create_ix, mt_account_create_ix, instruction],
        Some(&context.get_payer().pubkey()),
        &vec![
            &context.get_payer(),
            &new_queue_keypair,
            &new_address_merkle_tree_keypair,
        ],
        blockhash,
    );
    context.process_transaction(transaction).await
}

/// Asserts the post-conditions of an address tree + queue rollover: the old
/// and new accounts' metadata, tree structure, queue parameters, and the fee
/// payer's balance change (rent reimbursed, only signature fees paid).
pub async fn assert_rolled_over_address_merkle_tree_and_queue<R: RpcConnection>(
    payer: &Pubkey,
    rpc: &mut R,
    fee_payer_prior_balance: &u64,
    old_merkle_tree_pubkey: &Pubkey,
    old_queue_pubkey: &Pubkey,
    new_merkle_tree_pubkey: &Pubkey,
    new_queue_pubkey: &Pubkey,
) {
    let current_slot = rpc.get_slot().await.unwrap();
    // Build AccountInfo views so the accounts can be loaded through anchor's
    // AccountLoader (discriminator-checked zero-copy access).
    let mut new_mt_account = rpc
        .get_account(*new_merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut new_mt_lamports = 0u64;
    let account_info = AccountInfo::new(
        new_merkle_tree_pubkey,
        false,
        false,
        &mut new_mt_lamports,
        &mut new_mt_account.data,
        &account_compression::ID,
        false,
        0u64,
    );
    let new_mt_account =
        AccountLoader::<AddressMerkleTreeAccount>::try_from(&account_info).unwrap();
    let new_loaded_mt_account = new_mt_account.load().unwrap();

    let mut old_mt_account = rpc
        .get_account(*old_merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut old_mt_lamports = 0u64;
    let account_info = AccountInfo::new(
        old_merkle_tree_pubkey,
        false,
        false,
        &mut old_mt_lamports,
        &mut old_mt_account.data,
        &account_compression::ID,
        false,
        0u64,
    );
    let old_mt_account =
        AccountLoader::<AddressMerkleTreeAccount>::try_from(&account_info).unwrap();
    let old_loaded_mt_account = old_mt_account.load().unwrap();
    assert_eq!(
        new_mt_account.to_account_info().data.borrow().len(),
        old_mt_account.to_account_info().data.borrow().len()
    );

    assert_rolledover_merkle_trees_metadata(
        &old_loaded_mt_account.metadata,
        &new_loaded_mt_account.metadata,
        current_slot,
        new_queue_pubkey,
    );
    // Drop the loader borrows before re-fetching via RPC below.
    drop(new_loaded_mt_account);
    drop(old_loaded_mt_account);

    let struct_old =
        get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
            rpc,
            old_mt_account.key(),
        )
        .await;
    let struct_new =
        get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
            rpc,
            new_mt_account.key(),
        )
        .await;
    assert_rolledover_merkle_trees(&struct_old.merkle_tree, &struct_new.merkle_tree);
    assert_eq!(
        struct_old.merkle_tree.changelog.capacity(),
        struct_new.merkle_tree.changelog.capacity()
    );

    {
        let mut new_queue_account = rpc.get_account(*new_queue_pubkey).await.unwrap().unwrap();
        let mut new_mt_lamports = 0u64;
        let account_info = AccountInfo::new(
            new_queue_pubkey,
            false,
            false,
            &mut new_mt_lamports,
            &mut new_queue_account.data,
            &account_compression::ID,
            false,
            0u64,
        );
        let new_queue_account = AccountLoader::<QueueAccount>::try_from(&account_info).unwrap();
        let new_loaded_queue_account = new_queue_account.load().unwrap();
        let mut old_queue_account = rpc.get_account(*old_queue_pubkey).await.unwrap().unwrap();
        let mut old_mt_lamports = 0u64;
        let account_info = AccountInfo::new(
            old_queue_pubkey,
            false,
            false,
            &mut old_mt_lamports,
            &mut old_queue_account.data,
            &account_compression::ID,
            false,
            0u64,
        );
        let old_queue_account = AccountLoader::<QueueAccount>::try_from(&account_info).unwrap();
        let old_loaded_queue_account = old_queue_account.load().unwrap();
        assert_eq!(
            old_queue_account.to_account_info().data.borrow().len(),
            new_queue_account.to_account_info().data.borrow().len(),
        );

        assert_rolledover_queues_metadata(
            &old_loaded_queue_account.metadata,
            &new_loaded_queue_account.metadata,
            current_slot,
            new_merkle_tree_pubkey,
            new_queue_pubkey,
            old_mt_account.get_lamports(),
            new_mt_account.get_lamports(),
            new_queue_account.get_lamports(),
        );
    }
    let fee_payer_post_balance = rpc.get_account(*payer).await.unwrap().unwrap().lamports;
    // rent is reimbursed, 3 signatures cost 3 x 5000 lamports
    assert_eq!(*fee_payer_prior_balance, fee_payer_post_balance + 15000);
    {
        let old_address_queue =
            unsafe { get_hash_set::<QueueAccount, R>(rpc, *old_queue_pubkey).await };
        let new_address_queue =
            unsafe { get_hash_set::<QueueAccount, R>(rpc, *new_queue_pubkey).await };
        assert_eq!(
            old_address_queue.get_capacity(),
            new_address_queue.get_capacity()
        );

        assert_eq!(
            old_address_queue.sequence_threshold,
            new_address_queue.sequence_threshold,
        );
    }
}

/// Rolls over an address tree + queue via the registry (forester) path and
/// returns the transaction signature.
#[allow(clippy::too_many_arguments)]
pub async fn perform_address_merkle_tree_roll_over_forester<R: RpcConnection>(
    payer: &Keypair,
    context: &mut R,
    new_queue_keypair: &Keypair,
    new_address_merkle_tree_keypair: &Keypair,
    old_merkle_tree_pubkey: &Pubkey,
    old_queue_pubkey: &Pubkey,
    epoch: u64,
    is_metadata_forester: bool,
) -> Result<solana_sdk::signature::Signature, RpcError> {
    let instructions = create_rollover_address_merkle_tree_instructions(
        context,
        &payer.pubkey(),
        &payer.pubkey(),
        new_queue_keypair,
        new_address_merkle_tree_keypair,
        old_merkle_tree_pubkey,
        old_queue_pubkey,
        epoch,
        is_metadata_forester,
    )
    .await;
    let blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &instructions,
        Some(&payer.pubkey()),
        &vec![&payer, &new_queue_keypair, &new_address_merkle_tree_keypair],
        blockhash,
    );
    context.process_transaction(transaction).await
}

/// Rolls over a state tree + queue via the registry (forester) path; also
/// creates the new CPI-context account. Returns the signature and the slot
/// the transaction was processed in.
#[allow(clippy::too_many_arguments)]
pub async fn perform_state_merkle_tree_roll_over_forester<R: RpcConnection>(
    payer: &Keypair,
    context: &mut R,
    new_queue_keypair: &Keypair,
    new_address_merkle_tree_keypair: &Keypair,
    new_cpi_signature_keypair: &Keypair,
    old_merkle_tree_pubkey: &Pubkey,
    old_queue_pubkey: &Pubkey,
    epoch: u64,
    is_metadata_forester: bool,
) -> Result<(solana_sdk::signature::Signature, Slot), RpcError> {
    let instructions = create_rollover_state_merkle_tree_instructions(
        context,
        &payer.pubkey(),
        &payer.pubkey(),
        new_queue_keypair,
        new_address_merkle_tree_keypair,
        new_cpi_signature_keypair,
        old_merkle_tree_pubkey,
        old_queue_pubkey,
        epoch,
        is_metadata_forester,
    )
    .await;
    let blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &instructions,
        Some(&payer.pubkey()),
        &vec![
            &payer,
            &new_queue_keypair,
            &new_address_merkle_tree_keypair,
            &new_cpi_signature_keypair,
        ],
        blockhash,
    );
    context.process_transaction_with_context(transaction).await
}
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_rollover.rs
use account_compression::{MerkleTreeMetadata, QueueMetadata}; use anchor_lang::prelude::Pubkey; use light_concurrent_merkle_tree::ConcurrentMerkleTree; use light_hasher::Hasher; pub fn assert_rolledover_merkle_trees<H, const HEIGHT: usize>( old_merkle_tree: &ConcurrentMerkleTree<H, HEIGHT>, new_merkle_tree: &ConcurrentMerkleTree<H, HEIGHT>, ) where H: Hasher, { assert_eq!(old_merkle_tree.height, new_merkle_tree.height); assert_eq!( old_merkle_tree.changelog.capacity(), new_merkle_tree.changelog.capacity(), ); assert_eq!( old_merkle_tree.changelog.capacity(), new_merkle_tree.changelog.capacity() ); assert_eq!( old_merkle_tree.roots.capacity(), new_merkle_tree.roots.capacity() ); assert_eq!( old_merkle_tree.roots.capacity(), new_merkle_tree.roots.capacity() ); assert_eq!(old_merkle_tree.canopy_depth, new_merkle_tree.canopy_depth); } pub fn assert_rolledover_merkle_trees_metadata( old_merkle_tree_metadata: &MerkleTreeMetadata, new_merkle_tree_metadata: &MerkleTreeMetadata, current_slot: u64, new_queue_pubkey: &Pubkey, ) { // Old Merkle tree // 1. rolled over slot is set to current slot // 2. next Merkle tree is set to the new Merkle tree // New Merkle tree // 1. index is equal to the old Merkle tree index // 2. rollover fee is equal to the old Merkle tree rollover fee (the fee is calculated onchain in case rent should change the fee might be different) // 3. network_fee is equal to the old Merkle tree network_fee // 4. rollover threshold is equal to the old Merkle tree rollover threshold // 5. rolled over slot is set to u64::MAX (not rolled over) // 6. close threshold is equal to the old Merkle tree close threshold // 7. associated queue is equal to the new queue // 7. next merkle tree is set to Pubkey::default() (not set) // 8. owner is equal to the old Merkle tree owner // 9. 
delegate is equal to the old Merkle tree delegate assert_eq!( old_merkle_tree_metadata.access_metadata, new_merkle_tree_metadata.access_metadata ); assert_eq!( old_merkle_tree_metadata.rollover_metadata.index, new_merkle_tree_metadata.rollover_metadata.index ); assert_eq!( old_merkle_tree_metadata.rollover_metadata.rollover_fee, new_merkle_tree_metadata.rollover_metadata.rollover_fee, ); assert_eq!( old_merkle_tree_metadata .rollover_metadata .rollover_threshold, new_merkle_tree_metadata .rollover_metadata .rollover_threshold, ); assert_eq!( old_merkle_tree_metadata.rollover_metadata.network_fee, new_merkle_tree_metadata.rollover_metadata.network_fee, ); assert_eq!( old_merkle_tree_metadata.rollover_metadata.rolledover_slot, current_slot, ); assert_eq!( old_merkle_tree_metadata.rollover_metadata.close_threshold, new_merkle_tree_metadata.rollover_metadata.close_threshold ); assert_eq!( old_merkle_tree_metadata.rollover_metadata.additional_bytes, new_merkle_tree_metadata.rollover_metadata.additional_bytes ); assert_eq!(new_merkle_tree_metadata.associated_queue, *new_queue_pubkey); assert_eq!(new_merkle_tree_metadata.next_merkle_tree, Pubkey::default()); } #[allow(clippy::too_many_arguments)] pub fn assert_rolledover_queues_metadata( old_queue_metadata: &QueueMetadata, new_queue_metadata: &QueueMetadata, current_slot: u64, new_merkle_tree_pubkey: &Pubkey, new_queue_pubkey: &Pubkey, old_merkle_tree_lamports: u64, new_merkle_tree_lamports: u64, new_queue_lamports: u64, ) { assert_eq!( old_queue_metadata.rollover_metadata.rolledover_slot, current_slot ); // Isn't this wrong??? 
assert_eq!( old_queue_metadata.rollover_metadata.index, new_queue_metadata.rollover_metadata.index, ); assert_eq!( old_queue_metadata.rollover_metadata.rollover_fee, new_queue_metadata.rollover_metadata.rollover_fee ); assert_eq!( old_queue_metadata.rollover_metadata.network_fee, new_queue_metadata.rollover_metadata.network_fee ); assert_eq!( u64::MAX, new_queue_metadata.rollover_metadata.rolledover_slot ); assert_eq!( old_queue_metadata.access_metadata.owner, new_queue_metadata.access_metadata.owner ); assert_eq!( old_queue_metadata.access_metadata.program_owner, new_queue_metadata.access_metadata.program_owner ); assert_eq!( new_queue_metadata.associated_merkle_tree, *new_merkle_tree_pubkey ); assert_eq!(old_queue_metadata.next_queue, *new_queue_pubkey); assert_eq!( old_merkle_tree_lamports, new_merkle_tree_lamports + new_queue_lamports + old_merkle_tree_lamports ); }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/spl.rs
use anchor_spl::token::{Mint, TokenAccount}; use forester_utils::create_account_instruction; use forester_utils::indexer::{Indexer, TokenDataWithContext}; use light_compressed_token::process_compress_spl_token_account::sdk::create_compress_spl_token_account_instruction; use light_compressed_token::{ burn::sdk::{create_burn_instruction, CreateBurnInstructionInputs}, delegation::sdk::{ create_approve_instruction, create_revoke_instruction, CreateApproveInstructionInputs, CreateRevokeInstructionInputs, }, freeze::sdk::{create_instruction, CreateInstructionInputs}, get_token_pool_pda, mint_sdk::{create_create_token_pool_instruction, create_mint_to_instruction}, process_transfer::{transfer_sdk::create_transfer_instruction, TokenTransferOutputData}, token_data::AccountState, TokenData, }; use light_hasher::Poseidon; use light_system_program::{ invoke::processor::CompressedProof, sdk::{compressed_account::MerkleContext, event::PublicTransactionEvent}, }; use solana_program_test::BanksClientError; use solana_sdk::{ instruction::Instruction, program_pack::Pack, pubkey::Pubkey, signature::{Keypair, Signature, Signer}, }; use spl_token::instruction::initialize_mint; use crate::{ assert_compressed_tx::get_merkle_tree_snapshots, assert_token_tx::{assert_create_mint, assert_mint_to, assert_transfer}, }; use light_client::rpc::errors::RpcError; use light_client::rpc::RpcConnection; use light_client::transaction_params::TransactionParams; pub async fn mint_tokens_helper<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, mint_authority: &Keypair, mint: &Pubkey, amounts: Vec<u64>, recipients: Vec<Pubkey>, ) { mint_tokens_helper_with_lamports( rpc, test_indexer, merkle_tree_pubkey, mint_authority, mint, amounts, recipients, None, ) .await } pub async fn mint_spl_tokens<R: RpcConnection>( rpc: &mut R, mint: &Pubkey, token_account: &Pubkey, token_owner: &Pubkey, mint_authority: &Keypair, amount: u64, is_token_22: bool, ) -> 
Result<Signature, RpcError> { let mint_to_instruction = if is_token_22 { spl_token_2022::instruction::mint_to( &spl_token_2022::ID, mint, token_account, token_owner, &[&mint_authority.pubkey()], amount, ) .unwrap() } else { spl_token::instruction::mint_to( &spl_token::ID, mint, token_account, token_owner, &[&mint_authority.pubkey()], amount, ) .unwrap() }; rpc.create_and_send_transaction( &[mint_to_instruction], &mint_authority.pubkey(), &[mint_authority], ) .await } #[allow(clippy::too_many_arguments)] pub async fn mint_tokens_helper_with_lamports<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, mint_authority: &Keypair, mint: &Pubkey, amounts: Vec<u64>, recipients: Vec<Pubkey>, lamports: Option<u64>, ) { mint_tokens_22_helper_with_lamports( rpc, test_indexer, merkle_tree_pubkey, mint_authority, mint, amounts, recipients, lamports, false, ) .await; } #[allow(clippy::too_many_arguments)] pub async fn mint_tokens_22_helper_with_lamports<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &mut I, merkle_tree_pubkey: &Pubkey, mint_authority: &Keypair, mint: &Pubkey, amounts: Vec<u64>, recipients: Vec<Pubkey>, lamports: Option<u64>, token_22: bool, ) { let payer_pubkey = mint_authority.pubkey(); let instruction = create_mint_to_instruction( &payer_pubkey, &payer_pubkey, mint, merkle_tree_pubkey, amounts.clone(), recipients.clone(), lamports, token_22, ); let output_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&vec![*merkle_tree_pubkey; amounts.len()]); let snapshots = get_merkle_tree_snapshots::<R>(rpc, &output_merkle_tree_accounts).await; let previous_mint_supply = spl_token::state::Mint::unpack(&rpc.get_account(*mint).await.unwrap().unwrap().data) .unwrap() .supply; let pool: Pubkey = get_token_pool_pda(mint); let previous_pool_amount = spl_token::state::Account::unpack(&rpc.get_account(pool).await.unwrap().unwrap().data) .unwrap() .amount; let (event, _signature, _) = rpc 
.create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &payer_pubkey, &[mint_authority], None, ) .await .unwrap() .unwrap(); let (_, created_token_accounts) = test_indexer.add_event_and_compressed_accounts(&event); assert_mint_to( rpc, test_indexer, &recipients, *mint, amounts.as_slice(), &snapshots, &created_token_accounts, previous_mint_supply, previous_pool_amount, ) .await; } pub async fn create_token_pool<R: RpcConnection>( rpc: &mut R, payer: &Keypair, mint_authority: &Pubkey, decimals: u8, mint_keypair: Option<&Keypair>, ) -> Pubkey { let keypair = Keypair::new(); let mint_keypair = match mint_keypair { Some(mint_keypair) => mint_keypair, None => &keypair, }; let mint_pubkey = (*mint_keypair).pubkey(); let mint_rent = rpc .get_minimum_balance_for_rent_exemption(Mint::LEN) .await .unwrap(); let (instructions, _) = create_initialize_mint_instructions( &payer.pubkey(), mint_authority, mint_rent, decimals, mint_keypair, ); rpc.create_and_send_transaction(&instructions, &payer.pubkey(), &[payer, mint_keypair]) .await .unwrap(); mint_pubkey } pub async fn create_mint_helper<R: RpcConnection>(rpc: &mut R, payer: &Keypair) -> Pubkey { let payer_pubkey = payer.pubkey(); let rent = rpc .get_minimum_balance_for_rent_exemption(Mint::LEN) .await .unwrap(); let mint = Keypair::new(); let (instructions, pool) = create_initialize_mint_instructions(&payer_pubkey, &payer_pubkey, rent, 2, &mint); rpc.create_and_send_transaction(&instructions, &payer_pubkey, &[payer, &mint]) .await .unwrap(); assert_create_mint(rpc, &payer_pubkey, &mint.pubkey(), &pool).await; mint.pubkey() } pub async fn create_mint_22_helper<R: RpcConnection>(rpc: &mut R, payer: &Keypair) -> Pubkey { let payer_pubkey = payer.pubkey(); let rent = rpc .get_minimum_balance_for_rent_exemption(Mint::LEN) .await .unwrap(); let mint = Keypair::new(); let (instructions, pool) = create_initialize_mint_22_instructions(&payer_pubkey, &payer_pubkey, rent, 2, &mint, true); 
rpc.create_and_send_transaction(&instructions, &payer_pubkey, &[payer, &mint]) .await .unwrap(); assert_create_mint(rpc, &payer_pubkey, &mint.pubkey(), &pool).await; mint.pubkey() } pub async fn mint_wrapped_sol<R: RpcConnection>( rpc: &mut R, payer: &Keypair, token_account: &Pubkey, amount: u64, is_token_22: bool, ) -> Result<Signature, RpcError> { let transfer_ix = anchor_lang::solana_program::system_instruction::transfer( &payer.pubkey(), token_account, amount, ); let sync_native_ix = if is_token_22 { spl_token_2022::instruction::sync_native(&spl_token_2022::ID, token_account) .map_err(|e| RpcError::CustomError(format!("{:?}", e)))? } else { spl_token::instruction::sync_native(&spl_token::ID, token_account) .map_err(|e| RpcError::CustomError(format!("{:?}", e)))? }; rpc.create_and_send_transaction(&[transfer_ix, sync_native_ix], &payer.pubkey(), &[payer]) .await } pub fn create_initialize_mint_instructions( payer: &Pubkey, authority: &Pubkey, rent: u64, decimals: u8, mint_keypair: &Keypair, ) -> ([Instruction; 4], Pubkey) { create_initialize_mint_22_instructions(payer, authority, rent, decimals, mint_keypair, false) } pub fn create_initialize_mint_22_instructions( payer: &Pubkey, authority: &Pubkey, rent: u64, decimals: u8, mint_keypair: &Keypair, token_22: bool, ) -> ([Instruction; 4], Pubkey) { let program_id = if token_22 { anchor_spl::token_2022::ID } else { spl_token::ID }; let account_create_ix = create_account_instruction(payer, Mint::LEN, rent, &program_id, Some(mint_keypair)); let mint_pubkey = mint_keypair.pubkey(); let create_mint_instruction = if token_22 { spl_token_2022::instruction::initialize_mint( &program_id, &mint_keypair.pubkey(), authority, Some(authority), decimals, ) .unwrap() } else { initialize_mint( &program_id, &mint_keypair.pubkey(), authority, Some(authority), decimals, ) .unwrap() }; let transfer_ix = anchor_lang::solana_program::system_instruction::transfer(payer, &mint_pubkey, rent); let instruction = 
create_create_token_pool_instruction(payer, &mint_pubkey, token_22); let pool_pubkey = get_token_pool_pda(&mint_pubkey); ( [ account_create_ix, create_mint_instruction, transfer_ix, instruction, ], pool_pubkey, ) } /// Creates a spl token account and initializes it with the given mint and owner. /// This function is useful to create token accounts for spl compression and decompression tests. pub async fn create_token_account<R: RpcConnection>( rpc: &mut R, mint: &Pubkey, account_keypair: &Keypair, owner: &Keypair, ) -> Result<(), BanksClientError> { create_token_2022_account(rpc, mint, account_keypair, owner, false).await } pub async fn create_token_2022_account<R: RpcConnection>( rpc: &mut R, mint: &Pubkey, account_keypair: &Keypair, owner: &Keypair, token_22: bool, ) -> Result<(), BanksClientError> { let account_len = if token_22 { spl_token_2022::state::Account::LEN } else { spl_token::state::Account::LEN }; let rent = rpc .get_minimum_balance_for_rent_exemption(account_len) .await .unwrap(); let program_id = if token_22 { spl_token_2022::ID } else { spl_token::ID }; let account_create_ix = create_account_instruction( &owner.pubkey(), TokenAccount::LEN, rent, &program_id, Some(account_keypair), ); let instruction = if token_22 { spl_token_2022::instruction::initialize_account( &program_id, &account_keypair.pubkey(), mint, &owner.pubkey(), ) .unwrap() } else { spl_token::instruction::initialize_account( &program_id, &account_keypair.pubkey(), mint, &owner.pubkey(), ) .unwrap() }; rpc.create_and_send_transaction( &[account_create_ix, instruction], &owner.pubkey(), &[account_keypair, owner], ) .await .unwrap(); Ok(()) } #[allow(clippy::too_many_arguments)] pub async fn compressed_transfer_test<R: RpcConnection, I: Indexer<R>>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, mint: &Pubkey, from: &Keypair, recipients: &[Pubkey], amounts: &[u64], lamports: Option<Vec<Option<u64>>>, input_compressed_accounts: &[TokenDataWithContext], output_merkle_tree_pubkeys: 
&[Pubkey], delegate_change_account_index: Option<u8>, delegate_is_signer: bool, transaction_params: Option<TransactionParams>, ) { compressed_transfer_22_test( payer, rpc, test_indexer, mint, from, recipients, amounts, lamports, input_compressed_accounts, output_merkle_tree_pubkeys, delegate_change_account_index, delegate_is_signer, transaction_params, false, ) .await; } #[allow(clippy::too_many_arguments)] pub async fn compressed_transfer_22_test<R: RpcConnection, I: Indexer<R>>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, mint: &Pubkey, from: &Keypair, recipients: &[Pubkey], amounts: &[u64], mut lamports: Option<Vec<Option<u64>>>, input_compressed_accounts: &[TokenDataWithContext], output_merkle_tree_pubkeys: &[Pubkey], delegate_change_account_index: Option<u8>, delegate_is_signer: bool, transaction_params: Option<TransactionParams>, token_22: bool, ) { if recipients.len() != amounts.len() && amounts.len() != output_merkle_tree_pubkeys.len() { println!("{:?}", recipients); println!("{:?}", amounts); println!("{:?}", output_merkle_tree_pubkeys); panic!("recipients, amounts, and output_merkle_tree_pubkeys must have the same length"); } let mut input_merkle_tree_context = Vec::new(); let mut input_compressed_account_token_data = Vec::new(); let mut input_compressed_account_hashes = Vec::new(); let mut sum_input_amounts = 0; for account in input_compressed_accounts { let leaf_index = account.compressed_account.merkle_context.leaf_index; input_compressed_account_token_data.push(account.token_data.clone()); input_compressed_account_hashes.push( account .compressed_account .compressed_account .hash::<Poseidon>( &account.compressed_account.merkle_context.merkle_tree_pubkey, &leaf_index, ) .unwrap(), ); sum_input_amounts += account.token_data.amount; input_merkle_tree_context.push(MerkleContext { merkle_tree_pubkey: account.compressed_account.merkle_context.merkle_tree_pubkey, nullifier_queue_pubkey: account .compressed_account .merkle_context 
.nullifier_queue_pubkey, leaf_index, queue_index: None, }); } let output_lamports = lamports .clone() .unwrap_or_else(|| vec![None; recipients.len()]); let mut output_compressed_accounts = Vec::new(); for (((recipient, amount), merkle_tree_pubkey), lamports) in recipients .iter() .zip(amounts) .zip(output_merkle_tree_pubkeys) .zip(output_lamports) { let account = TokenTransferOutputData { amount: *amount, owner: *recipient, lamports, merkle_tree: *merkle_tree_pubkey, }; sum_input_amounts -= amount; output_compressed_accounts.push(account); } // add change compressed account if tokens are left if sum_input_amounts > 0 { let account = TokenTransferOutputData { amount: sum_input_amounts, owner: from.pubkey(), lamports: None, merkle_tree: *output_merkle_tree_pubkeys.last().unwrap(), }; output_compressed_accounts.push(account); } let input_merkle_tree_pubkeys: Vec<Pubkey> = input_merkle_tree_context .iter() .map(|x| x.merkle_tree_pubkey) .collect(); println!("{:?}", input_compressed_accounts); println!( "input_compressed_account_hashes: {:?}", input_compressed_account_hashes ); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( Some(&input_compressed_account_hashes), Some(&input_merkle_tree_pubkeys), None, None, rpc, ) .await; output_compressed_accounts.sort_by(|a, b| a.merkle_tree.cmp(&b.merkle_tree)); let delegate_pubkey = if delegate_is_signer { Some(payer.pubkey()) } else { None }; let authority_signer = if delegate_is_signer { payer } else { from }; let instruction = create_transfer_instruction( &payer.pubkey(), &authority_signer.pubkey(), // authority &input_merkle_tree_context, &output_compressed_accounts, &proof_rpc_result.root_indices, &Some(proof_rpc_result.proof), &input_compressed_account_token_data, // input_token_data &input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) .cloned() .collect::<Vec<_>>(), *mint, delegate_pubkey, // owner_if_delegate_change_account_index false, // is_compress None, // 
compression_amount None, // token_pool_pda None, // compress_or_decompress_token_account true, delegate_change_account_index, None, token_22, ) .unwrap(); let sum_input_lamports = input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account.lamports) .sum::<u64>(); let sum_output_lamports = output_compressed_accounts .iter() .map(|x| x.lamports.unwrap_or(0)) .sum::<u64>(); let sum_output_amounts = output_compressed_accounts .iter() .map(|x| x.amount) .sum::<u64>(); let output_merkle_tree_pubkeys = if sum_input_lamports > sum_output_lamports || sum_input_amounts > sum_output_amounts && delegate_is_signer { let mut output_merkle_tree_pubkeys = output_merkle_tree_pubkeys.to_vec(); output_merkle_tree_pubkeys.push(*output_merkle_tree_pubkeys.last().unwrap()); if let Some(lamports) = &mut lamports { if sum_input_lamports != sum_output_lamports { lamports.push(Some(sum_input_lamports - sum_output_lamports)); } else { lamports.push(None); } } output_merkle_tree_pubkeys } else { output_merkle_tree_pubkeys.to_vec() }; let output_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(output_merkle_tree_pubkeys.as_slice()); let input_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys); let snapshots = get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await; let input_snapshots = get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await; let (event, _signature, _) = rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &payer.pubkey(), &[payer, authority_signer], transaction_params, ) .await .unwrap() .unwrap(); let (created_change_output_account, created_token_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); let delegates = if let Some(index) = delegate_change_account_index { let mut delegates = vec![None; created_token_output_accounts.len()]; delegates[index as usize] = Some(payer.pubkey()); 
Some(delegates) } else { None }; let mut created_output_accounts = Vec::new(); created_token_output_accounts.iter().for_each(|x| { created_output_accounts.push(x.compressed_account.clone()); }); created_change_output_account.iter().for_each(|x| { created_output_accounts.push(x.clone()); }); assert_transfer( rpc, test_indexer, &output_compressed_accounts, created_output_accounts.as_slice(), lamports, &input_compressed_account_hashes, &snapshots, &input_snapshots, &event, delegates, ) .await; } #[allow(clippy::too_many_arguments)] pub async fn decompress_test<R: RpcConnection, I: Indexer<R>>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, input_compressed_accounts: Vec<TokenDataWithContext>, amount: u64, output_merkle_tree_pubkey: &Pubkey, recipient_token_account: &Pubkey, transaction_params: Option<TransactionParams>, is_token_22: bool, ) { let max_amount: u64 = input_compressed_accounts .iter() .map(|x| x.token_data.amount) .sum(); let change_out_compressed_account = TokenTransferOutputData { amount: max_amount - amount, owner: payer.pubkey(), lamports: None, merkle_tree: *output_merkle_tree_pubkey, }; let input_compressed_account_hashes = input_compressed_accounts .iter() .map(|x| x.compressed_account.hash().unwrap()) .collect::<Vec<_>>(); let input_merkle_tree_pubkeys = input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey) .collect::<Vec<_>>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( Some(&input_compressed_account_hashes), Some(&input_merkle_tree_pubkeys), None, None, rpc, ) .await; let mint = input_compressed_accounts[0].token_data.mint; let instruction = create_transfer_instruction( &rpc.get_payer().pubkey(), &payer.pubkey(), // authority &input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) .collect::<Vec<_>>(), // input_compressed_account_merkle_tree_pubkeys &[change_out_compressed_account], // output_compressed_accounts 
&proof_rpc_result.root_indices, // root_indices &Some(proof_rpc_result.proof), input_compressed_accounts .iter() .map(|x| x.token_data.clone()) .collect::<Vec<_>>() .as_slice(), // input_token_data &input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) .cloned() .collect::<Vec<_>>(), mint, // mint None, // owner_if_delegate_change_account_index false, // is_compress Some(amount), // compression_amount Some(get_token_pool_pda(&mint)), // token_pool_pda Some(*recipient_token_account), // compress_or_decompress_token_account true, None, None, is_token_22, ) .unwrap(); let output_merkle_tree_pubkeys = vec![*output_merkle_tree_pubkey]; let output_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys); let input_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys); let output_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await; let input_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await; let recipient_token_account_data_pre = spl_token::state::Account::unpack( &rpc.get_account(*recipient_token_account) .await .unwrap() .unwrap() .data, ) .unwrap(); let context_payer = rpc.get_payer().insecure_clone(); let (event, _signature, _) = rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &context_payer.pubkey(), &[&context_payer, payer], transaction_params, ) .await .unwrap() .unwrap(); let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); assert_transfer( rpc, test_indexer, &[change_out_compressed_account], created_output_accounts .iter() .map(|x| x.compressed_account.clone()) .collect::<Vec<_>>() .as_slice(), None, input_compressed_account_hashes.as_slice(), &output_merkle_tree_test_snapshots, &input_merkle_tree_test_snapshots, &event, None, ) .await; let 
recipient_token_account_data = spl_token::state::Account::unpack( &rpc.get_account(*recipient_token_account) .await .unwrap() .unwrap() .data, ) .unwrap(); assert_eq!( recipient_token_account_data.amount, recipient_token_account_data_pre.amount + amount ); } #[allow(clippy::too_many_arguments)] pub async fn perform_compress_spl_token_account<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &mut I, payer: &Keypair, token_owner: &Keypair, mint: &Pubkey, token_account: &Pubkey, merkle_tree_pubkey: &Pubkey, remaining_amount: Option<u64>, is_token_22: bool, ) -> Result<(), RpcError> { let pre_token_account_amount = spl_token::state::Account::unpack( &rpc.get_account(*token_account).await.unwrap().unwrap().data, ) .unwrap() .amount; let instruction = create_compress_spl_token_account_instruction( &token_owner.pubkey(), remaining_amount, None, &payer.pubkey(), &token_owner.pubkey(), mint, merkle_tree_pubkey, token_account, is_token_22, ); let (event, _, _) = rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &token_owner.pubkey(), &[payer, token_owner], None, ) .await? 
.unwrap(); test_indexer.add_event_and_compressed_accounts(&event); let created_compressed_token_account = test_indexer.get_compressed_token_accounts_by_owner(&token_owner.pubkey())[0].clone(); let expected_token_data = TokenData { amount: pre_token_account_amount - remaining_amount.unwrap_or_default(), mint: *mint, owner: token_owner.pubkey(), state: AccountState::Initialized, delegate: None, tlv: None, }; assert_eq!( created_compressed_token_account.token_data, expected_token_data ); assert_eq!( created_compressed_token_account .compressed_account .merkle_context .merkle_tree_pubkey, *merkle_tree_pubkey ); if let Some(remaining_amount) = remaining_amount { let post_token_account_amount = spl_token::state::Account::unpack( &rpc.get_account(*token_account).await.unwrap().unwrap().data, ) .unwrap() .amount; assert_eq!(post_token_account_amount, remaining_amount); } Ok(()) } #[allow(clippy::too_many_arguments)] pub async fn compress_test<R: RpcConnection, I: Indexer<R>>( payer: &Keypair, rpc: &mut R, test_indexer: &mut I, amount: u64, mint: &Pubkey, output_merkle_tree_pubkey: &Pubkey, sender_token_account: &Pubkey, transaction_params: Option<TransactionParams>, is_token_22: bool, ) { let output_compressed_account = TokenTransferOutputData { amount, owner: payer.pubkey(), lamports: None, merkle_tree: *output_merkle_tree_pubkey, }; let instruction = create_transfer_instruction( &rpc.get_payer().pubkey(), &payer.pubkey(), // authority &Vec::new(), // input_compressed_account_merkle_tree_pubkeys &[output_compressed_account], // output_compressed_accounts &Vec::new(), // root_indices &None, &Vec::new(), // input_token_data &Vec::new(), // input_compressed_accounts *mint, // mint None, // owner_if_delegate_is_signer true, // is_compress Some(amount), // compression_amount Some(get_token_pool_pda(mint)), // token_pool_pda Some(*sender_token_account), // compress_or_decompress_token_account true, None, None, is_token_22, ) .unwrap(); let output_merkle_tree_pubkeys = 
vec![*output_merkle_tree_pubkey]; let output_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys); let output_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await; let input_merkle_tree_test_snapshots = Vec::new(); let recipient_token_account_data_pre = spl_token::state::Account::unpack( &rpc.get_account(*sender_token_account) .await .unwrap() .unwrap() .data, ) .unwrap(); let context_payer = rpc.get_payer().insecure_clone(); let (event, _signature, _) = rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &payer.pubkey(), &[&context_payer, payer], transaction_params, ) .await .unwrap() .unwrap(); let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); assert_transfer( rpc, test_indexer, &[output_compressed_account], created_output_accounts .iter() .map(|x| x.compressed_account.clone()) .collect::<Vec<_>>() .as_slice(), None, Vec::new().as_slice(), &output_merkle_tree_test_snapshots, &input_merkle_tree_test_snapshots, &event, None, ) .await; let recipient_token_account_data = spl_token::state::Account::unpack( &rpc.get_account(*sender_token_account) .await .unwrap() .unwrap() .data, ) .unwrap(); assert_eq!( recipient_token_account_data.amount, recipient_token_account_data_pre.amount - amount ); } #[allow(clippy::too_many_arguments)] pub async fn approve_test<R: RpcConnection, I: Indexer<R>>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, input_compressed_accounts: Vec<TokenDataWithContext>, delegated_amount: u64, delegate_lamports: Option<u64>, delegate: &Pubkey, delegated_compressed_account_merkle_tree: &Pubkey, change_compressed_account_merkle_tree: &Pubkey, transaction_params: Option<TransactionParams>, ) { let input_compressed_account_hashes = input_compressed_accounts .iter() .map(|x| x.compressed_account.hash().unwrap()) .collect::<Vec<_>>(); let input_merkle_tree_pubkeys = 
input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey) .collect::<Vec<_>>(); println!( "input_compressed_account_hashes: {:?}", input_compressed_account_hashes ); println!("input compressed accounts: {:?}", input_compressed_accounts); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( Some(&input_compressed_account_hashes), Some(&input_merkle_tree_pubkeys), None, None, rpc, ) .await; let mint = input_compressed_accounts[0].token_data.mint; let inputs = CreateApproveInstructionInputs { fee_payer: rpc.get_payer().pubkey(), authority: authority.pubkey(), input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) .cloned() .collect::<Vec<_>>(), mint, delegated_amount, delegate_lamports, delegated_compressed_account_merkle_tree: *delegated_compressed_account_merkle_tree, change_compressed_account_merkle_tree: *change_compressed_account_merkle_tree, delegate: *delegate, root_indices: proof_rpc_result.root_indices, proof: proof_rpc_result.proof, }; let instruction = create_approve_instruction(inputs).unwrap(); let mut output_merkle_tree_pubkeys = vec![*delegated_compressed_account_merkle_tree]; let input_amount = input_compressed_accounts .iter() .map(|x| x.token_data.amount) .sum::<u64>(); let change_amount = input_amount - delegated_amount; let input_lamports = input_compressed_accounts .iter() .map(|x| x.compressed_account.compressed_account.lamports) .sum::<u64>(); let (change_lamports, change_lamports_greater_zero) = if let Some(delegate_lamports) = delegate_lamports { let change_lamports = input_lamports - delegate_lamports; let option_change_lamports = if change_lamports > 0 { Some(change_lamports) } else { None }; ( 
Some(vec![Some(delegate_lamports), option_change_lamports]), change_lamports > 0, ) } else if input_lamports > 0 { (Some(vec![None, Some(input_lamports)]), true) } else { (None, false) }; if change_lamports_greater_zero || change_amount > 0 { output_merkle_tree_pubkeys.push(*change_compressed_account_merkle_tree); } let output_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys); let output_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await; let input_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys); let input_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await; let context_payer = rpc.get_payer().insecure_clone(); let (event, _signature, _) = rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &context_payer.pubkey(), &[&context_payer, authority], transaction_params, ) .await .unwrap() .unwrap(); let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); let expected_delegated_token_data = TokenData { mint, owner: authority.pubkey(), amount: delegated_amount, delegate: Some(*delegate), state: AccountState::Initialized, tlv: None, }; assert_eq!( expected_delegated_token_data, created_output_accounts[0].token_data ); let mut expected_token_data = vec![expected_delegated_token_data]; let mut delegates = vec![Some(*delegate)]; if delegated_amount != input_amount { let expected_change_token_data = TokenData { mint, owner: authority.pubkey(), amount: change_amount, delegate: None, state: AccountState::Initialized, tlv: None, }; assert_eq!( expected_change_token_data, created_output_accounts[1].token_data ); expected_token_data.push(expected_change_token_data); delegates.push(None); } let expected_compressed_output_accounts = create_expected_token_output_data(expected_token_data, 
&output_merkle_tree_pubkeys); assert_transfer( rpc, test_indexer, expected_compressed_output_accounts.as_slice(), created_output_accounts .iter() .map(|x| x.compressed_account.clone()) .collect::<Vec<_>>() .as_slice(), change_lamports, input_compressed_account_hashes.as_slice(), &output_merkle_tree_test_snapshots, &input_merkle_tree_test_snapshots, &event, Some(delegates), ) .await; } #[allow(clippy::too_many_arguments)] pub async fn revoke_test<R: RpcConnection, I: Indexer<R>>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, input_compressed_accounts: Vec<TokenDataWithContext>, output_account_merkle_tree: &Pubkey, transaction_params: Option<TransactionParams>, ) { let input_compressed_account_hashes = input_compressed_accounts .iter() .map(|x| x.compressed_account.hash().unwrap()) .collect::<Vec<_>>(); let input_merkle_tree_pubkeys = input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey) .collect::<Vec<_>>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( Some(&input_compressed_account_hashes), Some(&input_merkle_tree_pubkeys), None, None, rpc, ) .await; let mint = input_compressed_accounts[0].token_data.mint; let inputs = CreateRevokeInstructionInputs { fee_payer: rpc.get_payer().pubkey(), authority: authority.pubkey(), input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) .cloned() .collect::<Vec<_>>(), mint, output_account_merkle_tree: *output_account_merkle_tree, root_indices: proof_rpc_result.root_indices, proof: proof_rpc_result.proof, }; let instruction = create_revoke_instruction(inputs).unwrap(); let output_merkle_tree_pubkeys = vec![*output_account_merkle_tree]; let output_merkle_tree_accounts = 
test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys); let input_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys); let output_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await; let input_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await; let context_payer = rpc.get_payer().insecure_clone(); let (event, _signature, _) = rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &context_payer.pubkey(), &[&context_payer, authority], transaction_params, ) .await .unwrap() .unwrap(); let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); let input_amount = input_compressed_accounts .iter() .map(|x| x.token_data.amount) .sum::<u64>(); let expected_token_data = TokenData { mint, owner: authority.pubkey(), amount: input_amount, delegate: None, state: AccountState::Initialized, tlv: None, }; assert_eq!(expected_token_data, created_output_accounts[0].token_data); let expected_compressed_output_accounts = create_expected_token_output_data(vec![expected_token_data], &output_merkle_tree_pubkeys); let sum_inputs = input_compressed_accounts .iter() .map(|x| x.compressed_account.compressed_account.lamports) .sum::<u64>(); let change_lamports = if sum_inputs > 0 { Some(vec![Some(sum_inputs)]) } else { None }; assert_transfer( rpc, test_indexer, expected_compressed_output_accounts.as_slice(), created_output_accounts .iter() .map(|x| x.compressed_account.clone()) .collect::<Vec<_>>() .as_slice(), change_lamports, input_compressed_account_hashes.as_slice(), &output_merkle_tree_test_snapshots, &input_merkle_tree_test_snapshots, &event, None, ) .await; } pub async fn freeze_test<R: RpcConnection, I: Indexer<R>>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, input_compressed_accounts: Vec<TokenDataWithContext>, outputs_merkle_tree: 
&Pubkey, transaction_params: Option<TransactionParams>, ) { freeze_or_thaw_test::<R, true, I>( authority, rpc, test_indexer, input_compressed_accounts, outputs_merkle_tree, transaction_params, ) .await; } pub async fn thaw_test<R: RpcConnection, I: Indexer<R>>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, input_compressed_accounts: Vec<TokenDataWithContext>, outputs_merkle_tree: &Pubkey, transaction_params: Option<TransactionParams>, ) { freeze_or_thaw_test::<R, false, I>( authority, rpc, test_indexer, input_compressed_accounts, outputs_merkle_tree, transaction_params, ) .await; } pub async fn freeze_or_thaw_test<R: RpcConnection, const FREEZE: bool, I: Indexer<R>>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, input_compressed_accounts: Vec<TokenDataWithContext>, outputs_merkle_tree: &Pubkey, transaction_params: Option<TransactionParams>, ) { let input_compressed_account_hashes = input_compressed_accounts .iter() .map(|x| x.compressed_account.hash().unwrap()) .collect::<Vec<_>>(); let input_merkle_tree_pubkeys = input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey) .collect::<Vec<_>>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( Some(&input_compressed_account_hashes), Some(&input_merkle_tree_pubkeys), None, None, rpc, ) .await; let mint = input_compressed_accounts[0].token_data.mint; let inputs = CreateInstructionInputs { fee_payer: rpc.get_payer().pubkey(), authority: authority.pubkey(), input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) .cloned() .collect::<Vec<_>>(), outputs_merkle_tree: *outputs_merkle_tree, root_indices: proof_rpc_result.root_indices, proof: proof_rpc_result.proof, }; let 
instruction = create_instruction::<FREEZE>(inputs).unwrap(); let output_merkle_tree_pubkeys = vec![*outputs_merkle_tree; input_compressed_account_hashes.len()]; let output_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys); let input_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys); let output_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await; let input_merkle_tree_test_snapshots = get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await; let context_payer = rpc.get_payer().insecure_clone(); let (event, _signature, _) = rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &context_payer.pubkey(), &[&context_payer, authority], transaction_params, ) .await .unwrap() .unwrap(); let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); let mut delegates = Vec::new(); let mut expected_output_accounts = Vec::new(); for account in input_compressed_accounts.iter() { let state = if FREEZE { AccountState::Frozen } else { AccountState::Initialized }; let expected_token_data = TokenData { mint, owner: input_compressed_accounts[0].token_data.owner, amount: account.token_data.amount, delegate: account.token_data.delegate, state, tlv: None, }; if let Some(delegate) = account.token_data.delegate { delegates.push(Some(delegate)); } else { delegates.push(None); } expected_output_accounts.push(expected_token_data); } let expected_compressed_output_accounts = create_expected_token_output_data(expected_output_accounts, &output_merkle_tree_pubkeys); let sum_inputs = input_compressed_accounts .iter() .map(|x| x.compressed_account.compressed_account.lamports) .sum::<u64>(); let change_lamports = if sum_inputs > 0 { let mut change_lamports = Vec::new(); for account in input_compressed_accounts.iter() { if 
account.compressed_account.compressed_account.lamports > 0 { change_lamports.push(Some(account.compressed_account.compressed_account.lamports)); } else { change_lamports.push(None); } } Some(change_lamports) } else { None }; assert_transfer( rpc, test_indexer, expected_compressed_output_accounts.as_slice(), created_output_accounts .iter() .map(|x| x.compressed_account.clone()) .collect::<Vec<_>>() .as_slice(), change_lamports, input_compressed_account_hashes.as_slice(), &output_merkle_tree_test_snapshots, &input_merkle_tree_test_snapshots, &event, Some(delegates), ) .await; } #[allow(clippy::too_many_arguments)] pub async fn burn_test<R: RpcConnection, I: Indexer<R>>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, input_compressed_accounts: Vec<TokenDataWithContext>, change_account_merkle_tree: &Pubkey, burn_amount: u64, signer_is_delegate: bool, transaction_params: Option<TransactionParams>, is_token_22: bool, ) { let ( input_compressed_account_hashes, input_merkle_tree_pubkeys, mint, output_amount, instruction, ) = create_burn_test_instruction( authority, rpc, test_indexer, &input_compressed_accounts, change_account_merkle_tree, burn_amount, signer_is_delegate, BurnInstructionMode::Normal, is_token_22, ) .await; let output_merkle_tree_pubkeys = vec![*change_account_merkle_tree; 1]; let output_merkle_tree_test_snapshots = if output_amount > 0 { let output_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys); get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await } else { Vec::new() }; let token_pool_pda_address = get_token_pool_pda(&mint); let pre_token_pool_account = rpc .get_account(token_pool_pda_address) .await .unwrap() .unwrap(); let pre_token_pool_balance = spl_token::state::Account::unpack(&pre_token_pool_account.data) .unwrap() .amount; let input_merkle_tree_accounts = test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys); let input_merkle_tree_test_snapshots 
= get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await; let context_payer = rpc.get_payer().insecure_clone(); let (event, _signature, _) = rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &context_payer.pubkey(), &[&context_payer, authority], transaction_params, ) .await .unwrap() .unwrap(); let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); let mut delegates = Vec::new(); let mut expected_output_accounts = Vec::new(); let delegate = if signer_is_delegate { Some(authority.pubkey()) } else { None }; if output_amount > 0 { let expected_token_data = TokenData { mint, owner: input_compressed_accounts[0].token_data.owner, amount: output_amount, delegate, state: AccountState::Initialized, tlv: None, }; if let Some(delegate) = expected_token_data.delegate { delegates.push(Some(delegate)); } else { delegates.push(None); } expected_output_accounts.push(expected_token_data); } let expected_compressed_output_accounts = create_expected_token_output_data(expected_output_accounts, &output_merkle_tree_pubkeys); let sum_inputs = input_compressed_accounts .iter() .map(|x| x.compressed_account.compressed_account.lamports) .sum::<u64>(); let change_lamports = if sum_inputs > 0 { Some(vec![Some(sum_inputs)]) } else { None }; assert_transfer( rpc, test_indexer, expected_compressed_output_accounts.as_slice(), created_output_accounts .iter() .map(|x| x.compressed_account.clone()) .collect::<Vec<_>>() .as_slice(), change_lamports, input_compressed_account_hashes.as_slice(), &output_merkle_tree_test_snapshots, &input_merkle_tree_test_snapshots, &event, Some(delegates), ) .await; let post_token_pool_account = rpc .get_account(token_pool_pda_address) .await .unwrap() .unwrap(); let post_token_pool_balance = spl_token::state::Account::unpack(&post_token_pool_account.data) .unwrap() .amount; assert_eq!( post_token_pool_balance, pre_token_pool_balance - burn_amount ); } #[derive(Debug, 
Clone, PartialEq)] pub enum BurnInstructionMode { Normal, InvalidProof, InvalidMint, } #[allow(clippy::too_many_arguments)] pub async fn create_burn_test_instruction<R: RpcConnection, I: Indexer<R>>( authority: &Keypair, rpc: &mut R, test_indexer: &mut I, input_compressed_accounts: &[TokenDataWithContext], change_account_merkle_tree: &Pubkey, burn_amount: u64, signer_is_delegate: bool, mode: BurnInstructionMode, is_token_22: bool, ) -> (Vec<[u8; 32]>, Vec<Pubkey>, Pubkey, u64, Instruction) { let input_compressed_account_hashes = input_compressed_accounts .iter() .map(|x| x.compressed_account.hash().unwrap()) .collect::<Vec<_>>(); let input_merkle_tree_pubkeys = input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey) .collect::<Vec<_>>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( Some(&input_compressed_account_hashes), Some(&input_merkle_tree_pubkeys), None, None, rpc, ) .await; let mint = if mode == BurnInstructionMode::InvalidMint { Pubkey::new_unique() } else { input_compressed_accounts[0].token_data.mint }; let proof = if mode == BurnInstructionMode::InvalidProof { CompressedProof { a: proof_rpc_result.proof.a, b: proof_rpc_result.proof.b, c: proof_rpc_result.proof.a, // flip c to make proof invalid but not run into decompress errors } } else { proof_rpc_result.proof }; let inputs = CreateBurnInstructionInputs { fee_payer: rpc.get_payer().pubkey(), authority: authority.pubkey(), input_merkle_contexts: input_compressed_accounts .iter() .map(|x| x.compressed_account.merkle_context) .collect(), input_token_data: input_compressed_accounts .iter() .map(|x| x.token_data.clone()) .collect(), input_compressed_accounts: input_compressed_accounts .iter() .map(|x| &x.compressed_account.compressed_account) .cloned() .collect::<Vec<_>>(), change_account_merkle_tree: *change_account_merkle_tree, root_indices: proof_rpc_result.root_indices, proof, mint, signer_is_delegate, burn_amount, is_token_22, }; 
let input_amount_sum = input_compressed_accounts .iter() .map(|x| x.token_data.amount) .sum::<u64>(); let output_amount = input_amount_sum - burn_amount; let instruction = create_burn_instruction(inputs).unwrap(); ( input_compressed_account_hashes, input_merkle_tree_pubkeys, mint, output_amount, instruction, ) } pub fn create_expected_token_output_data( expected_token_data: Vec<TokenData>, merkle_tree_pubkeys: &[Pubkey], ) -> Vec<TokenTransferOutputData> { let mut expected_compressed_output_accounts = Vec::new(); for (token_data, merkle_tree_pubkey) in expected_token_data.iter().zip(merkle_tree_pubkeys.iter()) { expected_compressed_output_accounts.push(TokenTransferOutputData { owner: token_data.owner, amount: token_data.amount, merkle_tree: *merkle_tree_pubkey, lamports: None, }); } expected_compressed_output_accounts }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_queue.rs
use account_compression::{QueueAccount, QueueMetadata, QueueType, RolloverMetadata}; use forester_utils::{get_hash_set, AccountZeroCopy}; use light_client::rpc::RpcConnection; use light_utils::fee::compute_rollover_fee; use solana_sdk::pubkey::Pubkey; #[allow(clippy::too_many_arguments)] pub async fn assert_address_queue_initialized<R: RpcConnection>( rpc: &mut R, queue_pubkey: &Pubkey, queue_config: &account_compression::AddressQueueConfig, associated_merkle_tree_pubkey: &Pubkey, associated_tree_config: &account_compression::AddressMerkleTreeConfig, expected_queue_type: QueueType, expected_index: u64, expected_program_owner: Option<Pubkey>, expected_forester: Option<Pubkey>, payer_pubkey: &Pubkey, ) { assert_address_queue( rpc, queue_pubkey, queue_config, associated_merkle_tree_pubkey, associated_tree_config, expected_queue_type, expected_index, expected_program_owner, expected_forester, None, None, payer_pubkey, ) .await; } #[allow(clippy::too_many_arguments)] pub async fn assert_nullifier_queue_initialized<R: RpcConnection>( rpc: &mut R, queue_pubkey: &Pubkey, queue_config: &account_compression::NullifierQueueConfig, associated_merkle_tree_pubkey: &Pubkey, associated_tree_config: &account_compression::StateMerkleTreeConfig, expected_queue_type: QueueType, expected_index: u64, expected_program_owner: Option<Pubkey>, expected_forester: Option<Pubkey>, payer_pubkey: &Pubkey, ) { let associated_tree_config = account_compression::AddressMerkleTreeConfig { height: associated_tree_config.height, changelog_size: associated_tree_config.changelog_size, // not asserted here address_changelog_size: 0, roots_size: associated_tree_config.roots_size, canopy_depth: associated_tree_config.canopy_depth, rollover_threshold: associated_tree_config.rollover_threshold, close_threshold: associated_tree_config.close_threshold, network_fee: associated_tree_config.network_fee, }; // The address queue is the only account that collects the rollover fees. 
let expected_rollover_fee = 0; assert_queue( rpc, queue_pubkey, queue_config, associated_merkle_tree_pubkey, &associated_tree_config, expected_rollover_fee, expected_queue_type, expected_index, expected_program_owner, expected_forester, None, None, payer_pubkey, ) .await; } #[allow(clippy::too_many_arguments)] pub async fn assert_address_queue<R: RpcConnection>( rpc: &mut R, queue_pubkey: &Pubkey, queue_config: &account_compression::AddressQueueConfig, associated_merkle_tree_pubkey: &Pubkey, associated_tree_config: &account_compression::AddressMerkleTreeConfig, expected_queue_type: QueueType, expected_index: u64, expected_program_owner: Option<Pubkey>, expected_forester: Option<Pubkey>, expected_rolledover_slot: Option<u64>, expected_next_queue: Option<Pubkey>, payer_pubkey: &Pubkey, ) { let balance_merkle_tree = rpc .get_account(*associated_merkle_tree_pubkey) .await .unwrap() .unwrap() .lamports; let balance_queue = rpc .get_account(*queue_pubkey) .await .unwrap() .unwrap() .lamports; // The address queue is the only account that collects the rollover fees. 
let expected_rollover_fee = match associated_tree_config.rollover_threshold { Some(threshold) => { compute_rollover_fee(threshold, associated_tree_config.height, balance_queue).unwrap() + compute_rollover_fee( threshold, associated_tree_config.height, balance_merkle_tree, ) .unwrap() } None => 0, }; assert_queue( rpc, queue_pubkey, queue_config, associated_merkle_tree_pubkey, associated_tree_config, expected_rollover_fee, expected_queue_type, expected_index, expected_program_owner, expected_forester, expected_rolledover_slot, expected_next_queue, payer_pubkey, ) .await; } #[allow(clippy::too_many_arguments)] pub async fn assert_queue<R: RpcConnection>( rpc: &mut R, queue_pubkey: &Pubkey, queue_config: &account_compression::AddressQueueConfig, associated_merkle_tree_pubkey: &Pubkey, associated_tree_config: &account_compression::AddressMerkleTreeConfig, expected_rollover_fee: u64, expected_queue_type: QueueType, expected_index: u64, expected_program_owner: Option<Pubkey>, expected_forester: Option<Pubkey>, expected_rolledover_slot: Option<u64>, expected_next_queue: Option<Pubkey>, payer_pubkey: &Pubkey, ) { let queue = AccountZeroCopy::<account_compression::QueueAccount>::new(rpc, *queue_pubkey).await; let queue_account = queue.deserialized(); let expected_rollover_meta_data = RolloverMetadata { index: expected_index, rolledover_slot: expected_rolledover_slot.unwrap_or(u64::MAX), rollover_threshold: associated_tree_config .rollover_threshold .unwrap_or_default(), network_fee: queue_config.network_fee.unwrap_or_default(), rollover_fee: expected_rollover_fee, close_threshold: associated_tree_config.close_threshold.unwrap_or(u64::MAX), additional_bytes: 0, }; let expected_access_meta_data = account_compression::AccessMetadata { owner: *payer_pubkey, program_owner: expected_program_owner.unwrap_or_default(), forester: expected_forester.unwrap_or_default(), }; let expected_queue_meta_data = QueueMetadata { access_metadata: expected_access_meta_data, rollover_metadata: 
expected_rollover_meta_data, associated_merkle_tree: *associated_merkle_tree_pubkey, next_queue: expected_next_queue.unwrap_or_default(), queue_type: expected_queue_type as u64, }; assert_eq!(queue_account.metadata, expected_queue_meta_data); let queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, *queue_pubkey).await }; assert_eq!(queue.get_capacity(), queue_config.capacity as usize); assert_eq!( queue.sequence_threshold, queue_config.sequence_threshold as usize ); }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_token_tx.rs
use crate::assert_compressed_tx::{ assert_merkle_tree_after_tx, assert_nullifiers_exist_in_hash_sets, assert_public_transaction_event, MerkleTreeTestSnapShot, }; use anchor_lang::AnchorSerialize; use forester_utils::indexer::{Indexer, TokenDataWithContext}; use light_client::rpc::RpcConnection; use light_compressed_token::{ get_token_pool_pda, process_transfer::{get_cpi_authority_pda, TokenTransferOutputData}, }; use light_system_program::sdk::{ compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent, }; use solana_sdk::{program_pack::Pack, pubkey::Pubkey}; /// General token tx assert: /// 1. outputs created /// 2. inputs nullified /// 3. Public Transaction event emitted correctly /// 4. Merkle tree was updated correctly /// 5. TODO: Fees have been paid (after fee refactor) /// 6. Check compression amount was transferred (outside of this function) /// No addresses in token transactions #[allow(clippy::too_many_arguments)] pub async fn assert_transfer<R: RpcConnection, I: Indexer<R>>( context: &mut R, test_indexer: &mut I, out_compressed_accounts: &[TokenTransferOutputData], created_output_compressed_accounts: &[CompressedAccountWithMerkleContext], lamports: Option<Vec<Option<u64>>>, input_compressed_account_hashes: &[[u8; 32]], output_merkle_tree_snapshots: &[MerkleTreeTestSnapShot], input_merkle_tree_test_snapshots: &[MerkleTreeTestSnapShot], event: &PublicTransactionEvent, delegates: Option<Vec<Option<Pubkey>>>, ) { // CHECK 1 assert_compressed_token_accounts( test_indexer, out_compressed_accounts, lamports, output_merkle_tree_snapshots, delegates, ); // CHECK 2 assert_nullifiers_exist_in_hash_sets( context, input_merkle_tree_test_snapshots, input_compressed_account_hashes, ) .await; let vec; let input_compressed_account_hashes = if input_compressed_account_hashes.is_empty() { None } else { vec = input_compressed_account_hashes.to_vec(); Some(&vec) }; // CHECK 4 let sequence_numbers = assert_merkle_tree_after_tx(context, 
output_merkle_tree_snapshots, test_indexer).await; // CHECK 3 assert_public_transaction_event( event, input_compressed_account_hashes, output_merkle_tree_snapshots .iter() .map(|x| x.accounts) .collect::<Vec<_>>() .as_slice(), &created_output_compressed_accounts .iter() .map(|x| x.merkle_context.leaf_index) .collect::<Vec<_>>(), None, false, None, sequence_numbers, ); } pub fn assert_compressed_token_accounts<R: RpcConnection, I: Indexer<R>>( test_indexer: &mut I, out_compressed_accounts: &[TokenTransferOutputData], lamports: Option<Vec<Option<u64>>>, output_merkle_tree_snapshots: &[MerkleTreeTestSnapShot], delegates: Option<Vec<Option<Pubkey>>>, ) { let delegates = delegates.unwrap_or(vec![None; out_compressed_accounts.len()]); let mut tree = Pubkey::default(); let mut index = 0; let output_lamports = lamports.unwrap_or(vec![None; out_compressed_accounts.len()]); println!("out_compressed_accounts {:?}", out_compressed_accounts); for (i, out_compressed_account) in out_compressed_accounts.iter().enumerate() { if output_merkle_tree_snapshots[i].accounts.merkle_tree != tree { tree = output_merkle_tree_snapshots[i].accounts.merkle_tree; index = 0; } else { index += 1; } let pos = test_indexer .get_token_compressed_accounts() .iter() .position(|x| { x.token_data.owner == out_compressed_account.owner && x.token_data.amount == out_compressed_account.amount && x.token_data.delegate == delegates[i] }) .expect("transfer recipient compressed account not found in mock indexer"); let transfer_recipient_token_compressed_account = test_indexer.get_token_compressed_accounts()[pos].clone(); assert_eq!( transfer_recipient_token_compressed_account .token_data .amount, out_compressed_account.amount ); assert_eq!( transfer_recipient_token_compressed_account.token_data.owner, out_compressed_account.owner ); assert_eq!( transfer_recipient_token_compressed_account .token_data .delegate, delegates[i] ); let transfer_recipient_compressed_account = transfer_recipient_token_compressed_account 
.compressed_account .clone(); println!( "transfer_recipient_compressed_account {:?}", transfer_recipient_compressed_account ); if i < output_lamports.len() { assert_eq!( transfer_recipient_compressed_account .compressed_account .lamports, output_lamports[i].unwrap_or(0) ); } else if i != output_lamports.len() { // This check accounts for change accounts which are dynamically created onchain. panic!("lamports not found in output_lamports"); } assert!(transfer_recipient_compressed_account .compressed_account .data .is_some()); let mut data = Vec::new(); transfer_recipient_token_compressed_account .token_data .serialize(&mut data) .unwrap(); assert_eq!( transfer_recipient_compressed_account .compressed_account .data .as_ref() .unwrap() .data, data ); assert_eq!( transfer_recipient_compressed_account .compressed_account .owner, light_compressed_token::ID ); if !test_indexer .get_token_compressed_accounts() .iter() .any(|x| { x.compressed_account.merkle_context.leaf_index as usize == output_merkle_tree_snapshots[i].next_index + index }) { println!( "token_compressed_accounts {:?}", test_indexer.get_token_compressed_accounts() ); println!("snapshot {:?}", output_merkle_tree_snapshots[i]); println!("index {:?}", index); panic!("transfer recipient compressed account not found in mock indexer"); }; } } #[allow(clippy::too_many_arguments)] pub async fn assert_mint_to<'a, R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &'a mut I, recipients: &[Pubkey], mint: Pubkey, amounts: &[u64], snapshots: &[MerkleTreeTestSnapShot], created_token_accounts: &[TokenDataWithContext], previous_mint_supply: u64, previous_sol_pool_amount: u64, ) { let mut created_token_accounts = created_token_accounts.to_vec(); for (recipient, amount) in recipients.iter().zip(amounts) { let pos = created_token_accounts .iter() .position(|x| { x.token_data.owner == *recipient && x.token_data.amount == *amount && x.token_data.mint == mint && x.token_data.delegate.is_none() }) .expect("Mint to failed 
to create expected compressed token account."); created_token_accounts.remove(pos); } assert_merkle_tree_after_tx(rpc, snapshots, test_indexer).await; let mint_account: spl_token::state::Mint = spl_token::state::Mint::unpack(&rpc.get_account(mint).await.unwrap().unwrap().data) .unwrap(); let sum_amounts = amounts.iter().sum::<u64>(); assert_eq!(mint_account.supply, previous_mint_supply + sum_amounts); let pool = get_token_pool_pda(&mint); let pool_account = spl_token::state::Account::unpack(&rpc.get_account(pool).await.unwrap().unwrap().data) .unwrap(); assert_eq!(pool_account.amount, previous_sol_pool_amount + sum_amounts); } pub async fn assert_create_mint<R: RpcConnection>( context: &mut R, authority: &Pubkey, mint: &Pubkey, pool: &Pubkey, ) { let mint_account: spl_token::state::Mint = spl_token::state::Mint::unpack(&context.get_account(*mint).await.unwrap().unwrap().data) .unwrap(); assert_eq!(mint_account.supply, 0); assert_eq!(mint_account.decimals, 2); assert_eq!(mint_account.mint_authority.unwrap(), *authority); assert_eq!(mint_account.freeze_authority, Some(*authority).into()); assert!(mint_account.is_initialized); let mint_account: spl_token::state::Account = spl_token::state::Account::unpack(&context.get_account(*pool).await.unwrap().unwrap().data) .unwrap(); assert_eq!(mint_account.amount, 0); assert_eq!(mint_account.delegate, None.into()); assert_eq!(mint_account.mint, *mint); assert_eq!(mint_account.owner, get_cpi_authority_pda().0); }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/e2e_test_env.rs
// Flow: // init indexer // init first keypair // init crank // vec of public Merkle tree NF queue pairs // vec of public address Mt and queue pairs // for i in rounds // randomly add new keypair // for every keypair randomly select whether it does an action // Architecture: // - bundle trees, indexer etc in a E2ETestEnv struct // - methods: // // bundles all general actions // - activate general actions // // bundles all keypair actions // - activate keypair actions // // calls general and keypair actions // - execute round // // every action takes a probability as input // // if you want to execute the action on purpose pass 1 // - method for every action // - add action activation config with default configs // - all enabled // - only spl, only sol, etc // Forester struct // - payer keypair, authority keypair // -methods // - empty nullifier queue // - empty address queue // - rollover Merkle tree // - rollover address Merkle tree // keypair actions: // safeguard every action in case of no balance // 1. compress sol // 2. decompress sol // 2. transfer sol // 3. compress spl // 4. decompress spl // 5. mint spl // 6. 
transfer spl // general actions: // add keypair // create new state Mt // create new address Mt // extension: // keypair actions: // - create pda // - escrow tokens // - delegate, revoke, delegated transaction // general actions: // - create new program owned state Merkle tree and queue // - create new program owned address Merkle tree and queue // minimal start // struct with env and test-indexer // only spl transactions // second pr // refactor sol tests to functions that can be reused // TODO: implement traits for context object and indexer that we can implement with an rpc as well // context trait: send_transaction -> return transaction result, get_account_info -> return account info // indexer trait: get_compressed_accounts_by_owner -> return compressed accounts, // refactor all tests to work with that so that we can run all tests with a test validator and concurrency use light_compressed_token::token_data::AccountState; use light_prover_client::gnark::helpers::{ProofType, ProverConfig}; use light_registry::protocol_config::state::{ProtocolConfig, ProtocolConfigPda}; use light_registry::sdk::create_finalize_registration_instruction; use light_registry::utils::get_protocol_config_pda_address; use light_registry::ForesterConfig; use log::info; use num_bigint::{BigUint, RandBigInt}; use num_traits::Num; use rand::distributions::uniform::{SampleRange, SampleUniform}; use rand::prelude::SliceRandom; use rand::rngs::{StdRng, ThreadRng}; use rand::{Rng, RngCore, SeedableRng}; use solana_sdk::pubkey::Pubkey; use solana_sdk::signature::Keypair; use solana_sdk::signature::Signature; use solana_sdk::signer::{SeedDerivable, Signer}; use spl_token::solana_program::native_token::LAMPORTS_PER_SOL; use crate::address_tree_rollover::{ assert_rolled_over_address_merkle_tree_and_queue, perform_address_merkle_tree_roll_over_forester, perform_state_merkle_tree_roll_over_forester, }; use crate::assert_epoch::{ assert_finalized_epoch_registration, assert_report_work, 
fetch_epoch_and_forester_pdas, }; use crate::spl::{ approve_test, burn_test, compress_test, compressed_transfer_test, create_mint_helper, create_token_account, decompress_test, freeze_test, mint_tokens_helper, revoke_test, thaw_test, }; use crate::state_tree_rollover::assert_rolled_over_pair; use crate::system_program::{ compress_sol_test, create_addresses_test, decompress_sol_test, transfer_compressed_sol_test, }; use crate::test_forester::{empty_address_queue_test, nullify_compressed_accounts}; use account_compression::utils::constants::{ STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT, }; use account_compression::{ AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig, SAFETY_MARGIN, }; use forester_utils::address_merkle_tree_config::{ address_tree_ready_for_rollover, state_tree_ready_for_rollover, }; use forester_utils::forester_epoch::{Epoch, Forester, TreeAccounts, TreeType}; use forester_utils::indexer::{ AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts, StateMerkleTreeBundle, TokenDataWithContext, }; use forester_utils::registry::register_test_forester; use forester_utils::{airdrop_lamports, AccountZeroCopy}; use light_hasher::Poseidon; use light_indexed_merkle_tree::HIGHEST_ADDRESS_PLUS_ONE; use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}; use light_system_program::sdk::compressed_account::CompressedAccountWithMerkleContext; use light_utils::bigint::bigint_to_be_bytes_array; use light_utils::rand::gen_prime; use crate::create_address_merkle_tree_and_queue_account_with_assert; use crate::indexer::TestIndexer; use light_client::rpc::errors::RpcError; use light_client::rpc::RpcConnection; use light_client::transaction_params::{FeeConfig, TransactionParams}; use light_program_test::test_env::{create_state_merkle_tree_and_queue_account, EnvAccounts}; use light_program_test::test_rpc::ProgramTestRpcConnection; use 
light_prover_client::gnark::helpers::ProverMode; pub struct User { pub keypair: Keypair, // Vector of (mint, token account) pub token_accounts: Vec<(Pubkey, Pubkey)>, } #[derive(Debug, Default)] pub struct Stats { pub spl_transfers: u64, pub mints: u64, pub spl_decompress: u64, pub spl_compress: u64, pub sol_transfers: u64, pub sol_decompress: u64, pub sol_compress: u64, pub create_address: u64, pub create_pda: u64, pub create_state_mt: u64, pub create_address_mt: u64, pub rolledover_state_trees: u64, pub rolledover_address_trees: u64, pub spl_approved: u64, pub spl_revoked: u64, pub spl_burned: u64, pub spl_frozen: u64, pub spl_thawed: u64, pub registered_foresters: u64, pub created_foresters: u64, pub work_reported: u64, pub finalized_registrations: u64, } impl Stats { pub fn print(&self, users: u64) { println!("Stats:"); println!("Users {}", users); println!("Mints {}", self.mints); println!("Spl transfers {}", self.spl_transfers); println!("Spl decompress {}", self.spl_decompress); println!("Spl compress {}", self.spl_compress); println!("Sol transfers {}", self.sol_transfers); println!("Sol decompress {}", self.sol_decompress); println!("Sol compress {}", self.sol_compress); println!("Create address {}", self.create_address); println!("Create pda {}", self.create_pda); println!("Create state mt {}", self.create_state_mt); println!("Create address mt {}", self.create_address_mt); println!("Rolled over state trees {}", self.rolledover_state_trees); println!( "Rolled over address trees {}", self.rolledover_address_trees ); println!("Spl approved {}", self.spl_approved); println!("Spl revoked {}", self.spl_revoked); println!("Spl burned {}", self.spl_burned); println!("Spl frozen {}", self.spl_frozen); println!("Spl thawed {}", self.spl_thawed); println!("Registered foresters {}", self.registered_foresters); println!("Created foresters {}", self.created_foresters); println!("Work reported {}", self.work_reported); println!("Finalized registrations {}", 
self.finalized_registrations); } } pub async fn init_program_test_env( rpc: ProgramTestRpcConnection, env_accounts: &EnvAccounts, ) -> E2ETestEnv<ProgramTestRpcConnection, TestIndexer<ProgramTestRpcConnection>> { let indexer: TestIndexer<ProgramTestRpcConnection> = TestIndexer::init_from_env( &env_accounts.forester.insecure_clone(), env_accounts, Some(ProverConfig { run_mode: Some(ProverMode::Rpc), circuits: vec![], }), ) .await; E2ETestEnv::<ProgramTestRpcConnection, TestIndexer<ProgramTestRpcConnection>>::new( rpc, indexer, env_accounts, KeypairActionConfig::all_default(), GeneralActionConfig::default(), 10, None, ) .await } #[derive(Debug, PartialEq)] pub struct TestForester { keypair: Keypair, forester: Forester, is_registered: Option<u64>, } pub struct E2ETestEnv<R: RpcConnection, I: Indexer<R>> { pub payer: Keypair, pub governance_keypair: Keypair, pub indexer: I, pub users: Vec<User>, pub mints: Vec<Pubkey>, pub foresters: Vec<TestForester>, pub rpc: R, pub keypair_action_config: KeypairActionConfig, pub general_action_config: GeneralActionConfig, pub round: u64, pub rounds: u64, pub rng: StdRng, pub stats: Stats, pub epoch: u64, pub slot: u64, /// Forester struct is reused but not used for foresting here /// Epoch config keeps track of the ongong epochs. 
pub epoch_config: Forester, pub protocol_config: ProtocolConfig, pub registration_epoch: u64, } impl<R: RpcConnection, I: Indexer<R>> E2ETestEnv<R, I> where R: RpcConnection, I: Indexer<R>, { pub async fn new( mut rpc: R, mut indexer: I, env_accounts: &EnvAccounts, keypair_action_config: KeypairActionConfig, general_action_config: GeneralActionConfig, rounds: u64, seed: Option<u64>, ) -> Self { let payer = rpc.get_payer().insecure_clone(); airdrop_lamports(&mut rpc, &payer.pubkey(), 1_000_000_000_000) .await .unwrap(); airdrop_lamports(&mut rpc, &env_accounts.forester.pubkey(), 1_000_000_000_000) .await .unwrap(); let mut thread_rng = ThreadRng::default(); let random_seed = thread_rng.next_u64(); let seed: u64 = seed.unwrap_or(random_seed); // Keep this print so that in case the test fails // we can use the seed to reproduce the error. println!("\n\ne2e test seed {}\n\n", seed); let mut rng = StdRng::seed_from_u64(seed); let user = Self::create_user(&mut rng, &mut rpc).await; let mint = create_mint_helper(&mut rpc, &payer).await; mint_tokens_helper( &mut rpc, &mut indexer, &env_accounts.merkle_tree_pubkey, &payer, &mint, vec![100_000_000; 1], vec![user.keypair.pubkey()], ) .await; let protocol_config_pda_address = get_protocol_config_pda_address().0; println!("here"); let protocol_config = rpc .get_anchor_account::<ProtocolConfigPda>(&protocol_config_pda_address) .await .unwrap() .unwrap() .config; // TODO: add clear test env enum // register foresters is only compatible with ProgramTest environment let (foresters, epoch_config) = if let Some(registered_epoch) = env_accounts.forester_epoch.as_ref() { let _forester = Forester { registration: registered_epoch.clone(), active: registered_epoch.clone(), ..Default::default() }; // Forester epoch account is assumed to exist (is inited with test program deployment) let forester = TestForester { keypair: env_accounts.forester.insecure_clone(), forester: _forester.clone(), is_registered: Some(0), }; (vec![forester], 
_forester) } else { (Vec::<TestForester>::new(), Forester::default()) }; Self { payer, indexer, users: vec![user], rpc, keypair_action_config, general_action_config, round: 0, rounds, rng, mints: vec![], stats: Stats::default(), foresters, registration_epoch: 0, epoch: 0, slot: 0, epoch_config, protocol_config, governance_keypair: env_accounts.governance_authority.insecure_clone(), } } /// Creates a new user with a random keypair and 100 sol pub async fn create_user(rng: &mut StdRng, rpc: &mut R) -> User { let keypair: Keypair = Keypair::from_seed(&[rng.gen_range(0..255); 32]).unwrap(); rpc.airdrop_lamports(&keypair.pubkey(), LAMPORTS_PER_SOL * 5000) .await .unwrap(); User { keypair, token_accounts: vec![], } } pub async fn get_balance(&mut self, pubkey: &Pubkey) -> u64 { self.rpc.get_balance(pubkey).await.unwrap() } pub async fn execute_rounds(&mut self) { for _ in 0..=self.rounds { self.execute_round().await; } } pub async fn execute_round(&mut self) { println!("\n------------------------------------------------------\n"); println!("Round: {}", self.round); self.stats.print(self.users.len() as u64); // TODO: check at the beginning of the round that the Merkle trees are in sync let len = self.users.len(); for i in 0..len { self.activate_keypair_actions(&self.users[i].keypair.pubkey()) .await; } self.activate_general_actions().await; self.round += 1; } /// 1. Add a new keypair /// 2. 
Create a new state Merkle tree pub async fn activate_general_actions(&mut self) { // If we want to test rollovers we set the threshold to 0 for all newly created trees let rollover_threshold = if self.general_action_config.rollover.is_some() { Some(0) } else { None }; if self .rng .gen_bool(self.general_action_config.add_keypair.unwrap_or_default()) { let user = Self::create_user(&mut self.rng, &mut self.rpc).await; self.users.push(user); } if self.rng.gen_bool( self.general_action_config .create_state_mt .unwrap_or_default(), ) { self.create_state_tree(rollover_threshold).await; self.stats.create_state_mt += 1; } if self.rng.gen_bool( self.general_action_config .create_address_mt .unwrap_or_default(), ) { self.create_address_tree(rollover_threshold).await; self.stats.create_address_mt += 1; } if self.rng.gen_bool( self.general_action_config .nullify_compressed_accounts .unwrap_or_default(), ) { for state_tree_bundle in self.indexer.get_state_merkle_trees_mut().iter_mut() { println!("\n --------------------------------------------------\n\t\t NULLIFYING LEAVES\n --------------------------------------------------"); // find forester which is eligible this slot for this tree if let Some(payer) = Self::get_eligible_forester_for_queue( &state_tree_bundle.accounts.nullifier_queue, &self.foresters, self.slot, ) { // TODO: add newly addeded trees to foresters nullify_compressed_accounts( &mut self.rpc, &payer, state_tree_bundle, self.epoch, false, ) .await .unwrap(); } else { println!("No forester found for nullifier queue"); }; } } if self.rng.gen_bool( self.general_action_config .empty_address_queue .unwrap_or_default(), ) { for address_merkle_tree_bundle in self.indexer.get_address_merkle_trees_mut().iter_mut() { // find forester which is eligible this slot for this tree if let Some(payer) = Self::get_eligible_forester_for_queue( &address_merkle_tree_bundle.accounts.queue, &self.foresters, self.slot, ) { println!("\n 
--------------------------------------------------\n\t\t Empty Address Queue\n --------------------------------------------------"); println!("epoch {}", self.epoch); println!("forester {}", payer.pubkey()); // TODO: add newly addeded trees to foresters empty_address_queue_test( &payer, &mut self.rpc, address_merkle_tree_bundle, false, self.epoch, false, ) .await .unwrap(); } else { println!("No forester found for address queue"); }; } } for index in 0..self.indexer.get_state_merkle_trees().len() { let is_read_for_rollover = state_tree_ready_for_rollover( &mut self.rpc, self.indexer.get_state_merkle_trees()[index] .accounts .merkle_tree, ) .await; if self .rng .gen_bool(self.general_action_config.rollover.unwrap_or_default()) && is_read_for_rollover { println!("\n --------------------------------------------------\n\t\t Rollover State Merkle Tree\n --------------------------------------------------"); // find forester which is eligible this slot for this tree if let Some(payer) = Self::get_eligible_forester_for_queue( &self.indexer.get_state_merkle_trees()[index] .accounts .nullifier_queue, &self.foresters, self.slot, ) { self.rollover_state_merkle_tree_and_queue(index, &payer, self.epoch) .await .unwrap(); self.stats.rolledover_state_trees += 1; } } } for index in 0..self.indexer.get_address_merkle_trees().len() { let is_read_for_rollover = address_tree_ready_for_rollover( &mut self.rpc, self.indexer.get_address_merkle_trees()[index] .accounts .merkle_tree, ) .await; if self .rng .gen_bool(self.general_action_config.rollover.unwrap_or_default()) && is_read_for_rollover { // find forester which is eligible this slot for this tree if let Some(payer) = Self::get_eligible_forester_for_queue( &self.indexer.get_address_merkle_trees()[index] .accounts .queue, &self.foresters, self.slot, ) { println!("\n --------------------------------------------------\n\t\t Rollover Address Merkle Tree\n --------------------------------------------------"); 
self.rollover_address_merkle_tree_and_queue(index, &payer, self.epoch) .await .unwrap(); self.stats.rolledover_address_trees += 1; } } } if self .rng .gen_bool(self.general_action_config.add_forester.unwrap_or_default()) { println!("\n --------------------------------------------------\n\t\t Add Forester\n --------------------------------------------------"); let forester = TestForester { keypair: Keypair::new(), forester: Forester::default(), is_registered: None, }; let forester_config = ForesterConfig { fee: self.rng.gen_range(0..=100), }; register_test_forester( &mut self.rpc, &self.governance_keypair, &forester.keypair.pubkey(), forester_config, ) .await .unwrap(); self.foresters.push(forester); self.stats.created_foresters += 1; } // advance to next light slot and perform forester epoch actions if !self.general_action_config.disable_epochs { println!("\n --------------------------------------------------\n\t\t Start Epoch Actions \n --------------------------------------------------"); let current_solana_slot = self.rpc.get_slot().await.unwrap(); let current_light_slot = self .protocol_config .get_current_active_epoch_progress(current_solana_slot) / self.protocol_config.slot_length; // If slot didn't change, advance to next slot // if current_light_slot != self.slot { let new_slot = current_solana_slot + self.protocol_config.slot_length; println!("advanced slot from {} to {}", self.slot, current_light_slot); println!("solana slot from {} to {}", current_solana_slot, new_slot); self.rpc.warp_to_slot(new_slot).await.unwrap(); self.slot = current_light_slot + 1; let current_solana_slot = self.rpc.get_slot().await.unwrap(); // need to detect whether new registration phase started let current_registration_epoch = self .protocol_config .get_latest_register_epoch(current_solana_slot) .unwrap(); // If reached new registration phase register all foresters if current_registration_epoch != self.registration_epoch { println!("\n 
--------------------------------------------------\n\t\t Register Foresters for new Epoch \n --------------------------------------------------"); self.registration_epoch = current_registration_epoch; println!("new register epoch {}", self.registration_epoch); println!("num foresters {}", self.foresters.len()); for forester in self.foresters.iter_mut() { println!( "registered forester {} for epoch {}", forester.keypair.pubkey(), self.registration_epoch ); let registered_epoch = Epoch::register( &mut self.rpc, &self.protocol_config, &forester.keypair, &forester.keypair.pubkey(), ) .await .unwrap() .unwrap(); println!("registered_epoch {:?}", registered_epoch.phases); forester.forester.registration = registered_epoch; if forester.is_registered.is_none() { forester.is_registered = Some(self.registration_epoch); } self.stats.registered_foresters += 1; } } let current_active_epoch = self .protocol_config .get_current_active_epoch(current_solana_slot) .unwrap(); // If reached new active epoch // 1. move epoch in every forester to report work epoch // 2. report work for every forester // 3. finalize registration for every forester #[allow(clippy::comparison_chain)] if current_active_epoch > self.epoch { self.slot = current_light_slot; self.epoch = current_active_epoch; // 1. move epoch in every forester to report work epoch for forester in self.foresters.iter_mut() { if forester.is_registered.is_none() { continue; } forester.forester.switch_to_report_work(); } println!("\n --------------------------------------------------\n\t\t Report Work \n --------------------------------------------------"); // 2. 
report work for every forester for forester in self.foresters.iter_mut() { if forester.is_registered.is_none() { continue; } println!("report work for forester {}", forester.keypair.pubkey()); println!( "forester.forester.report_work.forester_epoch_pda {}", forester.forester.report_work.forester_epoch_pda ); println!( "forester.forester.report_work.epoch_pda {}", forester.forester.report_work.epoch_pda ); let (pre_forester_epoch_pda, pre_epoch_pda) = fetch_epoch_and_forester_pdas( &mut self.rpc, &forester.forester.report_work.forester_epoch_pda, &forester.forester.report_work.epoch_pda, ) .await; forester .forester .report_work(&mut self.rpc, &forester.keypair, &forester.keypair.pubkey()) .await .unwrap(); println!("reported work"); assert_report_work( &mut self.rpc, &forester.forester.report_work.forester_epoch_pda, &forester.forester.report_work.epoch_pda, pre_forester_epoch_pda, pre_epoch_pda, ) .await; self.stats.work_reported += 1; } // 3. finalize registration for every forester println!("\n --------------------------------------------------\n\t\t Finalize Registration \n --------------------------------------------------"); // 3.1 get tree accounts // TODO: use TreeAccounts in TestIndexer let mut tree_accounts = self .indexer .get_state_merkle_trees() .iter() .map(|state_merkle_tree_bundle| TreeAccounts { tree_type: TreeType::State, merkle_tree: state_merkle_tree_bundle.accounts.merkle_tree, queue: state_merkle_tree_bundle.accounts.nullifier_queue, is_rolledover: false, }) .collect::<Vec<TreeAccounts>>(); self.indexer.get_address_merkle_trees().iter().for_each( |address_merkle_tree_bundle| { tree_accounts.push(TreeAccounts { tree_type: TreeType::Address, merkle_tree: address_merkle_tree_bundle.accounts.merkle_tree, queue: address_merkle_tree_bundle.accounts.queue, is_rolledover: false, }); }, ); // 3.2 finalize registration for every forester for forester in self.foresters.iter_mut() { if forester.is_registered.is_none() { continue; } println!( "registered 
forester {} for epoch {}", forester.keypair.pubkey(), self.epoch ); println!( "forester.forester registration epoch {:?}", forester.forester.registration.epoch ); println!( "forester.forester active epoch {:?}", forester.forester.active.epoch ); println!( "forester.forester report_work epoch {:?}", forester.forester.report_work.epoch ); forester .forester .active .fetch_account_and_add_trees_with_schedule(&mut self.rpc, &tree_accounts) .await .unwrap(); let ix = create_finalize_registration_instruction( &forester.keypair.pubkey(), &forester.keypair.pubkey(), forester.forester.active.epoch, ); self.rpc .create_and_send_transaction( &[ix], &forester.keypair.pubkey(), &[&forester.keypair], ) .await .unwrap(); assert_finalized_epoch_registration( &mut self.rpc, &forester.forester.active.forester_epoch_pda, &forester.forester.active.epoch_pda, ) .await; self.stats.finalized_registrations += 1; } } else if current_active_epoch < self.epoch { panic!( "current_active_epoch {} is less than self.epoch {}", current_active_epoch, self.epoch ); } } } pub async fn create_state_tree(&mut self, rollover_threshold: Option<u64>) { let merkle_tree_keypair = Keypair::new(); //from_seed(&[self.rng.gen_range(0..255); 32]).unwrap(); let nullifier_queue_keypair = Keypair::new(); //from_seed(&[self.rng.gen_range(0..255); 32]).unwrap(); let cpi_context_keypair = Keypair::new(); let rollover_threshold = if let Some(rollover_threshold) = rollover_threshold { Some(rollover_threshold) } else if self.rng.gen_bool(0.5) && !self.keypair_action_config.fee_assert { Some(self.rng.gen_range(1..100)) } else { None }; let merkle_tree_config = if !self.keypair_action_config.fee_assert { StateMerkleTreeConfig { height: 26, changelog_size: self.rng.gen_range(1..5000), roots_size: self.rng.gen_range(1..10000), canopy_depth: 10, network_fee: Some(5000), close_threshold: None, rollover_threshold, } } else { StateMerkleTreeConfig::default() }; println!("merkle tree config: {:?}", merkle_tree_config); let 
queue_config = if !self.keypair_action_config.fee_assert { let capacity: u32 = gen_prime(&mut self.rng, 1..10000).unwrap(); NullifierQueueConfig { capacity: capacity as u16, sequence_threshold: merkle_tree_config.roots_size + SAFETY_MARGIN, network_fee: None, } } else if rollover_threshold.is_some() { panic!("rollover_threshold should not be set when fee_assert is set (keypair_action_config.fee_assert)"); } else { NullifierQueueConfig::default() }; let forester = Pubkey::new_unique(); println!("queue config: {:?}", queue_config); create_state_merkle_tree_and_queue_account( &self.payer, true, &mut self.rpc, &merkle_tree_keypair, &nullifier_queue_keypair, Some(&cpi_context_keypair), None, Some(forester), 1, &merkle_tree_config, &queue_config, ) .await .unwrap(); let merkle_tree = Box::new(light_merkle_tree_reference::MerkleTree::<Poseidon>::new( STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, )); let state_tree_account = AccountZeroCopy::<account_compression::StateMerkleTreeAccount>::new( &mut self.rpc, nullifier_queue_keypair.pubkey(), ) .await; self.indexer .get_state_merkle_trees_mut() .push(StateMerkleTreeBundle { rollover_fee: state_tree_account .deserialized() .metadata .rollover_metadata .rollover_fee as i64, accounts: StateMerkleTreeAccounts { merkle_tree: merkle_tree_keypair.pubkey(), nullifier_queue: nullifier_queue_keypair.pubkey(), cpi_context: cpi_context_keypair.pubkey(), }, merkle_tree, }); // TODO: Add assert } pub async fn create_address_tree(&mut self, rollover_threshold: Option<u64>) { let merkle_tree_keypair = Keypair::new(); let nullifier_queue_keypair = Keypair::new(); let rollover_threshold = if let Some(rollover_threshold) = rollover_threshold { Some(rollover_threshold) } else if self.rng.gen_bool(0.5) && !self.keypair_action_config.fee_assert { Some(self.rng.gen_range(1..100)) } else { None }; let (config, address_config) = if !self.keypair_action_config.fee_assert { let root_history = self.rng.gen_range(1..10000); 
( AddressMerkleTreeConfig { height: 26, changelog_size: self.rng.gen_range(1..5000), roots_size: root_history, canopy_depth: 10, address_changelog_size: self.rng.gen_range(1..5000), rollover_threshold, network_fee: Some(5000), close_threshold: None, // TODO: double check that close threshold cannot be set }, AddressQueueConfig { sequence_threshold: root_history + SAFETY_MARGIN, ..Default::default() }, ) } else if rollover_threshold.is_some() { panic!("rollover_threshold should not be set when fee_assert is set (keypair_action_config.fee_assert)"); } else { ( AddressMerkleTreeConfig::default(), AddressQueueConfig::default(), ) }; create_address_merkle_tree_and_queue_account_with_assert( &self.payer, true, &mut self.rpc, &merkle_tree_keypair, &nullifier_queue_keypair, None, None, &config, &address_config, 0, ) .await .unwrap(); let init_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10).unwrap(); let mut merkle_tree = Box::new( IndexedMerkleTree::<Poseidon, usize>::new( STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, ) .unwrap(), ); let mut indexed_array = Box::<IndexedArray<Poseidon, usize>>::default(); merkle_tree.append(&init_value, &mut indexed_array).unwrap(); let queue_account = AccountZeroCopy::<account_compression::QueueAccount>::new( &mut self.rpc, nullifier_queue_keypair.pubkey(), ) .await; self.indexer .get_address_merkle_trees_mut() .push(AddressMerkleTreeBundle { rollover_fee: queue_account .deserialized() .metadata .rollover_metadata .rollover_fee as i64, accounts: AddressMerkleTreeAccounts { merkle_tree: merkle_tree_keypair.pubkey(), queue: nullifier_queue_keypair.pubkey(), }, merkle_tree, indexed_array, }); // TODO: Add assert } pub fn safe_gen_range<T, RR>(rng: &mut StdRng, range: RR, empty_fallback: T) -> T where T: SampleUniform + Copy, RR: SampleRange<T> + Sized, { if range.is_empty() { return empty_fallback; } rng.gen_range(range) } /// 1. 
Transfer spl tokens between random users pub async fn activate_keypair_actions(&mut self, user: &Pubkey) { let user_index = self .users .iter() .position(|u| &u.keypair.pubkey() == user) .unwrap(); // compress spl // check sufficient spl balance if self .rng .gen_bool(self.keypair_action_config.compress_spl.unwrap_or(0.0)) && self.users[user_index].token_accounts.is_empty() // TODO: enable compress spl test { self.compress_spl(user_index).await; } // decompress spl // check sufficient compressed spl balance if self .rng .gen_bool(self.keypair_action_config.decompress_spl.unwrap_or(0.0)) { self.decompress_spl(user_index).await; } // transfer spl // check sufficient compressed spl balance if self .rng .gen_bool(self.keypair_action_config.transfer_spl.unwrap_or(0.0)) { self.transfer_spl(user_index).await; } // create address if self .rng .gen_bool(self.keypair_action_config.create_address.unwrap_or(0.0)) { self.create_address(None, None).await; } // compress sol // check sufficient sol balance let balance = self .rpc .get_balance(&self.users[user_index].keypair.pubkey()) .await .unwrap(); if self .rng .gen_bool(self.keypair_action_config.compress_sol.unwrap_or(0.0)) && balance > 1000 { self.compress_sol(user_index, balance).await; } else { println!("Not enough balance to compress sol. 
Balance: {}", balance); } // decompress sol // check sufficient compressed sol balance if self .rng .gen_bool(self.keypair_action_config.decompress_sol.unwrap_or(0.0)) { self.decompress_sol(user_index).await; } // transfer sol if self .rng .gen_bool(self.keypair_action_config.transfer_sol.unwrap_or(0.0)) { self.transfer_sol(user_index).await; } // approve spl if self .rng .gen_bool(self.keypair_action_config.approve_spl.unwrap_or(0.0)) && !self.users[user_index].token_accounts.is_empty() { self.approve_spl(user_index).await; } // revoke spl if self .rng .gen_bool(self.keypair_action_config.revoke_spl.unwrap_or(0.0)) && !self.users[user_index].token_accounts.is_empty() { self.revoke_spl(user_index).await; } // burn spl if self .rng .gen_bool(self.keypair_action_config.burn_spl.unwrap_or(0.0)) && !self.users[user_index].token_accounts.is_empty() { self.burn_spl(user_index).await; } // freeze spl if self .rng .gen_bool(self.keypair_action_config.freeze_spl.unwrap_or(0.0)) && !self.users[user_index].token_accounts.is_empty() { self.freeze_spl(user_index).await; } // thaw spl if self .rng .gen_bool(self.keypair_action_config.thaw_spl.unwrap_or(0.0)) && !self.users[user_index].token_accounts.is_empty() { self.thaw_spl(user_index).await; } } pub fn get_eligible_forester_for_queue( queue_pubkey: &Pubkey, foresters: &[TestForester], light_slot: u64, ) -> Option<Keypair> { for f in foresters.iter() { let tree = f .forester .active .merkle_trees .iter() .find(|mt| mt.tree_accounts.queue == *queue_pubkey); if let Some(tree) = tree { if tree.is_eligible(light_slot) { return Some(f.keypair.insecure_clone()); } } } None } pub async fn transfer_sol_deterministic( &mut self, from: &Keypair, to: &Pubkey, tree_index: Option<usize>, ) -> Result<Signature, RpcError> { let input_compressed_accounts = self.get_compressed_sol_accounts(&from.pubkey()); let output_merkle_tree = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)] .accounts .merkle_tree; let recipients = vec![*to]; 
let transaction_params = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0, num_input_compressed_accounts: input_compressed_accounts.len() as u8, num_output_compressed_accounts: 1u8, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; transfer_compressed_sol_test( &mut self.rpc, &mut self.indexer, from, input_compressed_accounts.as_slice(), recipients.as_slice(), &[output_merkle_tree], transaction_params, ) .await } pub async fn transfer_sol(&mut self, user_index: usize) { let input_compressed_accounts = self.get_random_compressed_sol_accounts(user_index); if !input_compressed_accounts.is_empty() { println!("\n --------------------------------------------------\n\t\t Transfer Sol\n --------------------------------------------------"); let recipients = self .users .iter() .map(|u| u.keypair.pubkey()) .collect::<Vec<Pubkey>>(); let num_output_merkle_trees = Self::safe_gen_range( &mut self.rng, 1..std::cmp::min( self.keypair_action_config .max_output_accounts .unwrap_or(recipients.len() as u64), recipients.len() as u64, ), 1, ); let recipients = recipients .choose_multiple(&mut self.rng, num_output_merkle_trees as usize) .copied() .collect::<Vec<_>>(); let output_merkle_trees = self.get_merkle_tree_pubkeys(num_output_merkle_trees); let transaction_parameters = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0, num_input_compressed_accounts: input_compressed_accounts.len() as u8, num_output_compressed_accounts: num_output_merkle_trees as u8, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; transfer_compressed_sol_test( &mut self.rpc, &mut self.indexer, &self.users[user_index].keypair, input_compressed_accounts.as_slice(), recipients.as_slice(), output_merkle_trees.as_slice(), transaction_parameters, ) .await .unwrap(); self.stats.sol_transfers += 1; } } pub async fn decompress_sol(&mut self, user_index: usize) { let input_compressed_accounts = 
self.get_random_compressed_sol_accounts(user_index); if !input_compressed_accounts.is_empty() { println!("\n --------------------------------------------------\n\t\t Decompress Sol\n --------------------------------------------------"); let output_merkle_tree = self.get_merkle_tree_pubkeys(1)[0]; let recipient = self.users [Self::safe_gen_range(&mut self.rng, 0..std::cmp::min(self.users.len(), 6), 0)] .keypair .pubkey(); let balance = input_compressed_accounts .iter() .map(|x| x.compressed_account.lamports) .sum::<u64>(); let decompress_amount = Self::safe_gen_range(&mut self.rng, 1000..balance, balance / 2); let transaction_paramets = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0, num_input_compressed_accounts: input_compressed_accounts.len() as u8, num_output_compressed_accounts: 1u8, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; decompress_sol_test( &mut self.rpc, &mut self.indexer, &self.users[user_index].keypair, &input_compressed_accounts, &recipient, decompress_amount, &output_merkle_tree, transaction_paramets, ) .await .unwrap(); self.stats.sol_decompress += 1; } } pub async fn compress_sol_deterministic( &mut self, from: &Keypair, amount: u64, tree_index: Option<usize>, ) { let input_compressed_accounts = self.get_compressed_sol_accounts(&from.pubkey()); let output_merkle_tree = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)] .accounts .merkle_tree; let transaction_parameters = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0, num_input_compressed_accounts: input_compressed_accounts.len() as u8, num_output_compressed_accounts: 1u8, compress: amount as i64, fee_config: FeeConfig::default(), }) } else { None }; compress_sol_test( &mut self.rpc, &mut self.indexer, from, input_compressed_accounts.as_slice(), false, amount, &output_merkle_tree, transaction_parameters, ) .await .unwrap(); } pub async fn compress_sol(&mut self, user_index: usize, 
balance: u64) { println!("\n --------------------------------------------------\n\t\t Compress Sol\n --------------------------------------------------"); // Limit max compress amount to 1 sol so that context.payer doesn't get depleted by airdrops. let max_amount = std::cmp::min(balance, 1_000_000_000); let amount = Self::safe_gen_range(&mut self.rng, 1000..max_amount, max_amount / 2); let input_compressed_accounts = self.get_random_compressed_sol_accounts(user_index); let create_output_compressed_accounts_for_input_accounts = false; // TODO: debug Merkle trees in wrong order // if input_compressed_accounts.is_empty() { // false // } else { // self.rng.gen_bool(0.5) // }; let output_merkle_tree = self.get_merkle_tree_pubkeys(1)[0]; let transaction_parameters = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0, num_input_compressed_accounts: input_compressed_accounts.len() as u8, num_output_compressed_accounts: 1u8, compress: amount as i64, fee_config: FeeConfig::default(), }) } else { None }; compress_sol_test( &mut self.rpc, &mut self.indexer, &self.users[user_index].keypair, input_compressed_accounts.as_slice(), create_output_compressed_accounts_for_input_accounts, amount, &output_merkle_tree, transaction_parameters, ) .await .unwrap(); airdrop_lamports( &mut self.rpc, &self.users[user_index].keypair.pubkey(), amount, ) .await .unwrap(); self.stats.sol_compress += 1; } pub async fn create_address( &mut self, optional_addresses: Option<Vec<Pubkey>>, address_tree_index: Option<usize>, ) -> Vec<Pubkey> { println!("\n --------------------------------------------------\n\t\t Create Address\n --------------------------------------------------"); // select number of addresses to create let num_addresses = self.rng.gen_range(1..=2); let (address_merkle_tree_pubkeys, address_queue_pubkeys) = if let Some(address_tree_index) = address_tree_index { ( vec![ self.indexer.get_address_merkle_trees()[address_tree_index] .accounts 
.merkle_tree; num_addresses as usize ], vec![ self.indexer.get_address_merkle_trees()[address_tree_index] .accounts .queue; num_addresses as usize ], ) } else { // select random address Merkle tree(s) self.get_address_merkle_tree_pubkeys(num_addresses) }; let mut address_seeds = Vec::new(); let mut created_addresses = Vec::new(); if let Some(addresses) = optional_addresses { for address in addresses { let address_seed: [u8; 32] = address.to_bytes(); address_seeds.push(address_seed); created_addresses.push(address); } } else { for _ in 0..num_addresses { let address_seed: [u8; 32] = bigint_to_be_bytes_array::<32>(&self.rng.gen_biguint(256)).unwrap(); address_seeds.push(address_seed); created_addresses.push(Pubkey::from(address_seed)); } } let output_compressed_accounts = self.get_merkle_tree_pubkeys(num_addresses); let transaction_parameters = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: num_addresses as u8, num_input_compressed_accounts: 0u8, num_output_compressed_accounts: num_addresses as u8, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; // TODO: add other input compressed accounts // (to test whether the address generation degrades performance) create_addresses_test( &mut self.rpc, &mut self.indexer, address_merkle_tree_pubkeys.as_slice(), address_queue_pubkeys.as_slice(), output_compressed_accounts, address_seeds.as_slice(), &Vec::new(), false, transaction_parameters, ) .await .unwrap(); self.stats.create_address += num_addresses; created_addresses } pub async fn transfer_spl(&mut self, user_index: usize) { let user = &self.users[user_index].keypair.pubkey(); println!("\n --------------------------------------------------\n\t\t Tranfer Spl\n --------------------------------------------------"); let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await; if token_accounts.is_empty() { let mt_pubkeys = self.get_merkle_tree_pubkeys(1); mint_tokens_helper( &mut self.rpc, 
&mut self.indexer, &mt_pubkeys[0], &self.payer, &mint, vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1], vec![*user; 1], ) .await; let (_, _token_accounts) = self.select_random_compressed_token_accounts(user).await; token_accounts = _token_accounts; } let recipients = token_accounts .iter() .map(|_| { self.users [Self::safe_gen_range(&mut self.rng, 0..std::cmp::min(self.users.len(), 6), 0)] .keypair .pubkey() }) .collect::<Vec<_>>(); println!("Recipients: {:?}", recipients.len()); let max_amount = token_accounts .iter() .map(|token_account| token_account.token_data.amount) .sum::<u64>(); let amount = Self::safe_gen_range(&mut self.rng, 1000..max_amount, max_amount / 2); let equal_amount = amount / recipients.len() as u64; let num_output_compressed_accounts = if max_amount - amount != 0 { recipients.len() + 1 } else { recipients.len() }; // get different amounts for each recipient so that every compressed account is unique let amounts = recipients .iter() .enumerate() .map(|(i, _)| equal_amount - i as u64) .collect::<Vec<u64>>(); let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(num_output_compressed_accounts as u64); let transaction_paramets = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0u8, num_input_compressed_accounts: token_accounts.len() as u8, num_output_compressed_accounts: output_merkle_tree_pubkeys.len() as u8, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; compressed_transfer_test( &self.rpc.get_payer().insecure_clone(), &mut self.rpc, &mut self.indexer, &mint, &self.users[user_index].keypair.insecure_clone(), &recipients, &amounts, None, &token_accounts, &output_merkle_tree_pubkeys, None, false, transaction_paramets, ) .await; self.stats.spl_transfers += 1; } pub async fn approve_spl(&mut self, user_index: usize) { let user = &self.users[user_index].keypair.pubkey(); println!("\n --------------------------------------------------\n\t\t Approve Spl\n 
--------------------------------------------------"); let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await; if token_accounts.is_empty() { let mt_pubkeys = self.get_merkle_tree_pubkeys(1); mint_tokens_helper( &mut self.rpc, &mut self.indexer, &mt_pubkeys[0], &self.payer, &mint, vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1], vec![*user; 1], ) .await; let (_, _token_accounts) = self.select_random_compressed_token_accounts(user).await; token_accounts = _token_accounts; } println!("token_accounts: {:?}", token_accounts); let rnd_user_index = self.rng.gen_range(0..self.users.len()); let delegate = self.users[rnd_user_index].keypair.pubkey(); let max_amount = token_accounts .iter() .map(|token_account| token_account.token_data.amount) .sum::<u64>(); let delegate_amount = Self::safe_gen_range(&mut self.rng, 0..max_amount, max_amount / 2); let num_output_compressed_accounts = if delegate_amount != max_amount { 2 } else { 1 }; let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(2); let transaction_paramets = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0u8, num_input_compressed_accounts: token_accounts.len() as u8, num_output_compressed_accounts, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; approve_test( &self.users[user_index].keypair, &mut self.rpc, &mut self.indexer, token_accounts, delegate_amount, None, &delegate, &output_merkle_tree_pubkeys[0], &output_merkle_tree_pubkeys[1], transaction_paramets, ) .await; self.stats.spl_approved += 1; } pub async fn revoke_spl(&mut self, user_index: usize) { let user = &self.users[user_index].keypair.pubkey(); println!("\n --------------------------------------------------\n\t\t Revoke Spl\n --------------------------------------------------"); let (mint, mut token_accounts) = self .select_random_compressed_token_accounts_delegated(user, true, None, false) .await; if token_accounts.is_empty() { 
let mt_pubkeys = self.get_merkle_tree_pubkeys(1); mint_tokens_helper( &mut self.rpc, &mut self.indexer, &mt_pubkeys[0], &self.payer, &mint, vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1], vec![*user; 1], ) .await; self.approve_spl(user_index).await; let (_, _token_accounts) = self .select_random_compressed_token_accounts_delegated(user, true, None, false) .await; token_accounts = _token_accounts; } let num_output_compressed_accounts = 1; let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(1); let transaction_paramets = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0u8, num_input_compressed_accounts: token_accounts.len() as u8, num_output_compressed_accounts, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; revoke_test( &self.users[user_index].keypair, &mut self.rpc, &mut self.indexer, token_accounts, &output_merkle_tree_pubkeys[0], transaction_paramets, ) .await; self.stats.spl_revoked += 1; } pub async fn burn_spl(&mut self, user_index: usize) { let user = &self.users[user_index].keypair.pubkey(); println!("\n --------------------------------------------------\n\t\t Burn Spl\n --------------------------------------------------"); let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await; if token_accounts.is_empty() { let mt_pubkeys = self.get_merkle_tree_pubkeys(1); mint_tokens_helper( &mut self.rpc, &mut self.indexer, &mt_pubkeys[0], &self.payer, &mint, vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1], vec![*user; 1], ) .await; let (_, _token_accounts) = self.select_random_compressed_token_accounts(user).await; token_accounts = _token_accounts; } let max_amount = token_accounts .iter() .map(|token_account| token_account.token_data.amount) .sum::<u64>(); let burn_amount = Self::safe_gen_range(&mut self.rng, 0..max_amount, max_amount / 2); let num_output_compressed_accounts = if burn_amount != max_amount { 1 } 
else { 0 }; let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(1); let transaction_paramets = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0u8, num_input_compressed_accounts: token_accounts.len() as u8, num_output_compressed_accounts, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; burn_test( &self.users[user_index].keypair, &mut self.rpc, &mut self.indexer, token_accounts, &output_merkle_tree_pubkeys[0], burn_amount, false, transaction_paramets, false, ) .await; self.stats.spl_burned += 1; } pub async fn freeze_spl(&mut self, user_index: usize) { let user = &self.users[user_index].keypair.pubkey(); println!("\n --------------------------------------------------\n\t\t Freeze Spl\n --------------------------------------------------"); let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await; if token_accounts.is_empty() { let mt_pubkeys = self.get_merkle_tree_pubkeys(1); mint_tokens_helper( &mut self.rpc, &mut self.indexer, &mt_pubkeys[0], &self.payer, &mint, vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1], vec![*user; 1], ) .await; let (_, _token_accounts) = self .select_random_compressed_token_accounts_delegated(user, false, None, false) .await; token_accounts = _token_accounts; } let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(1); let transaction_paramets = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0u8, num_input_compressed_accounts: token_accounts.len() as u8, num_output_compressed_accounts: token_accounts.len() as u8, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; freeze_test( &self.rpc.get_payer().insecure_clone(), &mut self.rpc, &mut self.indexer, token_accounts, &output_merkle_tree_pubkeys[0], transaction_paramets, ) .await; self.stats.spl_frozen += 1; } pub async fn thaw_spl(&mut self, user_index: usize) { let user = 
&self.users[user_index].keypair.pubkey(); println!("\n --------------------------------------------------\n\t\t Thaw Spl\n --------------------------------------------------"); let (_, mut token_accounts) = self .select_random_compressed_token_accounts_frozen(user) .await; if token_accounts.is_empty() { self.freeze_spl(user_index).await; let (_, _token_accounts) = self .select_random_compressed_token_accounts_frozen(user) .await; token_accounts = _token_accounts; } let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(1); let transaction_paramets = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0u8, num_input_compressed_accounts: token_accounts.len() as u8, num_output_compressed_accounts: token_accounts.len() as u8, compress: 0, fee_config: FeeConfig::default(), }) } else { None }; thaw_test( &self.rpc.get_payer().insecure_clone(), &mut self.rpc, &mut self.indexer, token_accounts, &output_merkle_tree_pubkeys[0], transaction_paramets, ) .await; self.stats.spl_thawed += 1; } pub async fn compress_spl(&mut self, user_index: usize) { println!("\n --------------------------------------------------\n\t\t Compress Spl\n --------------------------------------------------"); let mut balance = 0; let mut mint = Pubkey::default(); let mut token_account = Pubkey::default(); for _ in 0..self.users[user_index].token_accounts.len() { let (_mint, _token_account) = self.users[user_index].token_accounts[self .rng .gen_range(0..self.users[user_index].token_accounts.len())]; token_account = _token_account; mint = _mint; self.rpc.get_account(_token_account).await.unwrap(); use solana_sdk::program_pack::Pack; let account = spl_token::state::Account::unpack( &self .rpc .get_account(_token_account) .await .unwrap() .unwrap() .data, ) .unwrap(); balance = account.amount; if balance != 0 { break; } } if balance != 0 { self.users[user_index] .token_accounts .push((mint, token_account)); let output_merkle_tree_account = 
self.get_merkle_tree_pubkeys(1); let amount = Self::safe_gen_range(&mut self.rng, 1000..balance, balance / 2); let transaction_paramets = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0u8, num_input_compressed_accounts: 0u8, num_output_compressed_accounts: 1u8, compress: 0, // sol amount this is a spl compress test fee_config: FeeConfig::default(), }) } else { None }; compress_test( &self.users[user_index].keypair, &mut self.rpc, &mut self.indexer, amount, &mint, &output_merkle_tree_account[0], &token_account, transaction_paramets, false, ) .await; self.stats.spl_compress += 1; } } pub async fn decompress_spl(&mut self, user_index: usize) { let user = &self.users[user_index].keypair.pubkey(); println!("\n --------------------------------------------------\n\t\t Decompress Spl\n --------------------------------------------------"); let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await; if token_accounts.is_empty() { let mt_pubkeys = self.get_merkle_tree_pubkeys(1); mint_tokens_helper( &mut self.rpc, &mut self.indexer, &mt_pubkeys[0], &self.payer, &mint, vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1], vec![*user; 1], ) .await; let (_, _token_accounts) = self.select_random_compressed_token_accounts(user).await; token_accounts = _token_accounts; } let token_account = match self.users[user_index] .token_accounts .iter() .find(|t| t.0 == mint) { Some(token_account) => token_account.1, None => { let token_account_keypair = Keypair::new(); create_token_account( &mut self.rpc, &mint, &token_account_keypair, &self.users[user_index].keypair, ) .await .unwrap(); token_account_keypair.pubkey() } }; self.users[user_index] .token_accounts .push((mint, token_account)); let output_merkle_tree_account = self.get_merkle_tree_pubkeys(1); let max_amount = token_accounts .iter() .map(|token_account| token_account.token_data.amount) .sum::<u64>(); let amount = Self::safe_gen_range(&mut 
self.rng, 1000..max_amount, max_amount / 2);
        // renamed local from misspelled `transaction_paramets` for consistency
        // with the sibling methods in this impl
        let transaction_parameters = if self.keypair_action_config.fee_assert {
            Some(TransactionParams {
                num_new_addresses: 0u8,
                num_input_compressed_accounts: token_accounts.len() as u8,
                num_output_compressed_accounts: 1u8,
                compress: 0,
                fee_config: FeeConfig::default(),
            })
        } else {
            None
        };
        // decompress
        decompress_test(
            &self.users[user_index].keypair,
            &mut self.rpc,
            &mut self.indexer,
            token_accounts.clone(),
            amount,
            &output_merkle_tree_account[0],
            &token_account,
            transaction_parameters,
            false,
        )
        .await;
        self.stats.spl_decompress += 1;
    }

    /// Rolls over the state Merkle tree / nullifier queue pair at `index` to
    /// freshly created accounts via the forester, asserts the on-chain
    /// rollover, and registers the new tree bundle with the test indexer.
    pub async fn rollover_state_merkle_tree_and_queue(
        &mut self,
        index: usize,
        payer: &Keypair,
        epoch: u64,
    ) -> Result<(), RpcError> {
        let bundle = self.indexer.get_state_merkle_trees()[index].accounts;
        let new_nullifier_queue_keypair = Keypair::new();
        let new_merkle_tree_keypair = Keypair::new();
        // TODO: move into registry program
        let new_cpi_signature_keypair = Keypair::new();
        // Snapshot the fee payer balance so the rollover cost can be asserted below.
        let fee_payer_balance = self
            .rpc
            .get_balance(&self.indexer.get_payer().pubkey())
            .await
            .unwrap();
        let rollover_signature_and_slot = perform_state_merkle_tree_roll_over_forester(
            payer,
            &mut self.rpc,
            &new_nullifier_queue_keypair,
            &new_merkle_tree_keypair,
            &new_cpi_signature_keypair,
            &bundle.merkle_tree,
            &bundle.nullifier_queue,
            epoch,
            false,
        )
        .await
        .unwrap();
        info!("Rollover signature: {:?}", rollover_signature_and_slot.0);
        // Rent for the cpi context account created alongside the new pair.
        let additional_rent = self
            .rpc
            .get_minimum_balance_for_rent_exemption(
                ProtocolConfig::default().cpi_context_size as usize,
            )
            .await
            .unwrap();
        info!("additional_rent: {:?}", additional_rent);
        assert_rolled_over_pair(
            &self.indexer.get_payer().pubkey(),
            &mut self.rpc,
            &fee_payer_balance,
            &bundle.merkle_tree,
            &bundle.nullifier_queue,
            &new_merkle_tree_keypair.pubkey(),
            &new_nullifier_queue_keypair.pubkey(),
            rollover_signature_and_slot.1,
            additional_rent,
            4,
        )
        .await;
        self.indexer
            .get_state_merkle_trees_mut()
            .push(StateMerkleTreeBundle {
                // TODO: fetch correct fee when this property is used
rollover_fee: 0, accounts: StateMerkleTreeAccounts { merkle_tree: new_merkle_tree_keypair.pubkey(), nullifier_queue: new_nullifier_queue_keypair.pubkey(), cpi_context: new_cpi_signature_keypair.pubkey(), }, merkle_tree: Box::new(light_merkle_tree_reference::MerkleTree::<Poseidon>::new( STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, )), }); Ok(()) } pub async fn rollover_address_merkle_tree_and_queue( &mut self, index: usize, payer: &Keypair, epoch: u64, ) -> Result<(), RpcError> { let bundle = self.indexer.get_address_merkle_trees()[index].accounts; let new_nullifier_queue_keypair = Keypair::new(); let new_merkle_tree_keypair = Keypair::new(); let fee_payer_balance = self .rpc .get_balance(&self.indexer.get_payer().pubkey()) .await .unwrap(); println!("prior balance {}", fee_payer_balance); perform_address_merkle_tree_roll_over_forester( payer, &mut self.rpc, &new_nullifier_queue_keypair, &new_merkle_tree_keypair, &bundle.merkle_tree, &bundle.queue, epoch, false, ) .await?; assert_rolled_over_address_merkle_tree_and_queue( &self.indexer.get_payer().pubkey(), &mut self.rpc, &fee_payer_balance, &bundle.merkle_tree, &bundle.queue, &new_merkle_tree_keypair.pubkey(), &new_nullifier_queue_keypair.pubkey(), ) .await; self.indexer.add_address_merkle_tree_accounts( &new_merkle_tree_keypair, &new_nullifier_queue_keypair, None, ); Ok(()) } pub fn get_random_compressed_sol_accounts( &mut self, user_index: usize, ) -> Vec<CompressedAccountWithMerkleContext> { let input_compressed_accounts = self .indexer .get_compressed_accounts_by_owner(&self.users[user_index].keypair.pubkey()); let range = std::cmp::min(input_compressed_accounts.len(), 4); let number_of_compressed_accounts = Self::safe_gen_range(&mut self.rng, 0..=range, 0); input_compressed_accounts[0..number_of_compressed_accounts].to_vec() } pub fn get_compressed_sol_accounts( &self, pubkey: &Pubkey, ) -> Vec<CompressedAccountWithMerkleContext> { 
self.indexer.get_compressed_accounts_by_owner(pubkey) } pub fn get_merkle_tree_pubkeys(&mut self, num: u64) -> Vec<Pubkey> { let mut pubkeys = vec![]; for _ in 0..num { let range_max: usize = std::cmp::min( self.keypair_action_config .max_output_accounts .unwrap_or(self.indexer.get_state_merkle_trees().len() as u64), self.indexer.get_state_merkle_trees().len() as u64, ) as usize; let index = Self::safe_gen_range(&mut self.rng, 0..range_max, 0); pubkeys.push( self.indexer.get_state_merkle_trees()[index] .accounts .merkle_tree, ); } pubkeys.sort(); pubkeys } pub fn get_address_merkle_tree_pubkeys(&mut self, num: u64) -> (Vec<Pubkey>, Vec<Pubkey>) { let mut pubkeys = vec![]; let mut queue_pubkeys = vec![]; for _ in 0..num { let index = Self::safe_gen_range( &mut self.rng, 0..self.indexer.get_address_merkle_trees().len(), 0, ); pubkeys.push( self.indexer.get_address_merkle_trees()[index] .accounts .merkle_tree, ); queue_pubkeys.push( self.indexer.get_address_merkle_trees()[index] .accounts .queue, ); } (pubkeys, queue_pubkeys) } pub async fn select_random_compressed_token_accounts( &mut self, user: &Pubkey, ) -> (Pubkey, Vec<TokenDataWithContext>) { self.select_random_compressed_token_accounts_delegated(user, false, None, false) .await } pub async fn select_random_compressed_token_accounts_frozen( &mut self, user: &Pubkey, ) -> (Pubkey, Vec<TokenDataWithContext>) { self.select_random_compressed_token_accounts_delegated(user, false, None, true) .await } pub async fn select_random_compressed_token_accounts_delegated( &mut self, user: &Pubkey, delegated: bool, delegate: Option<Pubkey>, frozen: bool, ) -> (Pubkey, Vec<TokenDataWithContext>) { let user_token_accounts = &mut self.indexer.get_compressed_token_accounts_by_owner(user); // clean up dust so that we don't run into issues that account balances are too low user_token_accounts.retain(|t| t.token_data.amount > 1000); let mut token_accounts_with_mint; let mint; if user_token_accounts.is_empty() { mint = 
self.indexer.get_token_compressed_accounts()[self .rng .gen_range(0..self.indexer.get_token_compressed_accounts().len())] .token_data .mint; let number_of_compressed_accounts = Self::safe_gen_range(&mut self.rng, 1..8, 1); let mt_pubkey = self.indexer.get_state_merkle_trees()[0] .accounts .merkle_tree; mint_tokens_helper( &mut self.rpc, &mut self.indexer, &mt_pubkey, &self.payer, &mint, vec![ Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); number_of_compressed_accounts ], vec![*user; number_of_compressed_accounts], ) .await; token_accounts_with_mint = self .indexer .get_compressed_token_accounts_by_owner(user) .iter() .filter(|token_account| token_account.token_data.mint == mint) .cloned() .collect::<Vec<_>>(); } else { mint = user_token_accounts [Self::safe_gen_range(&mut self.rng, 0..user_token_accounts.len(), 0)] .token_data .mint; token_accounts_with_mint = user_token_accounts .iter() .filter(|token_account| token_account.token_data.mint == mint) .map(|token_account| (*token_account).clone()) .collect::<Vec<TokenDataWithContext>>(); } if delegated { token_accounts_with_mint = token_accounts_with_mint .iter() .filter(|token_account| token_account.token_data.delegate.is_some()) .map(|token_account| (*token_account).clone()) .collect::<Vec<TokenDataWithContext>>(); if token_accounts_with_mint.is_empty() { return (mint, Vec::new()); } } if let Some(delegate) = delegate { token_accounts_with_mint = token_accounts_with_mint .iter() .filter(|token_account| token_account.token_data.delegate.unwrap() == delegate) .map(|token_account| (*token_account).clone()) .collect::<Vec<TokenDataWithContext>>(); } if frozen { token_accounts_with_mint = token_accounts_with_mint .iter() .filter(|token_account| token_account.token_data.state == AccountState::Frozen) .map(|token_account| (*token_account).clone()) .collect::<Vec<TokenDataWithContext>>(); if token_accounts_with_mint.is_empty() { return (mint, Vec::new()); } } else { token_accounts_with_mint = 
token_accounts_with_mint .iter() .filter(|token_account| token_account.token_data.state == AccountState::Initialized) .map(|token_account| (*token_account).clone()) .collect::<Vec<TokenDataWithContext>>(); } let range_end = if token_accounts_with_mint.len() == 1 { 1 } else if !token_accounts_with_mint.is_empty() { self.rng .gen_range(1..std::cmp::min(token_accounts_with_mint.len(), 4)) } else { return (mint, Vec::new()); }; let mut get_random_subset_of_token_accounts = token_accounts_with_mint[0..range_end].to_vec(); // Sorting input and output Merkle tree pubkeys the same way so the pubkey indices do not get out of order get_random_subset_of_token_accounts.sort_by(|a, b| { a.compressed_account .merkle_context .merkle_tree_pubkey .cmp(&b.compressed_account.merkle_context.merkle_tree_pubkey) }); (mint, get_random_subset_of_token_accounts) } } // Configures probabilities for keypair actions // default sol configuration is all sol actions enabled with 0.5 probability pub struct KeypairActionConfig { pub compress_sol: Option<f64>, pub decompress_sol: Option<f64>, pub transfer_sol: Option<f64>, pub create_address: Option<f64>, pub compress_spl: Option<f64>, pub decompress_spl: Option<f64>, pub mint_spl: Option<f64>, pub transfer_spl: Option<f64>, pub max_output_accounts: Option<u64>, pub fee_assert: bool, pub approve_spl: Option<f64>, pub revoke_spl: Option<f64>, pub freeze_spl: Option<f64>, pub thaw_spl: Option<f64>, pub burn_spl: Option<f64>, } impl KeypairActionConfig { pub fn prover_config(&self) -> ProverConfig { let mut config = ProverConfig { run_mode: None, circuits: vec![], }; if self.inclusion() { config.circuits.push(ProofType::Inclusion); } if self.non_inclusion() { config.circuits.push(ProofType::NonInclusion); } config } pub fn inclusion(&self) -> bool { self.transfer_sol.is_some() || self.transfer_spl.is_some() } pub fn non_inclusion(&self) -> bool { self.create_address.is_some() } pub fn sol_default() -> Self { Self { compress_sol: Some(0.5), 
decompress_sol: Some(0.5), transfer_sol: Some(0.5), create_address: None, compress_spl: None, decompress_spl: None, mint_spl: None, transfer_spl: None, max_output_accounts: None, fee_assert: true, approve_spl: None, revoke_spl: None, freeze_spl: None, thaw_spl: None, burn_spl: None, } } pub fn spl_default() -> Self { Self { compress_sol: None, decompress_sol: None, transfer_sol: None, create_address: None, compress_spl: Some(0.7), decompress_spl: Some(0.5), mint_spl: None, transfer_spl: Some(0.5), max_output_accounts: Some(10), fee_assert: true, approve_spl: Some(0.5), revoke_spl: Some(0.5), freeze_spl: Some(0.5), thaw_spl: Some(0.5), burn_spl: Some(0.5), } } pub fn all_default() -> Self { Self { compress_sol: Some(0.5), decompress_sol: Some(1.0), transfer_sol: Some(1.0), create_address: Some(0.2), compress_spl: Some(0.7), decompress_spl: Some(0.5), mint_spl: None, transfer_spl: Some(0.5), max_output_accounts: Some(10), fee_assert: true, approve_spl: Some(0.7), revoke_spl: Some(0.7), freeze_spl: Some(0.7), thaw_spl: Some(0.7), burn_spl: Some(0.7), } } pub fn all_default_no_fee_assert() -> Self { Self { compress_sol: Some(0.5), decompress_sol: Some(1.0), transfer_sol: Some(1.0), create_address: Some(0.2), compress_spl: Some(0.7), decompress_spl: Some(0.5), mint_spl: None, transfer_spl: Some(0.5), max_output_accounts: Some(10), fee_assert: false, approve_spl: Some(0.7), revoke_spl: Some(0.7), freeze_spl: Some(0.7), thaw_spl: Some(0.7), burn_spl: Some(0.7), } } pub fn test_default() -> Self { Self { compress_sol: Some(1.0), decompress_sol: Some(1.0), transfer_sol: Some(1.0), create_address: Some(1.0), compress_spl: Some(0.0), decompress_spl: Some(0.0), mint_spl: None, transfer_spl: Some(0.0), max_output_accounts: Some(10), fee_assert: true, approve_spl: None, revoke_spl: None, freeze_spl: None, thaw_spl: None, burn_spl: None, } } pub fn test_forester_default() -> Self { Self { compress_sol: Some(0.0), decompress_sol: Some(0.0), transfer_sol: Some(1.0), create_address: 
None,
            compress_spl: None,
            decompress_spl: None,
            mint_spl: None,
            transfer_spl: None,
            max_output_accounts: Some(3),
            fee_assert: true,
            approve_spl: None,
            revoke_spl: None,
            freeze_spl: None,
            thaw_spl: None,
            burn_spl: None,
        }
    }
}

// Configures probabilities for general actions.
// Each Option<f64> appears to be the per-round probability of the action
// firing; None disables it — TODO confirm against the env runner.
pub struct GeneralActionConfig {
    pub add_keypair: Option<f64>,
    pub create_state_mt: Option<f64>,
    pub create_address_mt: Option<f64>,
    pub nullify_compressed_accounts: Option<f64>,
    pub empty_address_queue: Option<f64>,
    pub rollover: Option<f64>,
    pub add_forester: Option<f64>,
    /// TODO: add this
    /// Creates one infinite epoch
    pub disable_epochs: bool,
}

impl Default for GeneralActionConfig {
    fn default() -> Self {
        Self {
            add_keypair: Some(0.3),
            create_state_mt: Some(1.0),
            create_address_mt: Some(1.0),
            nullify_compressed_accounts: Some(0.2),
            empty_address_queue: Some(0.2),
            rollover: None,
            add_forester: None,
            disable_epochs: false,
        }
    }
}

impl GeneralActionConfig {
    // Every general action disabled (all fields None); for forester-focused tests.
    pub fn test_forester_default() -> Self {
        Self {
            add_keypair: None,
            create_state_mt: None,
            create_address_mt: None,
            nullify_compressed_accounts: None,
            empty_address_queue: None,
            rollover: None,
            add_forester: None,
            disable_epochs: false,
        }
    }

    // Default probabilities plus rollover enabled at 0.5.
    pub fn test_with_rollover() -> Self {
        Self {
            add_keypair: Some(0.3),
            create_state_mt: Some(1.0),
            create_address_mt: Some(1.0),
            nullify_compressed_accounts: Some(0.2),
            empty_address_queue: Some(0.2),
            rollover: Some(0.5),
            add_forester: None,
            disable_epochs: false,
        }
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/state_tree_rollover.rs
#![allow(clippy::await_holding_refcell_ref)] use crate::assert_rollover::{ assert_rolledover_merkle_trees, assert_rolledover_merkle_trees_metadata, assert_rolledover_queues_metadata, }; use account_compression::NullifierQueueConfig; use account_compression::{ self, initialize_address_merkle_tree::AccountLoader, state::QueueAccount, StateMerkleTreeAccount, StateMerkleTreeConfig, ID, }; use anchor_lang::{InstructionData, Lamports, ToAccountMetas}; use forester_utils::{create_account_instruction, get_hash_set}; use light_client::rpc::errors::RpcError; use light_client::rpc::RpcConnection; use light_concurrent_merkle_tree::{ copy::ConcurrentMerkleTreeCopy, zero_copy::ConcurrentMerkleTreeZeroCopyMut, }; use light_hasher::Poseidon; use solana_sdk::clock::Slot; use solana_sdk::{ account::AccountSharedData, account_info::AccountInfo, instruction::{AccountMeta, Instruction}, signature::{Keypair, Signer}, transaction::Transaction, }; use solana_sdk::{account::WritableAccount, pubkey::Pubkey}; use std::mem; pub enum StateMerkleTreeRolloverMode { QueueInvalidSize, TreeInvalidSize, } #[allow(clippy::too_many_arguments)] pub async fn perform_state_merkle_tree_roll_over<R: RpcConnection>( rpc: &mut R, new_nullifier_queue_keypair: &Keypair, new_state_merkle_tree_keypair: &Keypair, merkle_tree_pubkey: &Pubkey, nullifier_queue_pubkey: &Pubkey, merkle_tree_config: &StateMerkleTreeConfig, queue_config: &NullifierQueueConfig, mode: Option<StateMerkleTreeRolloverMode>, ) -> Result<(solana_sdk::signature::Signature, Slot), RpcError> { let payer_pubkey = rpc.get_payer().pubkey(); let mut size = QueueAccount::size(queue_config.capacity as usize).unwrap(); if let Some(StateMerkleTreeRolloverMode::QueueInvalidSize) = mode { size += 1; } let create_nullifier_queue_instruction = create_account_instruction( &payer_pubkey, size, rpc.get_minimum_balance_for_rent_exemption(size) .await .unwrap(), &ID, Some(new_nullifier_queue_keypair), ); let mut state_tree_size = 
account_compression::state::StateMerkleTreeAccount::size( merkle_tree_config.height as usize, merkle_tree_config.changelog_size as usize, merkle_tree_config.roots_size as usize, merkle_tree_config.canopy_depth as usize, ); if let Some(StateMerkleTreeRolloverMode::TreeInvalidSize) = mode { state_tree_size += 1; } let create_state_merkle_tree_instruction = create_account_instruction( &payer_pubkey, state_tree_size, rpc.get_minimum_balance_for_rent_exemption(state_tree_size) .await .unwrap(), &ID, Some(new_state_merkle_tree_keypair), ); let instruction_data = account_compression::instruction::RolloverStateMerkleTreeAndNullifierQueue {}; let accounts = account_compression::accounts::RolloverStateMerkleTreeAndNullifierQueue { fee_payer: rpc.get_payer().pubkey(), authority: rpc.get_payer().pubkey(), registered_program_pda: None, new_state_merkle_tree: new_state_merkle_tree_keypair.pubkey(), new_nullifier_queue: new_nullifier_queue_keypair.pubkey(), old_state_merkle_tree: *merkle_tree_pubkey, old_nullifier_queue: *nullifier_queue_pubkey, }; let instruction = Instruction { program_id: account_compression::ID, accounts: [ accounts.to_account_metas(Some(true)), vec![AccountMeta::new(*merkle_tree_pubkey, false)], ] .concat(), data: instruction_data.data(), }; let blockhash = rpc.get_latest_blockhash().await.unwrap(); let transaction = Transaction::new_signed_with_payer( &[ create_nullifier_queue_instruction, create_state_merkle_tree_instruction, instruction, ], Some(&rpc.get_payer().pubkey()), &vec![ &rpc.get_payer(), &new_nullifier_queue_keypair, &new_state_merkle_tree_keypair, ], blockhash, ); rpc.process_transaction_with_context(transaction).await } pub async fn set_state_merkle_tree_next_index<R: RpcConnection>( rpc: &mut R, merkle_tree_pubkey: &Pubkey, next_index: u64, lamports: u64, ) { let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap(); { let merkle_tree_deserialized = &mut ConcurrentMerkleTreeZeroCopyMut::<Poseidon, 
26>::from_bytes_zero_copy_mut( &mut merkle_tree.data[8 + std::mem::size_of::<StateMerkleTreeAccount>()..], ) .unwrap(); unsafe { *merkle_tree_deserialized.next_index = next_index as usize; } } let mut account_share_data = AccountSharedData::from(merkle_tree); account_share_data.set_lamports(lamports); rpc.set_account(merkle_tree_pubkey, &account_share_data); let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap(); let merkle_tree_deserialized = ConcurrentMerkleTreeZeroCopyMut::<Poseidon, 26>::from_bytes_zero_copy_mut( &mut merkle_tree.data[8 + std::mem::size_of::<StateMerkleTreeAccount>()..], ) .unwrap(); assert_eq!(merkle_tree_deserialized.next_index() as u64, next_index); } #[allow(clippy::too_many_arguments)] pub async fn assert_rolled_over_pair<R: RpcConnection>( payer: &Pubkey, rpc: &mut R, fee_payer_prior_balance: &u64, old_merkle_tree_pubkey: &Pubkey, old_nullifier_queue_pubkey: &Pubkey, new_merkle_tree_pubkey: &Pubkey, new_nullifier_queue_pubkey: &Pubkey, current_slot: u64, additional_rent: u64, num_signatures: u64, ) { let mut new_mt_account = rpc .get_account(*new_merkle_tree_pubkey) .await .unwrap() .unwrap(); let mut new_mt_lamports = 0u64; let old_account_info = AccountInfo::new( new_merkle_tree_pubkey, false, false, &mut new_mt_lamports, &mut new_mt_account.data, &ID, false, 0u64, ); let new_mt_account = AccountLoader::<StateMerkleTreeAccount>::try_from(&old_account_info).unwrap(); let new_loaded_mt_account = new_mt_account.load().unwrap(); let mut old_mt_account = rpc .get_account(*old_merkle_tree_pubkey) .await .unwrap() .unwrap(); let mut old_mt_lamports = 0u64; let new_account_info = AccountInfo::new( old_merkle_tree_pubkey, false, false, &mut old_mt_lamports, &mut old_mt_account.data, &account_compression::ID, false, 0u64, ); let old_mt_account = AccountLoader::<StateMerkleTreeAccount>::try_from(&new_account_info).unwrap(); let old_loaded_mt_account = old_mt_account.load().unwrap(); 
assert_rolledover_merkle_trees_metadata( &old_loaded_mt_account.metadata, &new_loaded_mt_account.metadata, current_slot, new_nullifier_queue_pubkey, ); let old_mt_data = old_account_info.try_borrow_data().unwrap(); let old_mt = ConcurrentMerkleTreeCopy::<Poseidon, 26>::from_bytes_copy( &old_mt_data[8 + mem::size_of::<StateMerkleTreeAccount>()..], ) .unwrap(); let new_mt_data = new_account_info.try_borrow_data().unwrap(); let new_mt = ConcurrentMerkleTreeCopy::<Poseidon, 26>::from_bytes_copy( &new_mt_data[8 + mem::size_of::<StateMerkleTreeAccount>()..], ) .unwrap(); assert_rolledover_merkle_trees(&old_mt, &new_mt); { let mut new_queue_account = rpc .get_account(*new_nullifier_queue_pubkey) .await .unwrap() .unwrap(); let mut new_mt_lamports = 0u64; let account_info = AccountInfo::new( new_nullifier_queue_pubkey, false, false, &mut new_mt_lamports, &mut new_queue_account.data, &account_compression::ID, false, 0u64, ); let new_queue_account = AccountLoader::<QueueAccount>::try_from(&account_info).unwrap(); let new_loaded_queue_account = new_queue_account.load().unwrap(); let mut old_queue_account = rpc .get_account(*old_nullifier_queue_pubkey) .await .unwrap() .unwrap(); let mut old_mt_lamports = 0u64; let account_info = AccountInfo::new( old_nullifier_queue_pubkey, false, false, &mut old_mt_lamports, &mut old_queue_account.data, &account_compression::ID, false, 0u64, ); let old_queue_account = AccountLoader::<QueueAccount>::try_from(&account_info).unwrap(); let old_loaded_queue_account = old_queue_account.load().unwrap(); assert_rolledover_queues_metadata( &old_loaded_queue_account.metadata, &new_loaded_queue_account.metadata, current_slot, new_merkle_tree_pubkey, new_nullifier_queue_pubkey, old_mt_account.get_lamports(), new_mt_account.get_lamports(), new_queue_account.get_lamports(), ); } let fee_payer_post_balance = rpc.get_account(*payer).await.unwrap().unwrap().lamports; // rent is reimbursed, 3 signatures cost 3 x 5000 lamports assert_eq!( 
*fee_payer_prior_balance, fee_payer_post_balance + 5000 * num_signatures + additional_rent ); let old_address_queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, *old_nullifier_queue_pubkey).await }; let new_address_queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, *new_nullifier_queue_pubkey).await }; assert_eq!( old_address_queue.get_capacity(), new_address_queue.get_capacity() ); assert_eq!( old_address_queue.sequence_threshold, new_address_queue.sequence_threshold, ); }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/system_program.rs
use forester_utils::indexer::Indexer; use light_hasher::Poseidon; use light_system_program::sdk::event::PublicTransactionEvent; use light_system_program::{ sdk::{ address::derive_address, compressed_account::{ CompressedAccount, CompressedAccountWithMerkleContext, MerkleContext, }, invoke::{create_invoke_instruction, get_sol_pool_pda}, }, NewAddressParams, }; use solana_sdk::signature::Signature; use solana_sdk::{ pubkey::Pubkey, signature::{Keypair, Signer}, }; use crate::assert_compressed_tx::{ assert_compressed_transaction, get_merkle_tree_snapshots, AssertCompressedTransactionInputs, }; use light_client::rpc::errors::RpcError; use light_client::rpc::RpcConnection; use light_client::transaction_params::TransactionParams; #[allow(clippy::too_many_arguments)] pub async fn create_addresses_test<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &mut I, address_merkle_tree_pubkeys: &[Pubkey], address_merkle_tree_queue_pubkeys: &[Pubkey], mut output_merkle_tree_pubkeys: Vec<Pubkey>, address_seeds: &[[u8; 32]], input_compressed_accounts: &[CompressedAccountWithMerkleContext], create_out_compressed_accounts_for_input_compressed_accounts: bool, transaction_params: Option<TransactionParams>, ) -> Result<(), RpcError> { if address_merkle_tree_pubkeys.len() != address_seeds.len() { panic!("address_merkle_tree_pubkeys and address_seeds length mismatch for create_addresses_test"); } let mut derived_addresses = Vec::new(); for (i, address_seed) in address_seeds.iter().enumerate() { let derived_address = derive_address(&address_merkle_tree_pubkeys[i], address_seed).unwrap(); println!("derived_address: {:?}", derived_address); derived_addresses.push(derived_address); } let mut address_params = Vec::new(); for (i, seed) in address_seeds.iter().enumerate() { let new_address_params = NewAddressParams { address_queue_pubkey: address_merkle_tree_queue_pubkeys[i], address_merkle_tree_pubkey: address_merkle_tree_pubkeys[i], seed: *seed, address_merkle_tree_root_index: 0, }; 
address_params.push(new_address_params); } let mut output_compressed_accounts = Vec::new(); for address in derived_addresses.iter() { output_compressed_accounts.push(CompressedAccount { lamports: 0, owner: rpc.get_payer().pubkey(), data: None, address: Some(*address), }); } if create_out_compressed_accounts_for_input_compressed_accounts { for compressed_account in input_compressed_accounts.iter() { output_compressed_accounts.push(CompressedAccount { lamports: 0, owner: rpc.get_payer().pubkey(), data: None, address: compressed_account.compressed_account.address, }); output_merkle_tree_pubkeys.push(compressed_account.merkle_context.merkle_tree_pubkey); } } let payer = rpc.get_payer().insecure_clone(); let inputs = CompressedTransactionTestInputs { rpc, test_indexer, fee_payer: &payer, authority: &payer, input_compressed_accounts, output_compressed_accounts: output_compressed_accounts.as_slice(), output_merkle_tree_pubkeys: output_merkle_tree_pubkeys.as_slice(), transaction_params, relay_fee: None, compress_or_decompress_lamports: None, is_compress: false, new_address_params: &address_params, sorted_output_accounts: false, created_addresses: Some(derived_addresses.as_slice()), recipient: None, }; compressed_transaction_test(inputs).await?; Ok(()) } #[allow(clippy::too_many_arguments)] pub async fn compress_sol_test<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &mut I, authority: &Keypair, input_compressed_accounts: &[CompressedAccountWithMerkleContext], create_out_compressed_accounts_for_input_compressed_accounts: bool, compress_amount: u64, output_merkle_tree_pubkey: &Pubkey, transaction_params: Option<TransactionParams>, ) -> Result<(), RpcError> { let input_lamports = if input_compressed_accounts.is_empty() { 0 } else { input_compressed_accounts .iter() .map(|x| x.compressed_account.lamports) .sum::<u64>() }; let mut output_compressed_accounts = Vec::new(); output_compressed_accounts.push(CompressedAccount { lamports: input_lamports + 
compress_amount, owner: authority.pubkey(), data: None, address: None, }); let mut output_merkle_tree_pubkeys = vec![*output_merkle_tree_pubkey]; if create_out_compressed_accounts_for_input_compressed_accounts { for compressed_account in input_compressed_accounts.iter() { output_compressed_accounts.push(CompressedAccount { lamports: 0, owner: authority.pubkey(), data: None, address: compressed_account.compressed_account.address, }); output_merkle_tree_pubkeys.push(compressed_account.merkle_context.merkle_tree_pubkey); } } let inputs = CompressedTransactionTestInputs { rpc, test_indexer, fee_payer: authority, authority, input_compressed_accounts, output_compressed_accounts: output_compressed_accounts.as_slice(), output_merkle_tree_pubkeys: &[*output_merkle_tree_pubkey], transaction_params, relay_fee: None, compress_or_decompress_lamports: Some(compress_amount), is_compress: true, new_address_params: &[], sorted_output_accounts: false, created_addresses: None, recipient: None, }; compressed_transaction_test(inputs).await?; Ok(()) } #[allow(clippy::too_many_arguments)] pub async fn decompress_sol_test<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &mut I, authority: &Keypair, input_compressed_accounts: &[CompressedAccountWithMerkleContext], recipient: &Pubkey, decompress_amount: u64, output_merkle_tree_pubkey: &Pubkey, transaction_params: Option<TransactionParams>, ) -> Result<(), RpcError> { let input_lamports = input_compressed_accounts .iter() .map(|x| x.compressed_account.lamports) .sum::<u64>(); let output_compressed_accounts = vec![CompressedAccount { lamports: input_lamports - decompress_amount, owner: rpc.get_payer().pubkey(), data: None, address: None, }]; let payer = rpc.get_payer().insecure_clone(); let inputs = CompressedTransactionTestInputs { rpc, test_indexer, fee_payer: &payer, authority, input_compressed_accounts, output_compressed_accounts: output_compressed_accounts.as_slice(), output_merkle_tree_pubkeys: &[*output_merkle_tree_pubkey], 
transaction_params, relay_fee: None, compress_or_decompress_lamports: Some(decompress_amount), is_compress: false, new_address_params: &[], sorted_output_accounts: false, created_addresses: None, recipient: Some(*recipient), }; compressed_transaction_test(inputs).await?; Ok(()) } #[allow(clippy::too_many_arguments)] pub async fn transfer_compressed_sol_test<R: RpcConnection, I: Indexer<R>>( rpc: &mut R, test_indexer: &mut I, authority: &Keypair, input_compressed_accounts: &[CompressedAccountWithMerkleContext], recipients: &[Pubkey], output_merkle_tree_pubkeys: &[Pubkey], transaction_params: Option<TransactionParams>, ) -> Result<Signature, RpcError> { if recipients.len() != output_merkle_tree_pubkeys.len() { panic!("recipients and output_merkle_tree_pubkeys length mismatch for transfer_compressed_sol_test"); } if input_compressed_accounts.is_empty() { panic!("input_compressed_accounts is empty for transfer_compressed_sol_test"); } let input_lamports = input_compressed_accounts .iter() .map(|x| x.compressed_account.lamports) .sum::<u64>(); let mut output_compressed_accounts = Vec::new(); let mut output_merkle_tree_pubkeys = output_merkle_tree_pubkeys.to_vec(); output_merkle_tree_pubkeys.sort(); let input_addresses = input_compressed_accounts .iter() .map(|x| x.compressed_account.address) .collect::<Vec<_>>(); for (i, _) in output_merkle_tree_pubkeys.iter().enumerate() { let address = if i < input_addresses.len() { input_addresses[i] } else { None }; let mut lamports = input_lamports / output_merkle_tree_pubkeys.len() as u64; if i == 0 { lamports += input_lamports % output_merkle_tree_pubkeys.len() as u64; } output_compressed_accounts.push(CompressedAccount { lamports, owner: recipients[i], data: None, address, }); } let payer = rpc.get_payer().insecure_clone(); let inputs = CompressedTransactionTestInputs { rpc, test_indexer, fee_payer: &payer, authority, input_compressed_accounts, output_compressed_accounts: output_compressed_accounts.as_slice(), 
output_merkle_tree_pubkeys: output_merkle_tree_pubkeys.as_slice(), transaction_params, relay_fee: None, compress_or_decompress_lamports: None, is_compress: false, new_address_params: &[], sorted_output_accounts: false, created_addresses: None, recipient: None, }; compressed_transaction_test(inputs).await } pub struct CompressedTransactionTestInputs<'a, R: RpcConnection, I: Indexer<R>> { rpc: &'a mut R, test_indexer: &'a mut I, fee_payer: &'a Keypair, authority: &'a Keypair, input_compressed_accounts: &'a [CompressedAccountWithMerkleContext], output_compressed_accounts: &'a [CompressedAccount], output_merkle_tree_pubkeys: &'a [Pubkey], transaction_params: Option<TransactionParams>, relay_fee: Option<u64>, compress_or_decompress_lamports: Option<u64>, is_compress: bool, new_address_params: &'a [NewAddressParams], sorted_output_accounts: bool, created_addresses: Option<&'a [[u8; 32]]>, recipient: Option<Pubkey>, } #[allow(clippy::too_many_arguments)] pub async fn compressed_transaction_test<R: RpcConnection, I: Indexer<R>>( inputs: CompressedTransactionTestInputs<'_, R, I>, ) -> Result<Signature, RpcError> { let mut compressed_account_hashes = Vec::new(); let compressed_account_input_hashes = if !inputs.input_compressed_accounts.is_empty() { for compressed_account in inputs.input_compressed_accounts.iter() { compressed_account_hashes.push( compressed_account .compressed_account .hash::<Poseidon>( &compressed_account.merkle_context.merkle_tree_pubkey, &compressed_account.merkle_context.leaf_index, ) .unwrap(), ); } Some(compressed_account_hashes.as_slice()) } else { None }; let state_input_merkle_trees = inputs .input_compressed_accounts .iter() .map(|x| x.merkle_context.merkle_tree_pubkey) .collect::<Vec<Pubkey>>(); let state_input_merkle_trees = if state_input_merkle_trees.is_empty() { None } else { Some(state_input_merkle_trees.as_slice()) }; let mut root_indices = Vec::new(); let mut proof = None; let mut input_merkle_tree_snapshots = Vec::new(); let mut 
address_params = Vec::new(); if !inputs.input_compressed_accounts.is_empty() || !inputs.new_address_params.is_empty() { let address_merkle_tree_pubkeys = if inputs.new_address_params.is_empty() { None } else { Some( inputs .new_address_params .iter() .map(|x| x.address_merkle_tree_pubkey) .collect::<Vec<_>>(), ) }; let proof_rpc_res = inputs .test_indexer .create_proof_for_compressed_accounts( compressed_account_input_hashes, state_input_merkle_trees, inputs.created_addresses, address_merkle_tree_pubkeys, inputs.rpc, ) .await; root_indices = proof_rpc_res.root_indices; proof = Some(proof_rpc_res.proof); let input_merkle_tree_accounts = inputs .test_indexer .get_state_merkle_tree_accounts(state_input_merkle_trees.unwrap_or(&[])); input_merkle_tree_snapshots = get_merkle_tree_snapshots::<R>(inputs.rpc, input_merkle_tree_accounts.as_slice()).await; if !inputs.new_address_params.is_empty() { for (i, input_address_params) in inputs.new_address_params.iter().enumerate() { address_params.push(input_address_params.clone()); address_params[i].address_merkle_tree_root_index = proof_rpc_res.address_root_indices[i]; } } } let output_merkle_tree_accounts = inputs .test_indexer .get_state_merkle_tree_accounts(inputs.output_merkle_tree_pubkeys); let output_merkle_tree_snapshots = get_merkle_tree_snapshots::<R>(inputs.rpc, output_merkle_tree_accounts.as_slice()).await; let instruction = create_invoke_instruction( &inputs.fee_payer.pubkey(), &inputs.authority.pubkey().clone(), inputs .input_compressed_accounts .iter() .map(|x| x.compressed_account.clone()) .collect::<Vec<CompressedAccount>>() .as_slice(), inputs.output_compressed_accounts, inputs .input_compressed_accounts .iter() .map(|x| x.merkle_context) .collect::<Vec<MerkleContext>>() .as_slice(), inputs.output_merkle_tree_pubkeys, &root_indices, &address_params, proof, inputs.compress_or_decompress_lamports, inputs.is_compress, inputs.recipient, true, ); let mut recipient_balance_pre = 0; let mut 
compressed_sol_pda_balance_pre = 0; if inputs.compress_or_decompress_lamports.is_some() { compressed_sol_pda_balance_pre = match inputs.rpc.get_account(get_sol_pool_pda()).await.unwrap() { Some(account) => account.lamports, None => 0, }; } if inputs.recipient.is_some() { // TODO: assert sender balance after fee refactor recipient_balance_pre = match inputs .rpc .get_account(inputs.recipient.unwrap()) .await .unwrap() { Some(account) => account.lamports, None => 0, }; } let event = inputs .rpc .create_and_send_transaction_with_event::<PublicTransactionEvent>( &[instruction], &inputs.fee_payer.pubkey(), &[inputs.fee_payer, inputs.authority], inputs.transaction_params, ) .await? .unwrap(); let (created_output_compressed_accounts, _) = inputs .test_indexer .add_event_and_compressed_accounts(&event.0); let input = AssertCompressedTransactionInputs { rpc: inputs.rpc, test_indexer: inputs.test_indexer, output_compressed_accounts: inputs.output_compressed_accounts, created_output_compressed_accounts: created_output_compressed_accounts.as_slice(), event: &event.0, input_merkle_tree_snapshots: input_merkle_tree_snapshots.as_slice(), output_merkle_tree_snapshots: output_merkle_tree_snapshots.as_slice(), recipient_balance_pre, compress_or_decompress_lamports: inputs.compress_or_decompress_lamports, is_compress: inputs.is_compress, compressed_sol_pda_balance_pre, compression_recipient: inputs.recipient, created_addresses: inputs.created_addresses.unwrap_or(&[]), sorted_output_accounts: inputs.sorted_output_accounts, relay_fee: inputs.relay_fee, input_compressed_account_hashes: &compressed_account_hashes, address_queue_pubkeys: &inputs .new_address_params .iter() .map(|x| x.address_queue_pubkey) .collect::<Vec<Pubkey>>(), }; assert_compressed_transaction(input).await; Ok(event.1) }
0
solana_public_repos/Lightprotocol/light-protocol/test-utils/src
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/indexer/mod.rs
pub mod test_indexer; pub use test_indexer::TestIndexer;
0
solana_public_repos/Lightprotocol/light-protocol/test-utils/src
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/indexer/test_indexer.rs
use log::{debug, info, warn}; use num_bigint::BigUint; use solana_sdk::bs58; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use crate::create_address_merkle_tree_and_queue_account_with_assert; use crate::e2e_test_env::KeypairActionConfig; use crate::spl::create_initialize_mint_instructions; use account_compression::{ AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig, }; use forester_utils::indexer::{ AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof, NewAddressProofWithContext, ProofRpcResult, StateMerkleTreeAccounts, StateMerkleTreeBundle, TokenDataWithContext, }; use forester_utils::{get_concurrent_merkle_tree, get_indexed_merkle_tree}; use light_client::rpc::RpcConnection; use light_client::transaction_params::FeeConfig; use light_compressed_token::constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR; use light_compressed_token::mint_sdk::create_create_token_pool_instruction; use light_compressed_token::{get_token_pool_pda, TokenData}; use light_program_test::test_env::{create_state_merkle_tree_and_queue_account, EnvAccounts}; use light_prover_client::gnark::helpers::{ProverConfig, ProverMode}; use light_utils::bigint::bigint_to_be_bytes_array; use { account_compression::{ utils::constants::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT}, AddressMerkleTreeAccount, StateMerkleTreeAccount, }, anchor_lang::AnchorDeserialize, light_hasher::Poseidon, light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree}, light_merkle_tree_reference::MerkleTree, light_prover_client::{ gnark::{ combined_json_formatter::CombinedJsonStruct, constants::{PROVE_PATH, SERVER_ADDRESS}, helpers::spawn_prover, inclusion_json_formatter::BatchInclusionJsonStruct, non_inclusion_json_formatter::BatchNonInclusionJsonStruct, proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, }, inclusion::merkle_inclusion_proof_inputs::{ 
InclusionMerkleProofInputs, InclusionProofInputs, }, non_inclusion::merkle_non_inclusion_proof_inputs::{ get_non_inclusion_proof_inputs, NonInclusionProofInputs, }, }, light_system_program::{ invoke::processor::CompressedProof, sdk::{ compressed_account::{CompressedAccountWithMerkleContext, MerkleContext}, event::PublicTransactionEvent, }, }, num_bigint::BigInt, num_traits::ops::bytes::FromBytes, reqwest::Client, solana_sdk::{ instruction::Instruction, program_pack::Pack, pubkey::Pubkey, signature::Keypair, signer::Signer, }, spl_token::instruction::initialize_mint, std::time::Duration, }; // TODO: find a different way to init Indexed array on the heap so that it doesn't break the stack #[derive(Debug)] pub struct TestIndexer<R: RpcConnection> { pub state_merkle_trees: Vec<StateMerkleTreeBundle>, pub address_merkle_trees: Vec<AddressMerkleTreeBundle>, pub payer: Keypair, pub group_pda: Pubkey, pub compressed_accounts: Vec<CompressedAccountWithMerkleContext>, pub nullified_compressed_accounts: Vec<CompressedAccountWithMerkleContext>, pub token_compressed_accounts: Vec<TokenDataWithContext>, pub token_nullified_compressed_accounts: Vec<TokenDataWithContext>, pub events: Vec<PublicTransactionEvent>, pub prover_config: Option<ProverConfig>, phantom: PhantomData<R>, } impl<R: RpcConnection + Send + Sync + 'static> Indexer<R> for TestIndexer<R> { async fn get_multiple_compressed_account_proofs( &self, hashes: Vec<String>, ) -> Result<Vec<MerkleProof>, IndexerError> { info!("Getting proofs for {:?}", hashes); let mut proofs: Vec<MerkleProof> = Vec::new(); hashes.iter().for_each(|hash| { let hash_array: [u8; 32] = bs58::decode(hash) .into_vec() .unwrap() .as_slice() .try_into() .unwrap(); self.state_merkle_trees.iter().for_each(|tree| { if let Some(leaf_index) = tree.merkle_tree.get_leaf_index(&hash_array) { let proof = tree .merkle_tree .get_proof_of_leaf(leaf_index, false) .unwrap(); proofs.push(MerkleProof { hash: hash.clone(), leaf_index: leaf_index as u64, 
merkle_tree: tree.accounts.merkle_tree.to_string(), proof: proof.to_vec(), root_seq: tree.merkle_tree.sequence_number as u64, }); } }) }); Ok(proofs) } async fn get_rpc_compressed_accounts_by_owner( &self, owner: &Pubkey, ) -> Result<Vec<String>, IndexerError> { let result = self.get_compressed_accounts_by_owner(owner); let mut hashes: Vec<String> = Vec::new(); for account in result.iter() { let hash = account.hash().unwrap(); let bs58_hash = bs58::encode(hash).into_string(); hashes.push(bs58_hash); } Ok(hashes) } async fn get_multiple_new_address_proofs( &self, merkle_tree_pubkey: [u8; 32], addresses: Vec<[u8; 32]>, ) -> Result<Vec<NewAddressProofWithContext>, IndexerError> { let mut proofs: Vec<NewAddressProofWithContext> = Vec::new(); for address in addresses.iter() { info!("Getting new address proof for {:?}", address); let pubkey = Pubkey::from(merkle_tree_pubkey); let address_tree_bundle = self .address_merkle_trees .iter() .find(|x| x.accounts.merkle_tree == pubkey) .unwrap(); let address_biguint = BigUint::from_bytes_be(address.as_slice()); let (old_low_address, _old_low_address_next_value) = address_tree_bundle .indexed_array .find_low_element_for_nonexistent(&address_biguint) .unwrap(); let address_bundle = address_tree_bundle .indexed_array .new_element_with_low_element_index(old_low_address.index, &address_biguint) .unwrap(); let (old_low_address, old_low_address_next_value) = address_tree_bundle .indexed_array .find_low_element_for_nonexistent(&address_biguint) .unwrap(); // Get the Merkle proof for updating low element. 
let low_address_proof = address_tree_bundle .merkle_tree .get_proof_of_leaf(old_low_address.index, false) .unwrap(); let low_address_index: u64 = old_low_address.index as u64; let low_address_value: [u8; 32] = bigint_to_be_bytes_array(&old_low_address.value).unwrap(); let low_address_next_index: u64 = old_low_address.next_index as u64; let low_address_next_value: [u8; 32] = bigint_to_be_bytes_array(&old_low_address_next_value).unwrap(); let low_address_proof: [[u8; 32]; 16] = low_address_proof.to_array().unwrap(); let proof = NewAddressProofWithContext { merkle_tree: merkle_tree_pubkey, low_address_index, low_address_value, low_address_next_index, low_address_next_value, low_address_proof, root: address_tree_bundle.merkle_tree.root(), root_seq: address_tree_bundle.merkle_tree.merkle_tree.sequence_number as u64, new_low_element: Some(address_bundle.new_low_element), new_element: Some(address_bundle.new_element), new_element_next_value: Some(address_bundle.new_element_next_value), }; proofs.push(proof); } Ok(proofs) } fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) { let decoded_hash: [u8; 32] = bs58::decode(account_hash) .into_vec() .unwrap() .as_slice() .try_into() .unwrap(); if let Some(state_tree_bundle) = self .state_merkle_trees .iter_mut() .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) { if let Some(leaf_index) = state_tree_bundle.merkle_tree.get_leaf_index(&decoded_hash) { state_tree_bundle .merkle_tree .update(&[0u8; 32], leaf_index) .unwrap(); } } } fn address_tree_updated( &mut self, merkle_tree_pubkey: Pubkey, context: &NewAddressProofWithContext, ) { info!("Updating address tree..."); let mut address_tree_bundle: &mut AddressMerkleTreeBundle = self .address_merkle_trees .iter_mut() .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) .unwrap(); let new_low_element = context.new_low_element.clone().unwrap(); let new_element = context.new_element.clone().unwrap(); let new_element_next_value = 
context.new_element_next_value.clone().unwrap(); address_tree_bundle .merkle_tree .update(&new_low_element, &new_element, &new_element_next_value) .unwrap(); address_tree_bundle .indexed_array .append_with_low_element_index(new_low_element.index, &new_element.value) .unwrap(); info!("Address tree updated"); } fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec<StateMerkleTreeAccounts> { pubkeys .iter() .map(|x| { self.state_merkle_trees .iter() .find(|y| y.accounts.merkle_tree == *x) .unwrap() .accounts }) .collect::<Vec<_>>() } fn add_event_and_compressed_accounts( &mut self, event: &PublicTransactionEvent, ) -> ( Vec<CompressedAccountWithMerkleContext>, Vec<TokenDataWithContext>, ) { for hash in event.input_compressed_account_hashes.iter() { let index = self.compressed_accounts.iter().position(|x| { x.compressed_account .hash::<Poseidon>( &x.merkle_context.merkle_tree_pubkey, &x.merkle_context.leaf_index, ) .unwrap() == *hash }); if let Some(index) = index { self.nullified_compressed_accounts .push(self.compressed_accounts[index].clone()); self.compressed_accounts.remove(index); continue; }; if index.is_none() { let index = self .token_compressed_accounts .iter() .position(|x| { x.compressed_account .compressed_account .hash::<Poseidon>( &x.compressed_account.merkle_context.merkle_tree_pubkey, &x.compressed_account.merkle_context.leaf_index, ) .unwrap() == *hash }) .expect("input compressed account not found"); self.token_nullified_compressed_accounts .push(self.token_compressed_accounts[index].clone()); self.token_compressed_accounts.remove(index); } } let mut compressed_accounts = Vec::new(); let mut token_compressed_accounts = Vec::new(); for (i, compressed_account) in event.output_compressed_accounts.iter().enumerate() { let nullifier_queue_pubkey = self .state_merkle_trees .iter() .find(|x| { x.accounts.merkle_tree == event.pubkey_array [event.output_compressed_accounts[i].merkle_tree_index as usize] }) .unwrap() .accounts .nullifier_queue; 
// if data is some, try to deserialize token data, if it fails, add to compressed_accounts // if data is none add to compressed_accounts // new accounts are inserted in front so that the newest accounts are found first match compressed_account.compressed_account.data.as_ref() { Some(data) => { if compressed_account.compressed_account.owner == light_compressed_token::ID && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR { if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { let token_account = TokenDataWithContext { token_data, compressed_account: CompressedAccountWithMerkleContext { compressed_account: compressed_account .compressed_account .clone(), merkle_context: MerkleContext { leaf_index: event.output_leaf_indices[i], merkle_tree_pubkey: event.pubkey_array[event .output_compressed_accounts[i] .merkle_tree_index as usize], nullifier_queue_pubkey, queue_index: None, }, }, }; token_compressed_accounts.push(token_account.clone()); self.token_compressed_accounts.insert(0, token_account); } } else { let compressed_account = CompressedAccountWithMerkleContext { compressed_account: compressed_account.compressed_account.clone(), merkle_context: MerkleContext { leaf_index: event.output_leaf_indices[i], merkle_tree_pubkey: event.pubkey_array[event .output_compressed_accounts[i] .merkle_tree_index as usize], nullifier_queue_pubkey, queue_index: None, }, }; compressed_accounts.push(compressed_account.clone()); self.compressed_accounts.insert(0, compressed_account); } } None => { let compressed_account = CompressedAccountWithMerkleContext { compressed_account: compressed_account.compressed_account.clone(), merkle_context: MerkleContext { leaf_index: event.output_leaf_indices[i], merkle_tree_pubkey: event.pubkey_array [event.output_compressed_accounts[i].merkle_tree_index as usize], nullifier_queue_pubkey, queue_index: None, }, }; compressed_accounts.push(compressed_account.clone()); self.compressed_accounts.insert(0, compressed_account); 
} }; let merkle_tree = &mut self .state_merkle_trees .iter_mut() .find(|x| { x.accounts.merkle_tree == event.pubkey_array [event.output_compressed_accounts[i].merkle_tree_index as usize] }) .unwrap() .merkle_tree; merkle_tree .append( &compressed_account .compressed_account .hash::<Poseidon>( &event.pubkey_array [event.output_compressed_accounts[i].merkle_tree_index as usize], &event.output_leaf_indices[i], ) .unwrap(), ) .expect("insert failed"); } self.events.push(event.clone()); (compressed_accounts, token_compressed_accounts) } fn get_state_merkle_trees(&self) -> &Vec<StateMerkleTreeBundle> { &self.state_merkle_trees } fn get_state_merkle_trees_mut(&mut self) -> &mut Vec<StateMerkleTreeBundle> { &mut self.state_merkle_trees } fn get_address_merkle_trees(&self) -> &Vec<AddressMerkleTreeBundle> { &self.address_merkle_trees } fn get_address_merkle_trees_mut(&mut self) -> &mut Vec<AddressMerkleTreeBundle> { &mut self.address_merkle_trees } fn get_token_compressed_accounts(&self) -> &Vec<TokenDataWithContext> { &self.token_compressed_accounts } fn get_payer(&self) -> &Keypair { &self.payer } fn get_group_pda(&self) -> &Pubkey { &self.group_pda } async fn create_proof_for_compressed_accounts( &mut self, compressed_accounts: Option<&[[u8; 32]]>, state_merkle_tree_pubkeys: Option<&[Pubkey]>, new_addresses: Option<&[[u8; 32]]>, address_merkle_tree_pubkeys: Option<Vec<Pubkey>>, rpc: &mut R, ) -> ProofRpcResult { if compressed_accounts.is_some() && ![1usize, 2usize, 3usize, 4usize, 8usize] .contains(&compressed_accounts.unwrap().len()) { panic!( "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", compressed_accounts.unwrap().len() ) } if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) { panic!("new_addresses must be of length 1, 2") } let client = Client::new(); let (root_indices, address_root_indices, json_payload) = match (compressed_accounts, new_addresses) { (Some(accounts), None) => { let (payload, indices) = self 
.process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) .await; (indices, Vec::new(), payload.to_string()) } (None, Some(addresses)) => { let (payload, indices) = self .process_non_inclusion_proofs( address_merkle_tree_pubkeys.unwrap().as_slice(), addresses, rpc, ) .await; (Vec::<u16>::new(), indices, payload.to_string()) } (Some(accounts), Some(addresses)) => { let (inclusion_payload, inclusion_indices) = self .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) .await; let (non_inclusion_payload, non_inclusion_indices) = self .process_non_inclusion_proofs( address_merkle_tree_pubkeys.unwrap().as_slice(), addresses, rpc, ) .await; let combined_payload = CombinedJsonStruct { inclusion: inclusion_payload.inputs, non_inclusion: non_inclusion_payload.inputs, } .to_string(); (inclusion_indices, non_inclusion_indices, combined_payload) } _ => { panic!("At least one of compressed_accounts or new_addresses must be provided") } }; let mut retries = 3; while retries > 0 { let response_result = client .post(&format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) .header("Content-Type", "text/plain; charset=utf-8") .body(json_payload.clone()) .send() .await .expect("Failed to execute request."); if response_result.status().is_success() { let body = response_result.text().await.unwrap(); let proof_json = deserialize_gnark_proof_json(&body).unwrap(); let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); return ProofRpcResult { root_indices, address_root_indices, proof: CompressedProof { a: proof_a, b: proof_b, c: proof_c, }, }; } else { warn!("Error: {}", response_result.text().await.unwrap()); tokio::time::sleep(Duration::from_secs(1)).await; if let Some(ref prover_config) = self.prover_config { spawn_prover(true, prover_config.clone()).await; } retries -= 1; } } panic!("Failed to get proof from server"); } fn add_address_merkle_tree_accounts( &mut 
self, merkle_tree_keypair: &Keypair, queue_keypair: &Keypair, _owning_program_id: Option<Pubkey>, ) -> AddressMerkleTreeAccounts { info!("Adding address merkle tree accounts..."); let address_merkle_tree_accounts = AddressMerkleTreeAccounts { merkle_tree: merkle_tree_keypair.pubkey(), queue: queue_keypair.pubkey(), }; self.address_merkle_trees .push(Self::add_address_merkle_tree_bundle( address_merkle_tree_accounts, )); info!( "Address merkle tree accounts added. Total: {}", self.address_merkle_trees.len() ); address_merkle_tree_accounts } /// returns compressed_accounts with the owner pubkey /// does not return token accounts. fn get_compressed_accounts_by_owner( &self, owner: &Pubkey, ) -> Vec<CompressedAccountWithMerkleContext> { self.compressed_accounts .iter() .filter(|x| x.compressed_account.owner == *owner) .cloned() .collect() } fn get_compressed_token_accounts_by_owner(&self, owner: &Pubkey) -> Vec<TokenDataWithContext> { self.token_compressed_accounts .iter() .filter(|x| x.token_data.owner == *owner) .cloned() .collect() } fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle) { self.get_state_merkle_trees_mut().push(state_bundle); } } impl<R: RpcConnection> TestIndexer<R> { fn count_matching_hashes(&self, query_hashes: &[String]) -> usize { self.nullified_compressed_accounts .iter() .map(|account| self.compute_hash(account)) .filter(|bs58_hash| query_hashes.contains(bs58_hash)) .count() } fn compute_hash(&self, account: &CompressedAccountWithMerkleContext) -> String { // replace AccountType with actual type let hash = account .compressed_account .hash::<Poseidon>( &account.merkle_context.merkle_tree_pubkey, &account.merkle_context.leaf_index, ) .unwrap(); bs58::encode(hash).into_string() } pub async fn init_from_env( payer: &Keypair, env: &EnvAccounts, prover_config: Option<ProverConfig>, ) -> Self { Self::new( vec![StateMerkleTreeAccounts { merkle_tree: env.merkle_tree_pubkey, nullifier_queue: env.nullifier_queue_pubkey, cpi_context: 
env.cpi_context_account_pubkey, }], vec![AddressMerkleTreeAccounts { merkle_tree: env.address_merkle_tree_pubkey, queue: env.address_merkle_tree_queue_pubkey, }], payer.insecure_clone(), env.group_pda, prover_config, ) .await } pub async fn new( state_merkle_tree_accounts: Vec<StateMerkleTreeAccounts>, address_merkle_tree_accounts: Vec<AddressMerkleTreeAccounts>, payer: Keypair, group_pda: Pubkey, prover_config: Option<ProverConfig>, ) -> Self { if let Some(ref prover_config) = prover_config { spawn_prover(true, prover_config.clone()).await; } let mut state_merkle_trees = Vec::new(); for state_merkle_tree_account in state_merkle_tree_accounts.iter() { let merkle_tree = Box::new(MerkleTree::<Poseidon>::new( STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, )); state_merkle_trees.push(StateMerkleTreeBundle { accounts: *state_merkle_tree_account, merkle_tree, rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64, }); } let mut address_merkle_trees = Vec::new(); for address_merkle_tree_account in address_merkle_tree_accounts { address_merkle_trees.push(Self::add_address_merkle_tree_bundle( address_merkle_tree_account, )); } Self { state_merkle_trees, address_merkle_trees, payer, compressed_accounts: vec![], nullified_compressed_accounts: vec![], events: vec![], token_compressed_accounts: vec![], token_nullified_compressed_accounts: vec![], prover_config, phantom: Default::default(), group_pda, } } pub fn add_address_merkle_tree_bundle( address_merkle_tree_accounts: AddressMerkleTreeAccounts, // TODO: add config here ) -> AddressMerkleTreeBundle { let mut merkle_tree = Box::new( IndexedMerkleTree::<Poseidon, usize>::new( STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, ) .unwrap(), ); merkle_tree.init().unwrap(); let mut indexed_array = Box::<IndexedArray<Poseidon, usize>>::default(); indexed_array.init().unwrap(); AddressMerkleTreeBundle { merkle_tree, indexed_array, accounts: 
address_merkle_tree_accounts, rollover_fee: FeeConfig::default().address_queue_rollover as i64, } } pub async fn add_address_merkle_tree( &mut self, rpc: &mut R, merkle_tree_keypair: &Keypair, queue_keypair: &Keypair, owning_program_id: Option<Pubkey>, ) -> AddressMerkleTreeAccounts { create_address_merkle_tree_and_queue_account_with_assert( &self.payer, true, rpc, merkle_tree_keypair, queue_keypair, owning_program_id, None, &AddressMerkleTreeConfig::default(), &AddressQueueConfig::default(), 0, ) .await .unwrap(); self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) } pub async fn add_state_merkle_tree( &mut self, rpc: &mut R, merkle_tree_keypair: &Keypair, nullifier_queue_keypair: &Keypair, cpi_context_keypair: &Keypair, owning_program_id: Option<Pubkey>, forester: Option<Pubkey>, ) { create_state_merkle_tree_and_queue_account( &self.payer, true, rpc, merkle_tree_keypair, nullifier_queue_keypair, Some(cpi_context_keypair), owning_program_id, forester, self.state_merkle_trees.len() as u64, &StateMerkleTreeConfig::default(), &NullifierQueueConfig::default(), ) .await .unwrap(); let state_merkle_tree_account = StateMerkleTreeAccounts { merkle_tree: merkle_tree_keypair.pubkey(), nullifier_queue: nullifier_queue_keypair.pubkey(), cpi_context: cpi_context_keypair.pubkey(), }; let merkle_tree = Box::new(MerkleTree::<Poseidon>::new( STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, )); self.state_merkle_trees.push(StateMerkleTreeBundle { merkle_tree, accounts: state_merkle_tree_account, rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64, }); } async fn process_inclusion_proofs( &self, merkle_tree_pubkeys: &[Pubkey], accounts: &[[u8; 32]], rpc: &mut R, ) -> (BatchInclusionJsonStruct, Vec<u16>) { let mut inclusion_proofs = Vec::new(); let mut root_indices = Vec::new(); for (i, account) in accounts.iter().enumerate() { let merkle_tree = &self .state_merkle_trees .iter() .find(|x| 
x.accounts.merkle_tree == merkle_tree_pubkeys[i]) .unwrap() .merkle_tree; let leaf_index = merkle_tree.get_leaf_index(account).unwrap(); let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap(); inclusion_proofs.push(InclusionMerkleProofInputs { root: BigInt::from_be_bytes(merkle_tree.root().as_slice()), leaf: BigInt::from_be_bytes(account), path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()), path_elements: proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(), }); let fetched_merkle_tree = unsafe { get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>( rpc, merkle_tree_pubkeys[i], ) .await }; for i in 0..fetched_merkle_tree.roots.len() { info!("roots {:?} {:?}", i, fetched_merkle_tree.roots[i]); } info!( "sequence number {:?}", fetched_merkle_tree.sequence_number() ); info!("root index {:?}", fetched_merkle_tree.root_index()); info!("local sequence number {:?}", merkle_tree.sequence_number); assert_eq!( merkle_tree.root(), fetched_merkle_tree.root(), "Merkle tree root mismatch" ); root_indices.push(fetched_merkle_tree.root_index() as u16); } let inclusion_proof_inputs = InclusionProofInputs(inclusion_proofs.as_slice()); let batch_inclusion_proof_inputs = BatchInclusionJsonStruct::from_inclusion_proof_inputs(&inclusion_proof_inputs); (batch_inclusion_proof_inputs, root_indices) } async fn process_non_inclusion_proofs( &self, address_merkle_tree_pubkeys: &[Pubkey], addresses: &[[u8; 32]], rpc: &mut R, ) -> (BatchNonInclusionJsonStruct, Vec<u16>) { let mut non_inclusion_proofs = Vec::new(); let mut address_root_indices = Vec::new(); for (i, address) in addresses.iter().enumerate() { let address_tree = &self .address_merkle_trees .iter() .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i]) .unwrap(); let proof_inputs = get_non_inclusion_proof_inputs( address, &address_tree.merkle_tree, &address_tree.indexed_array, ); non_inclusion_proofs.push(proof_inputs); let fetched_address_merkle_tree = unsafe { 
get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>( rpc, address_merkle_tree_pubkeys[i], ) .await }; address_root_indices.push(fetched_address_merkle_tree.root_index() as u16); } let non_inclusion_proof_inputs = NonInclusionProofInputs(non_inclusion_proofs.as_slice()); let batch_non_inclusion_proof_inputs = BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs( &non_inclusion_proof_inputs, ); (batch_non_inclusion_proof_inputs, address_root_indices) } /// deserializes an event /// adds the output_compressed_accounts to the compressed_accounts /// removes the input_compressed_accounts from the compressed_accounts /// adds the input_compressed_accounts to the nullified_compressed_accounts pub fn add_lamport_compressed_accounts(&mut self, event_bytes: Vec<u8>) { let event_bytes = event_bytes.clone(); let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap(); self.add_event_and_compressed_accounts(&event); } /// deserializes an event /// adds the output_compressed_accounts to the compressed_accounts /// removes the input_compressed_accounts from the compressed_accounts /// adds the input_compressed_accounts to the nullified_compressed_accounts /// deserialiazes token data from the output_compressed_accounts /// adds the token_compressed_accounts to the token_compressed_accounts pub fn add_compressed_accounts_with_token_data(&mut self, event: &PublicTransactionEvent) { self.add_event_and_compressed_accounts(event); } /// returns the compressed sol balance of the owner pubkey pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 { self.compressed_accounts .iter() .filter(|x| x.compressed_account.owner == *owner) .map(|x| x.compressed_account.lamports) .sum() } /// returns the compressed token balance of the owner pubkey for a token by mint pub fn get_compressed_token_balance(&self, owner: &Pubkey, mint: &Pubkey) -> u64 { self.token_compressed_accounts .iter() .filter(|x| { 
x.compressed_account.compressed_account.owner == *owner && x.token_data.mint == *mint }) .map(|x| x.token_data.amount) .sum() } }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/Cargo.toml
# Manifest for the `light-hasher` crate: hash-function traits (Poseidon,
# Keccak, SHA-256) usable both on- and off-chain on Solana.
[package]
name = "light-hasher"
version = "1.1.0"
description = "Trait for generic usage of hash functions on Solana"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"

[features]
# Marker feature; enables no extra dependencies by itself.
solana = []

[dependencies]
light-poseidon = "0.2.0"
solana-program = { workspace = true }
thiserror = "1.0"

# Off-chain (non-SBF) builds compute hashes locally instead of via Solana
# syscalls, so they need the actual hash implementations.
[target.'cfg(not(target_os = "solana"))'.dependencies]
ark-bn254 = "0.4.0"
sha2 = "0.10"
sha3 = "0.10"

[dev-dependencies]
rand = "0.8"
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/sha256.rs
use crate::{
    errors::HasherError,
    zero_bytes::{sha256::ZERO_BYTES, ZeroBytes},
    zero_indexed_leaf::sha256::ZERO_INDEXED_LEAF,
    Hash, Hasher,
};

/// SHA-256 implementation of the [`Hasher`] trait.
#[derive(Clone, Copy)] // To allow using with zero copy Solana accounts.
pub struct Sha256;

impl Hasher for Sha256 {
    /// Hashes a single byte slice by delegating to [`Self::hashv`].
    fn hash(val: &[u8]) -> Result<Hash, HasherError> {
        Self::hashv(&[val])
    }

    /// Hashes the concatenation of all slices in `vals`.
    ///
    /// Off-chain this uses the `sha2` crate; on-chain
    /// (`target_os = "solana"`) it goes through the `sol_sha256` syscall.
    fn hashv(vals: &[&[u8]]) -> Result<Hash, HasherError> {
        #[cfg(not(target_os = "solana"))]
        {
            use sha2::{Digest, Sha256};

            let mut hasher = Sha256::default();
            for val in vals {
                hasher.update(val);
            }
            Ok(hasher.finalize().into())
        }
        // Call via a system call to perform the calculation
        #[cfg(target_os = "solana")]
        {
            use crate::HASH_BYTES;
            let mut hash_result = [0; HASH_BYTES];
            // SAFETY: forwards the slice-of-slices pointer, its element count,
            // and a 32-byte output buffer to the syscall. The syscall only
            // writes HASH_BYTES bytes into `hash_result`.
            unsafe {
                crate::syscalls::sol_sha256(
                    vals as *const _ as *const u8,
                    vals.len() as u64,
                    &mut hash_result as *mut _ as *mut u8,
                );
            }

            Ok(hash_result)
        }
    }

    /// Precomputed zero-value hash table for SHA-256 (generated constants).
    fn zero_bytes() -> ZeroBytes {
        ZERO_BYTES
    }

    /// Precomputed zeroed indexed-tree leaf hash for SHA-256.
    fn zero_indexed_leaf() -> [u8; 32] {
        ZERO_INDEXED_LEAF
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/lib.rs
//! Hash-function abstractions shared by Light Protocol's Merkle tree code.
//!
//! The [`Hasher`] trait is implemented by [`Poseidon`], [`Keccak`] and
//! [`Sha256`]; each implementation also exposes precomputed zero-value
//! constants from the `zero_bytes` and `zero_indexed_leaf` modules.

pub mod bytes;
pub mod errors;
pub mod keccak;
pub mod poseidon;
pub mod sha256;
pub mod syscalls;
pub mod zero_bytes;
pub mod zero_indexed_leaf;

pub use keccak::Keccak;
pub use poseidon::Poseidon;
pub use sha256::Sha256;

pub use crate::errors::HasherError;
use crate::zero_bytes::ZeroBytes;

/// Size of every hash produced by this crate, in bytes.
pub const HASH_BYTES: usize = 32;

/// A 32-byte hash value.
pub type Hash = [u8; HASH_BYTES];

/// Common interface over the supported hash functions.
pub trait Hasher {
    /// Hashes a single byte slice.
    fn hash(val: &[u8]) -> Result<Hash, HasherError>;
    /// Hashes a sequence of byte slices as one input.
    fn hashv(vals: &[&[u8]]) -> Result<Hash, HasherError>;
    /// Precomputed zero-value hash table for this hash function.
    fn zero_bytes() -> ZeroBytes;
    /// Precomputed hash of the zeroed indexed-tree leaf.
    fn zero_indexed_leaf() -> [u8; 32];
}

/// Types that can hash their own contents with any [`Hasher`].
pub trait DataHasher {
    fn hash<H: crate::Hasher>(&self) -> Result<[u8; 32], HasherError>;
}

/// Types identified by a fixed 8-byte discriminator.
pub trait Discriminator {
    const DISCRIMINATOR: [u8; 8];
    fn discriminator() -> [u8; 8] {
        Self::DISCRIMINATOR
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/keccak.rs
use crate::{
    errors::HasherError,
    zero_bytes::{keccak::ZERO_BYTES, ZeroBytes},
    zero_indexed_leaf::keccak::ZERO_INDEXED_LEAF,
    Hash, Hasher,
};

/// Keccak-256 implementation of the [`Hasher`] trait.
#[derive(Clone, Copy)] // To allow using with zero copy Solana accounts.
pub struct Keccak;

impl Hasher for Keccak {
    /// Hashes a single byte slice by delegating to [`Self::hashv`].
    fn hash(val: &[u8]) -> Result<Hash, HasherError> {
        Self::hashv(&[val])
    }

    /// Hashes the concatenation of all slices in `vals`.
    ///
    /// Off-chain this uses the `sha3` crate's `Keccak256`; on-chain
    /// (`target_os = "solana"`) it goes through the `sol_keccak256` syscall.
    fn hashv(vals: &[&[u8]]) -> Result<Hash, HasherError> {
        #[cfg(not(target_os = "solana"))]
        {
            use sha3::{Digest, Keccak256};

            let mut hasher = Keccak256::default();
            for val in vals {
                hasher.update(val);
            }
            Ok(hasher.finalize().into())
        }
        // Call via a system call to perform the calculation
        #[cfg(target_os = "solana")]
        {
            use crate::HASH_BYTES;
            let mut hash_result = [0; HASH_BYTES];
            // SAFETY: forwards the slice-of-slices pointer, its element count,
            // and a 32-byte output buffer to the syscall. The syscall only
            // writes HASH_BYTES bytes into `hash_result`.
            unsafe {
                crate::syscalls::sol_keccak256(
                    vals as *const _ as *const u8,
                    vals.len() as u64,
                    &mut hash_result as *mut _ as *mut u8,
                );
            }

            Ok(hash_result)
        }
    }

    /// Precomputed zero-value hash table for Keccak (generated constants).
    fn zero_bytes() -> ZeroBytes {
        ZERO_BYTES
    }

    /// Precomputed zeroed indexed-tree leaf hash for Keccak.
    fn zero_indexed_leaf() -> [u8; 32] {
        ZERO_INDEXED_LEAF
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/errors.rs
use light_poseidon::PoseidonError;
use solana_program::poseidon::PoseidonSyscallError;
use thiserror::Error;

/// Errors produced by the hashers in this crate.
#[derive(Debug, Error)]
pub enum HasherError {
    #[error("Integer overflow, value too large")]
    IntegerOverflow,
    #[error("Poseidon hasher error: {0}")]
    Poseidon(#[from] PoseidonError),
    #[error("Poseidon syscall error: {0}")]
    PoseidonSyscall(#[from] PoseidonSyscallError),
    #[error("Unknown Solana syscall error: {0}")]
    UnknownSolanaSyscall(u64),
}

// NOTE(vadorovsky): Unfortunately, we need to do it by hand. `num_derive::ToPrimitive`
// doesn't support data-carrying enums.
//
// Maps each variant to a numeric code in the 7001-7004 range; variants that
// carry their own code prefer it when it fits into a `u32`.
impl From<HasherError> for u32 {
    fn from(e: HasherError) -> u32 {
        match e {
            HasherError::IntegerOverflow => 7001,
            HasherError::Poseidon(_) => 7002,
            // Use the syscall's own code; fall back to 7003 if it does not
            // fit into a `u32`.
            HasherError::PoseidonSyscall(e) => (u64::from(e)).try_into().unwrap_or(7003),
            // Same fallback scheme for unknown syscall codes.
            HasherError::UnknownSolanaSyscall(e) => e.try_into().unwrap_or(7004),
        }
    }
}

// Lets `?` convert hasher errors into Solana's `ProgramError::Custom` with
// the numeric code from the conversion above.
impl From<HasherError> for solana_program::program_error::ProgramError {
    fn from(e: HasherError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/bytes.rs
/// A trait providing [`as_byte_vec()`](AsByteVec::as_byte_vec) method for types which /// are used inside compressed accounts. pub trait AsByteVec { fn as_byte_vec(&self) -> Vec<Vec<u8>>; } macro_rules! impl_as_byte_vec_for_integer_type { ($int_ty:ty) => { impl AsByteVec for $int_ty { fn as_byte_vec(&self) -> Vec<Vec<u8>> { vec![self.to_le_bytes().to_vec()] } } }; } // Special implementation for `bool` since bool doesn't implement `ToLeBytes`. impl AsByteVec for bool { fn as_byte_vec(&self) -> Vec<Vec<u8>> { vec![vec![*self as u8]] } } impl_as_byte_vec_for_integer_type!(i8); impl_as_byte_vec_for_integer_type!(u8); impl_as_byte_vec_for_integer_type!(i16); impl_as_byte_vec_for_integer_type!(u16); impl_as_byte_vec_for_integer_type!(i32); impl_as_byte_vec_for_integer_type!(u32); impl_as_byte_vec_for_integer_type!(i64); impl_as_byte_vec_for_integer_type!(u64); impl_as_byte_vec_for_integer_type!(isize); impl_as_byte_vec_for_integer_type!(usize); impl_as_byte_vec_for_integer_type!(i128); impl_as_byte_vec_for_integer_type!(u128); impl<T> AsByteVec for Option<T> where T: AsByteVec, { fn as_byte_vec(&self) -> Vec<Vec<u8>> { match self { Some(hashable) => { let mut bytes = hashable.as_byte_vec(); bytes.reserve(1); bytes.insert(0, vec![1]); bytes } None => vec![vec![0]], } } } impl<const N: usize> AsByteVec for [u8; N] { fn as_byte_vec(&self) -> Vec<Vec<u8>> { vec![self.to_vec()] } } impl AsByteVec for String { fn as_byte_vec(&self) -> Vec<Vec<u8>> { vec![self.as_bytes().to_vec()] } } impl AsByteVec for solana_program::pubkey::Pubkey { fn as_byte_vec(&self) -> Vec<Vec<u8>> { vec![self.to_bytes().to_vec()] } } #[cfg(test)] mod test { use super::*; #[test] fn test_as_byte_vec_integers() { let i8_min: &dyn AsByteVec = &i8::MIN; let i8_min_bytes = i8_min.as_byte_vec(); assert_eq!(i8_min_bytes, &[&[128]]); assert_eq!(i8_min_bytes, &[i8::MIN.to_le_bytes()]); let i8_max: &dyn AsByteVec = &i8::MAX; let i8_max_bytes = i8_max.as_byte_vec(); assert_eq!(i8_max_bytes, &[&[127]]); 
assert_eq!(i8_max_bytes, &[i8::MAX.to_le_bytes()]); let u8_min: &dyn AsByteVec = &u8::MIN; let u8_min_bytes = u8_min.as_byte_vec(); assert_eq!(u8_min_bytes, &[&[0]]); assert_eq!(u8_min_bytes, &[u8::MIN.to_le_bytes()]); let u8_max: &dyn AsByteVec = &u8::MAX; let u8_max_bytes = u8_max.as_byte_vec(); assert_eq!(u8_max_bytes, &[&[255]]); assert_eq!(u8_max_bytes, &[u8::MAX.to_le_bytes()]); let i16_min: &dyn AsByteVec = &i16::MIN; let i16_min_bytes = i16_min.as_byte_vec(); assert_eq!(i16_min_bytes, &[&[0, 128]]); assert_eq!(i16_min_bytes, &[&i16::MIN.to_le_bytes()]); let i16_max: &dyn AsByteVec = &i16::MAX; let i16_max_bytes = i16_max.as_byte_vec(); assert_eq!(i16_max_bytes, &[&[255, 127]]); assert_eq!(i16_max_bytes, &[i16::MAX.to_le_bytes()]); let u16_min: &dyn AsByteVec = &u16::MIN; let u16_min_bytes = u16_min.as_byte_vec(); assert_eq!(u16_min_bytes, &[&[0, 0]]); assert_eq!(u16_min_bytes, &[u16::MIN.to_le_bytes()]); let u16_max: &dyn AsByteVec = &u16::MAX; let u16_max_bytes = u16_max.as_byte_vec(); assert_eq!(u16_max_bytes, &[&[255, 255]]); assert_eq!(u16_max_bytes, &[u16::MAX.to_le_bytes()]); let i32_min: &dyn AsByteVec = &i32::MIN; let i32_min_bytes = i32_min.as_byte_vec(); assert_eq!(i32_min_bytes, &[&[0, 0, 0, 128]]); assert_eq!(i32_min_bytes, &[i32::MIN.to_le_bytes()]); let i32_max: &dyn AsByteVec = &i32::MAX; let i32_max_bytes = i32_max.as_byte_vec(); assert_eq!(i32_max_bytes, &[&[255, 255, 255, 127]]); assert_eq!(i32_max_bytes, &[i32::MAX.to_le_bytes()]); let u32_min: &dyn AsByteVec = &u32::MIN; let u32_min_bytes = u32_min.as_byte_vec(); assert_eq!(u32_min_bytes, &[&[0, 0, 0, 0]]); assert_eq!(u32_min_bytes, &[u32::MIN.to_le_bytes()]); let u32_max: &dyn AsByteVec = &u32::MAX; let u32_max_bytes = u32_max.as_byte_vec(); assert_eq!(u32_max_bytes, &[&[255, 255, 255, 255]]); assert_eq!(u32_max_bytes, &[u32::MAX.to_le_bytes()]); let i64_min: &dyn AsByteVec = &i64::MIN; let i64_min_bytes = i64_min.as_byte_vec(); assert_eq!(i64_min_bytes, &[&[0, 0, 0, 0, 0, 0, 0, 128]]); 
assert_eq!(i64_min_bytes, &[i64::MIN.to_le_bytes()]); let i64_max: &dyn AsByteVec = &i64::MAX; let i64_max_bytes = i64_max.as_byte_vec(); assert_eq!(i64_max_bytes, &[&[255, 255, 255, 255, 255, 255, 255, 127]]); assert_eq!(i64_max_bytes, &[i64::MAX.to_le_bytes()]); let u64_min: &dyn AsByteVec = &u64::MIN; let u64_min_bytes = u64_min.as_byte_vec(); assert_eq!(u64_min_bytes, &[[0, 0, 0, 0, 0, 0, 0, 0]]); assert_eq!(i64_min_bytes, &[i64::MIN.to_le_bytes()]); let u64_max: &dyn AsByteVec = &u64::MAX; let u64_max_bytes = u64_max.as_byte_vec(); assert_eq!(u64_max_bytes, &[&[255, 255, 255, 255, 255, 255, 255, 255]]); assert_eq!(u64_max_bytes, &[u64::MAX.to_le_bytes()]); let i128_min: &dyn AsByteVec = &i128::MIN; let i128_min_bytes = i128_min.as_byte_vec(); assert_eq!( i128_min_bytes, &[&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]] ); assert_eq!(i128_min_bytes, &[i128::MIN.to_le_bytes()]); let i128_max: &dyn AsByteVec = &i128::MAX; let i128_max_bytes = i128_max.as_byte_vec(); assert_eq!( i128_max_bytes, &[&[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127]] ); assert_eq!(i128_max_bytes, &[i128::MAX.to_le_bytes()]); let u128_min: &dyn AsByteVec = &u128::MIN; let u128_min_bytes = u128_min.as_byte_vec(); assert_eq!( u128_min_bytes, &[&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ); assert_eq!(u128_min_bytes, &[u128::MIN.to_le_bytes()]); let u128_max: &dyn AsByteVec = &u128::MAX; let u128_max_bytes = u128_max.as_byte_vec(); assert_eq!( u128_max_bytes, &[&[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]] ); assert_eq!(u128_max_bytes, &[u128::MAX.to_le_bytes()]); } #[test] fn test_as_byte_vec_primitives() { let bool_false: &dyn AsByteVec = &false; assert_eq!(bool_false.as_byte_vec(), &[&[0]]); let bool_true: &dyn AsByteVec = &true; assert_eq!(bool_true.as_byte_vec(), &[&[1]]); } #[test] fn test_as_byte_vec_option() { // Very important property - `None` and `Some(0)` always have to be // different and should 
produce different hashes! let u8_none: Option<u8> = None; let u8_none: &dyn AsByteVec = &u8_none; assert_eq!(u8_none.as_byte_vec(), &[&[0]]); let u8_some_zero: Option<u8> = Some(0); let u8_some_zero: &dyn AsByteVec = &u8_some_zero; assert_eq!(u8_some_zero.as_byte_vec(), &[&[1], &[0]]); let u16_none: Option<u16> = None; let u16_none: &dyn AsByteVec = &u16_none; assert_eq!(u16_none.as_byte_vec(), &[&[0]]); let u16_some_zero: Option<u16> = Some(0); let u16_some_zero: &dyn AsByteVec = &u16_some_zero; assert_eq!(u16_some_zero.as_byte_vec(), &[&[1][..], &[0, 0][..]]); let u32_none: Option<u32> = None; let u32_none: &dyn AsByteVec = &u32_none; assert_eq!(u32_none.as_byte_vec(), &[&[0]]); let u32_some_zero: Option<u32> = Some(0); let u32_some_zero: &dyn AsByteVec = &u32_some_zero; assert_eq!(u32_some_zero.as_byte_vec(), &[&[1][..], &[0, 0, 0, 0][..]]); let u64_none: Option<u64> = None; let u64_none: &dyn AsByteVec = &u64_none; assert_eq!(u64_none.as_byte_vec(), &[&[0]]); let u64_some_zero: Option<u64> = Some(0); let u64_some_zero: &dyn AsByteVec = &u64_some_zero; assert_eq!( u64_some_zero.as_byte_vec(), &[&[1][..], &[0, 0, 0, 0, 0, 0, 0, 0][..]] ); let u128_none: Option<u128> = None; let u128_none: &dyn AsByteVec = &u128_none; assert_eq!(u128_none.as_byte_vec(), &[&[0]]); let u128_some_zero: Option<u128> = Some(0); let u128_some_zero: &dyn AsByteVec = &u128_some_zero; assert_eq!( u128_some_zero.as_byte_vec(), &[ &[1][..], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0][..] 
] ); } #[test] fn test_as_byte_vec_array() { let arr: [u8; 0] = []; let arr: &dyn AsByteVec = &arr; assert_eq!(arr.as_byte_vec(), &[&[]]); let arr: [u8; 1] = [255]; let arr: &dyn AsByteVec = &arr; assert_eq!(arr.as_byte_vec(), &[&[255]]); let arr: [u8; 4] = [255, 255, 255, 255]; let arr: &dyn AsByteVec = &arr; assert_eq!(arr.as_byte_vec(), &[&[255, 255, 255, 255]]); } #[test] fn test_as_byte_vec_string() { let s: &dyn AsByteVec = &"".to_string(); assert_eq!(s.as_byte_vec(), &[b""]); let s: &dyn AsByteVec = &"foobar".to_string(); assert_eq!(s.as_byte_vec(), &[b"foobar"]); } }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/poseidon.rs
use crate::{
    errors::HasherError,
    zero_bytes::{poseidon::ZERO_BYTES, ZeroBytes},
    zero_indexed_leaf::poseidon::ZERO_INDEXED_LEAF,
    Hash, Hasher,
};

/// Poseidon (BN254, circom-parameter) implementation of the [`Hasher`] trait.
#[derive(Debug, Clone, Copy)]
pub struct Poseidon;

impl Hasher for Poseidon {
    /// Hashes a single byte slice by delegating to [`Self::hashv`].
    fn hash(val: &[u8]) -> Result<Hash, HasherError> {
        Self::hashv(&[val])
    }

    /// Hashes `vals` as separate big-endian field-element inputs.
    fn hashv(vals: &[&[u8]]) -> Result<Hash, HasherError> {
        // Perform the calculation inline, calling this from within a program is
        // not supported.
        #[cfg(not(target_os = "solana"))]
        {
            use ark_bn254::Fr;
            use light_poseidon::{Poseidon, PoseidonBytesHasher};

            // The Poseidon instance is parameterized by the number of inputs.
            let mut hasher = Poseidon::<Fr>::new_circom(vals.len())?;
            let res = hasher.hash_bytes_be(vals)?;

            Ok(res)
        }
        // Call via a system call to perform the calculation.
        #[cfg(target_os = "solana")]
        {
            use solana_program::poseidon::PoseidonSyscallError;

            use crate::HASH_BYTES;

            let mut hash_result = [0; HASH_BYTES];
            // SAFETY: forwards the slice-of-slices pointer, its element count,
            // and a 32-byte output buffer to the syscall. The syscall only
            // writes HASH_BYTES bytes into `hash_result`.
            let result = unsafe {
                crate::syscalls::sol_poseidon(
                    0, // bn254
                    0, // big-endian
                    vals as *const _ as *const u8,
                    vals.len() as u64,
                    &mut hash_result as *mut _ as *mut u8,
                )
            };

            match result {
                // Zero return code means success; any other value is decoded
                // as a Poseidon syscall error.
                0 => Ok(hash_result),
                e => Err(HasherError::from(PoseidonSyscallError::from(e))),
            }
        }
    }

    /// Precomputed zero-value hash table for Poseidon (generated constants).
    fn zero_bytes() -> ZeroBytes {
        ZERO_BYTES
    }

    /// Precomputed zeroed indexed-tree leaf hash for Poseidon.
    fn zero_indexed_leaf() -> [u8; 32] {
        ZERO_INDEXED_LEAF
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_indexed_leaf/sha256.rs
// This file is generated by xtask. Do not edit it manually. pub const ZERO_INDEXED_LEAF: [u8; 32] = [ 131u8, 74u8, 112u8, 155u8, 162u8, 83u8, 78u8, 190u8, 62u8, 225u8, 57u8, 127u8, 212u8, 247u8, 189u8, 40u8, 139u8, 42u8, 204u8, 29u8, 32u8, 160u8, 141u8, 108u8, 134u8, 45u8, 205u8, 153u8, 182u8, 240u8, 68u8, 0u8, ];
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_indexed_leaf/keccak.rs
// This file is generated by xtask. Do not edit it manually. pub const ZERO_INDEXED_LEAF: [u8; 32] = [ 60u8, 172u8, 49u8, 121u8, 8u8, 198u8, 153u8, 254u8, 135u8, 58u8, 127u8, 110u8, 228u8, 232u8, 205u8, 99u8, 251u8, 233u8, 145u8, 139u8, 35u8, 21u8, 201u8, 123u8, 233u8, 21u8, 133u8, 89u8, 1u8, 104u8, 227u8, 1u8, ];
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_indexed_leaf/mod.rs
//! Generated "zero indexed leaf" constants, one submodule per hash function.

pub mod keccak;
pub mod poseidon;
pub mod sha256;
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_indexed_leaf/poseidon.rs
// This file is generated by xtask. Do not edit it manually. pub const ZERO_INDEXED_LEAF: [u8; 32] = [ 11u8, 193u8, 136u8, 210u8, 125u8, 204u8, 234u8, 220u8, 29u8, 207u8, 182u8, 175u8, 10u8, 122u8, 240u8, 143u8, 226u8, 134u8, 78u8, 236u8, 236u8, 150u8, 197u8, 174u8, 124u8, 238u8, 109u8, 179u8, 27u8, 165u8, 153u8, 170u8, ];
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_bytes/sha256.rs
// This file is generated by xtask. Do not edit it manually. use super::ZeroBytes; pub const ZERO_BYTES: ZeroBytes = [ [ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, ], [ 245u8, 165u8, 253u8, 66u8, 209u8, 106u8, 32u8, 48u8, 39u8, 152u8, 239u8, 110u8, 211u8, 9u8, 151u8, 155u8, 67u8, 0u8, 61u8, 35u8, 32u8, 217u8, 240u8, 232u8, 234u8, 152u8, 49u8, 169u8, 39u8, 89u8, 251u8, 75u8, ], [ 219u8, 86u8, 17u8, 78u8, 0u8, 253u8, 212u8, 193u8, 248u8, 92u8, 137u8, 43u8, 243u8, 90u8, 201u8, 168u8, 146u8, 137u8, 170u8, 236u8, 177u8, 235u8, 208u8, 169u8, 108u8, 222u8, 96u8, 106u8, 116u8, 139u8, 93u8, 113u8, ], [ 199u8, 128u8, 9u8, 253u8, 240u8, 127u8, 197u8, 106u8, 17u8, 241u8, 34u8, 55u8, 6u8, 88u8, 163u8, 83u8, 170u8, 165u8, 66u8, 237u8, 99u8, 228u8, 76u8, 75u8, 193u8, 95u8, 244u8, 205u8, 16u8, 90u8, 179u8, 60u8, ], [ 83u8, 109u8, 152u8, 131u8, 127u8, 45u8, 209u8, 101u8, 165u8, 93u8, 94u8, 234u8, 233u8, 20u8, 133u8, 149u8, 68u8, 114u8, 213u8, 111u8, 36u8, 109u8, 242u8, 86u8, 191u8, 60u8, 174u8, 25u8, 53u8, 42u8, 18u8, 60u8, ], [ 158u8, 253u8, 224u8, 82u8, 170u8, 21u8, 66u8, 159u8, 174u8, 5u8, 186u8, 212u8, 208u8, 177u8, 215u8, 198u8, 77u8, 166u8, 77u8, 3u8, 215u8, 161u8, 133u8, 74u8, 88u8, 140u8, 44u8, 184u8, 67u8, 12u8, 13u8, 48u8, ], [ 216u8, 141u8, 223u8, 238u8, 212u8, 0u8, 168u8, 117u8, 85u8, 150u8, 178u8, 25u8, 66u8, 193u8, 73u8, 126u8, 17u8, 76u8, 48u8, 46u8, 97u8, 24u8, 41u8, 15u8, 145u8, 230u8, 119u8, 41u8, 118u8, 4u8, 31u8, 161u8, ], [ 135u8, 235u8, 13u8, 219u8, 165u8, 126u8, 53u8, 246u8, 210u8, 134u8, 103u8, 56u8, 2u8, 164u8, 175u8, 89u8, 117u8, 226u8, 37u8, 6u8, 199u8, 207u8, 76u8, 100u8, 187u8, 107u8, 229u8, 238u8, 17u8, 82u8, 127u8, 44u8, ], [ 38u8, 132u8, 100u8, 118u8, 253u8, 95u8, 197u8, 74u8, 93u8, 67u8, 56u8, 81u8, 103u8, 201u8, 81u8, 68u8, 242u8, 100u8, 63u8, 83u8, 60u8, 200u8, 91u8, 185u8, 209u8, 107u8, 120u8, 47u8, 141u8, 125u8, 177u8, 147u8, ], [ 
80u8, 109u8, 134u8, 88u8, 45u8, 37u8, 36u8, 5u8, 184u8, 64u8, 1u8, 135u8, 146u8, 202u8, 210u8, 191u8, 18u8, 89u8, 241u8, 239u8, 90u8, 165u8, 248u8, 135u8, 225u8, 60u8, 178u8, 240u8, 9u8, 79u8, 81u8, 225u8, ], [ 255u8, 255u8, 10u8, 215u8, 230u8, 89u8, 119u8, 47u8, 149u8, 52u8, 193u8, 149u8, 200u8, 21u8, 239u8, 196u8, 1u8, 78u8, 241u8, 225u8, 218u8, 237u8, 68u8, 4u8, 192u8, 99u8, 133u8, 209u8, 17u8, 146u8, 233u8, 43u8, ], [ 108u8, 240u8, 65u8, 39u8, 219u8, 5u8, 68u8, 28u8, 216u8, 51u8, 16u8, 122u8, 82u8, 190u8, 133u8, 40u8, 104u8, 137u8, 14u8, 67u8, 23u8, 230u8, 160u8, 42u8, 180u8, 118u8, 131u8, 170u8, 117u8, 150u8, 66u8, 32u8, ], [ 183u8, 208u8, 95u8, 135u8, 95u8, 20u8, 0u8, 39u8, 239u8, 81u8, 24u8, 162u8, 36u8, 123u8, 187u8, 132u8, 206u8, 143u8, 47u8, 15u8, 17u8, 35u8, 98u8, 48u8, 133u8, 218u8, 247u8, 150u8, 12u8, 50u8, 159u8, 95u8, ], [ 223u8, 106u8, 245u8, 245u8, 187u8, 219u8, 107u8, 233u8, 239u8, 138u8, 166u8, 24u8, 228u8, 191u8, 128u8, 115u8, 150u8, 8u8, 103u8, 23u8, 30u8, 41u8, 103u8, 111u8, 139u8, 40u8, 77u8, 234u8, 106u8, 8u8, 168u8, 94u8, ], [ 181u8, 141u8, 144u8, 15u8, 94u8, 24u8, 46u8, 60u8, 80u8, 239u8, 116u8, 150u8, 158u8, 161u8, 108u8, 119u8, 38u8, 197u8, 73u8, 117u8, 124u8, 194u8, 53u8, 35u8, 195u8, 105u8, 88u8, 125u8, 167u8, 41u8, 55u8, 132u8, ], [ 212u8, 154u8, 117u8, 2u8, 255u8, 207u8, 176u8, 52u8, 11u8, 29u8, 120u8, 133u8, 104u8, 133u8, 0u8, 202u8, 48u8, 129u8, 97u8, 167u8, 249u8, 107u8, 98u8, 223u8, 157u8, 8u8, 59u8, 113u8, 252u8, 200u8, 242u8, 187u8, ], [ 143u8, 230u8, 177u8, 104u8, 146u8, 86u8, 192u8, 211u8, 133u8, 244u8, 47u8, 91u8, 190u8, 32u8, 39u8, 162u8, 44u8, 25u8, 150u8, 225u8, 16u8, 186u8, 151u8, 193u8, 113u8, 211u8, 229u8, 148u8, 141u8, 233u8, 43u8, 235u8, ], [ 141u8, 13u8, 99u8, 195u8, 158u8, 186u8, 222u8, 133u8, 9u8, 224u8, 174u8, 60u8, 156u8, 56u8, 118u8, 251u8, 95u8, 161u8, 18u8, 190u8, 24u8, 249u8, 5u8, 236u8, 172u8, 254u8, 203u8, 146u8, 5u8, 118u8, 3u8, 171u8, ], [ 149u8, 238u8, 200u8, 178u8, 229u8, 65u8, 202u8, 212u8, 233u8, 
29u8, 227u8, 131u8, 133u8, 242u8, 224u8, 70u8, 97u8, 159u8, 84u8, 73u8, 108u8, 35u8, 130u8, 203u8, 108u8, 172u8, 213u8, 185u8, 140u8, 38u8, 245u8, 164u8, ], [ 248u8, 147u8, 233u8, 8u8, 145u8, 119u8, 117u8, 182u8, 43u8, 255u8, 35u8, 41u8, 77u8, 187u8, 227u8, 161u8, 205u8, 142u8, 108u8, 193u8, 195u8, 91u8, 72u8, 1u8, 136u8, 123u8, 100u8, 106u8, 111u8, 129u8, 241u8, 127u8, ], [ 205u8, 219u8, 167u8, 181u8, 146u8, 227u8, 19u8, 51u8, 147u8, 193u8, 97u8, 148u8, 250u8, 199u8, 67u8, 26u8, 191u8, 47u8, 84u8, 133u8, 237u8, 113u8, 29u8, 178u8, 130u8, 24u8, 60u8, 129u8, 158u8, 8u8, 235u8, 170u8, ], [ 138u8, 141u8, 127u8, 227u8, 175u8, 140u8, 170u8, 8u8, 90u8, 118u8, 57u8, 168u8, 50u8, 0u8, 20u8, 87u8, 223u8, 185u8, 18u8, 138u8, 128u8, 97u8, 20u8, 42u8, 208u8, 51u8, 86u8, 41u8, 255u8, 35u8, 255u8, 156u8, ], [ 254u8, 179u8, 195u8, 55u8, 215u8, 165u8, 26u8, 111u8, 191u8, 0u8, 185u8, 227u8, 76u8, 82u8, 225u8, 201u8, 25u8, 92u8, 150u8, 155u8, 212u8, 231u8, 160u8, 191u8, 213u8, 29u8, 92u8, 91u8, 237u8, 156u8, 17u8, 103u8, ], [ 231u8, 31u8, 10u8, 168u8, 60u8, 195u8, 46u8, 223u8, 190u8, 250u8, 159u8, 77u8, 62u8, 1u8, 116u8, 202u8, 133u8, 24u8, 46u8, 236u8, 159u8, 58u8, 9u8, 246u8, 166u8, 192u8, 223u8, 99u8, 119u8, 165u8, 16u8, 215u8, ], [ 49u8, 32u8, 111u8, 168u8, 10u8, 80u8, 187u8, 106u8, 190u8, 41u8, 8u8, 80u8, 88u8, 241u8, 98u8, 18u8, 33u8, 42u8, 96u8, 238u8, 200u8, 240u8, 73u8, 254u8, 203u8, 146u8, 216u8, 200u8, 224u8, 168u8, 75u8, 192u8, ], [ 33u8, 53u8, 43u8, 254u8, 203u8, 237u8, 221u8, 233u8, 147u8, 131u8, 159u8, 97u8, 76u8, 61u8, 172u8, 10u8, 62u8, 227u8, 117u8, 67u8, 249u8, 180u8, 18u8, 177u8, 97u8, 153u8, 220u8, 21u8, 142u8, 35u8, 181u8, 68u8, ], [ 97u8, 158u8, 49u8, 39u8, 36u8, 187u8, 109u8, 124u8, 49u8, 83u8, 237u8, 157u8, 231u8, 145u8, 215u8, 100u8, 163u8, 102u8, 179u8, 137u8, 175u8, 19u8, 197u8, 139u8, 248u8, 168u8, 217u8, 4u8, 129u8, 164u8, 103u8, 101u8, ], [ 124u8, 221u8, 41u8, 134u8, 38u8, 130u8, 80u8, 98u8, 141u8, 12u8, 16u8, 227u8, 133u8, 197u8, 140u8, 97u8, 145u8, 
230u8, 251u8, 224u8, 81u8, 145u8, 188u8, 192u8, 79u8, 19u8, 63u8, 44u8, 234u8, 114u8, 193u8, 196u8, ], [ 132u8, 137u8, 48u8, 189u8, 123u8, 168u8, 202u8, 197u8, 70u8, 97u8, 7u8, 33u8, 19u8, 251u8, 39u8, 136u8, 105u8, 224u8, 123u8, 184u8, 88u8, 127u8, 145u8, 57u8, 41u8, 51u8, 55u8, 77u8, 1u8, 123u8, 203u8, 225u8, ], [ 136u8, 105u8, 255u8, 44u8, 34u8, 178u8, 140u8, 193u8, 5u8, 16u8, 217u8, 133u8, 50u8, 146u8, 128u8, 51u8, 40u8, 190u8, 79u8, 176u8, 232u8, 4u8, 149u8, 232u8, 187u8, 141u8, 39u8, 31u8, 91u8, 136u8, 150u8, 54u8, ], [ 181u8, 254u8, 40u8, 231u8, 159u8, 27u8, 133u8, 15u8, 134u8, 88u8, 36u8, 108u8, 233u8, 182u8, 161u8, 231u8, 180u8, 159u8, 192u8, 109u8, 183u8, 20u8, 62u8, 143u8, 224u8, 180u8, 242u8, 176u8, 197u8, 82u8, 58u8, 92u8, ], [ 152u8, 94u8, 146u8, 159u8, 112u8, 175u8, 40u8, 208u8, 189u8, 209u8, 169u8, 10u8, 128u8, 143u8, 151u8, 127u8, 89u8, 124u8, 124u8, 119u8, 140u8, 72u8, 158u8, 152u8, 211u8, 189u8, 137u8, 16u8, 211u8, 26u8, 192u8, 247u8, ], [ 198u8, 246u8, 126u8, 2u8, 230u8, 228u8, 225u8, 189u8, 239u8, 185u8, 148u8, 198u8, 9u8, 137u8, 83u8, 243u8, 70u8, 54u8, 186u8, 43u8, 108u8, 162u8, 10u8, 71u8, 33u8, 210u8, 178u8, 106u8, 136u8, 103u8, 34u8, 255u8, ], ];
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_bytes/keccak.rs
// This file is generated by xtask. Do not edit it manually. use super::ZeroBytes; pub const ZERO_BYTES: ZeroBytes = [ [ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, ], [ 173u8, 50u8, 40u8, 182u8, 118u8, 247u8, 211u8, 205u8, 66u8, 132u8, 165u8, 68u8, 63u8, 23u8, 241u8, 150u8, 43u8, 54u8, 228u8, 145u8, 179u8, 10u8, 64u8, 178u8, 64u8, 88u8, 73u8, 229u8, 151u8, 186u8, 95u8, 181u8, ], [ 180u8, 193u8, 25u8, 81u8, 149u8, 124u8, 111u8, 143u8, 100u8, 44u8, 74u8, 246u8, 28u8, 214u8, 178u8, 70u8, 64u8, 254u8, 198u8, 220u8, 127u8, 198u8, 7u8, 238u8, 130u8, 6u8, 169u8, 158u8, 146u8, 65u8, 13u8, 48u8, ], [ 33u8, 221u8, 185u8, 163u8, 86u8, 129u8, 92u8, 63u8, 172u8, 16u8, 38u8, 182u8, 222u8, 197u8, 223u8, 49u8, 36u8, 175u8, 186u8, 219u8, 72u8, 92u8, 155u8, 165u8, 163u8, 227u8, 57u8, 138u8, 4u8, 183u8, 186u8, 133u8, ], [ 229u8, 135u8, 105u8, 179u8, 42u8, 27u8, 234u8, 241u8, 234u8, 39u8, 55u8, 90u8, 68u8, 9u8, 90u8, 13u8, 31u8, 182u8, 100u8, 206u8, 45u8, 211u8, 88u8, 231u8, 252u8, 191u8, 183u8, 140u8, 38u8, 161u8, 147u8, 68u8, ], [ 14u8, 176u8, 30u8, 191u8, 201u8, 237u8, 39u8, 80u8, 12u8, 212u8, 223u8, 201u8, 121u8, 39u8, 45u8, 31u8, 9u8, 19u8, 204u8, 159u8, 102u8, 84u8, 13u8, 126u8, 128u8, 5u8, 129u8, 17u8, 9u8, 225u8, 207u8, 45u8, ], [ 136u8, 124u8, 34u8, 189u8, 135u8, 80u8, 211u8, 64u8, 22u8, 172u8, 60u8, 102u8, 181u8, 255u8, 16u8, 45u8, 172u8, 221u8, 115u8, 246u8, 176u8, 20u8, 231u8, 16u8, 181u8, 30u8, 128u8, 34u8, 175u8, 154u8, 25u8, 104u8, ], [ 255u8, 215u8, 1u8, 87u8, 228u8, 128u8, 99u8, 252u8, 51u8, 201u8, 122u8, 5u8, 15u8, 127u8, 100u8, 2u8, 51u8, 191u8, 100u8, 108u8, 201u8, 141u8, 149u8, 36u8, 198u8, 185u8, 43u8, 207u8, 58u8, 181u8, 111u8, 131u8, ], [ 152u8, 103u8, 204u8, 95u8, 127u8, 25u8, 107u8, 147u8, 186u8, 225u8, 226u8, 126u8, 99u8, 32u8, 116u8, 36u8, 69u8, 210u8, 144u8, 242u8, 38u8, 56u8, 39u8, 73u8, 139u8, 84u8, 254u8, 197u8, 57u8, 247u8, 86u8, 175u8, 
], [ 206u8, 250u8, 212u8, 229u8, 8u8, 192u8, 152u8, 185u8, 167u8, 225u8, 216u8, 254u8, 177u8, 153u8, 85u8, 251u8, 2u8, 186u8, 150u8, 117u8, 88u8, 80u8, 120u8, 113u8, 9u8, 105u8, 211u8, 68u8, 15u8, 80u8, 84u8, 224u8, ], [ 249u8, 220u8, 62u8, 127u8, 224u8, 22u8, 224u8, 80u8, 239u8, 242u8, 96u8, 51u8, 79u8, 24u8, 165u8, 212u8, 254u8, 57u8, 29u8, 130u8, 9u8, 35u8, 25u8, 245u8, 150u8, 79u8, 46u8, 46u8, 183u8, 193u8, 195u8, 165u8, ], [ 248u8, 177u8, 58u8, 73u8, 226u8, 130u8, 246u8, 9u8, 195u8, 23u8, 168u8, 51u8, 251u8, 141u8, 151u8, 109u8, 17u8, 81u8, 124u8, 87u8, 29u8, 18u8, 33u8, 162u8, 101u8, 210u8, 90u8, 247u8, 120u8, 236u8, 248u8, 146u8, ], [ 52u8, 144u8, 198u8, 206u8, 235u8, 69u8, 10u8, 236u8, 220u8, 130u8, 226u8, 130u8, 147u8, 3u8, 29u8, 16u8, 199u8, 215u8, 59u8, 248u8, 94u8, 87u8, 191u8, 4u8, 26u8, 151u8, 54u8, 10u8, 162u8, 197u8, 217u8, 156u8, ], [ 193u8, 223u8, 130u8, 217u8, 196u8, 184u8, 116u8, 19u8, 234u8, 226u8, 239u8, 4u8, 143u8, 148u8, 180u8, 211u8, 85u8, 76u8, 234u8, 115u8, 217u8, 43u8, 15u8, 122u8, 249u8, 110u8, 2u8, 113u8, 198u8, 145u8, 226u8, 187u8, ], [ 92u8, 103u8, 173u8, 215u8, 198u8, 202u8, 243u8, 2u8, 37u8, 106u8, 222u8, 223u8, 122u8, 177u8, 20u8, 218u8, 10u8, 207u8, 232u8, 112u8, 212u8, 73u8, 163u8, 164u8, 137u8, 247u8, 129u8, 214u8, 89u8, 232u8, 190u8, 204u8, ], [ 218u8, 123u8, 206u8, 159u8, 78u8, 134u8, 24u8, 182u8, 189u8, 47u8, 65u8, 50u8, 206u8, 121u8, 140u8, 220u8, 122u8, 96u8, 231u8, 225u8, 70u8, 10u8, 114u8, 153u8, 227u8, 198u8, 52u8, 42u8, 87u8, 150u8, 38u8, 210u8, ], [ 39u8, 51u8, 229u8, 15u8, 82u8, 110u8, 194u8, 250u8, 25u8, 162u8, 43u8, 49u8, 232u8, 237u8, 80u8, 242u8, 60u8, 209u8, 253u8, 249u8, 76u8, 145u8, 84u8, 237u8, 58u8, 118u8, 9u8, 162u8, 241u8, 255u8, 152u8, 31u8, ], [ 225u8, 211u8, 181u8, 200u8, 7u8, 178u8, 129u8, 228u8, 104u8, 60u8, 198u8, 214u8, 49u8, 92u8, 249u8, 91u8, 154u8, 222u8, 134u8, 65u8, 222u8, 252u8, 179u8, 35u8, 114u8, 241u8, 193u8, 38u8, 227u8, 152u8, 239u8, 122u8, ], [ 90u8, 45u8, 206u8, 10u8, 138u8, 127u8, 
104u8, 187u8, 116u8, 86u8, 15u8, 143u8, 113u8, 131u8, 124u8, 44u8, 46u8, 187u8, 203u8, 247u8, 255u8, 251u8, 66u8, 174u8, 24u8, 150u8, 241u8, 63u8, 124u8, 116u8, 121u8, 160u8, ], [ 180u8, 106u8, 40u8, 182u8, 245u8, 85u8, 64u8, 248u8, 148u8, 68u8, 246u8, 61u8, 224u8, 55u8, 142u8, 61u8, 18u8, 27u8, 224u8, 158u8, 6u8, 204u8, 157u8, 237u8, 28u8, 32u8, 230u8, 88u8, 118u8, 211u8, 106u8, 160u8, ], [ 198u8, 94u8, 150u8, 69u8, 100u8, 71u8, 134u8, 182u8, 32u8, 226u8, 221u8, 42u8, 214u8, 72u8, 221u8, 252u8, 191u8, 74u8, 126u8, 91u8, 26u8, 58u8, 78u8, 207u8, 231u8, 246u8, 70u8, 103u8, 163u8, 240u8, 183u8, 226u8, ], [ 244u8, 65u8, 133u8, 136u8, 237u8, 53u8, 162u8, 69u8, 140u8, 255u8, 235u8, 57u8, 185u8, 61u8, 38u8, 241u8, 141u8, 42u8, 177u8, 59u8, 220u8, 230u8, 174u8, 229u8, 142u8, 123u8, 153u8, 53u8, 158u8, 194u8, 223u8, 217u8, ], [ 90u8, 156u8, 22u8, 220u8, 0u8, 214u8, 239u8, 24u8, 183u8, 147u8, 58u8, 111u8, 141u8, 198u8, 92u8, 203u8, 85u8, 102u8, 113u8, 56u8, 119u8, 111u8, 125u8, 234u8, 16u8, 16u8, 112u8, 220u8, 135u8, 150u8, 227u8, 119u8, ], [ 77u8, 248u8, 79u8, 64u8, 174u8, 12u8, 130u8, 41u8, 208u8, 214u8, 6u8, 158u8, 92u8, 143u8, 57u8, 167u8, 194u8, 153u8, 103u8, 122u8, 9u8, 211u8, 103u8, 252u8, 123u8, 5u8, 227u8, 188u8, 56u8, 14u8, 230u8, 82u8, ], [ 205u8, 199u8, 37u8, 149u8, 247u8, 76u8, 123u8, 16u8, 67u8, 208u8, 225u8, 255u8, 186u8, 183u8, 52u8, 100u8, 140u8, 131u8, 141u8, 251u8, 5u8, 39u8, 217u8, 113u8, 182u8, 2u8, 188u8, 33u8, 108u8, 150u8, 25u8, 239u8, ], [ 10u8, 191u8, 90u8, 201u8, 116u8, 161u8, 237u8, 87u8, 244u8, 5u8, 10u8, 165u8, 16u8, 221u8, 156u8, 116u8, 245u8, 8u8, 39u8, 123u8, 57u8, 215u8, 151u8, 59u8, 178u8, 223u8, 204u8, 197u8, 238u8, 176u8, 97u8, 141u8, ], [ 184u8, 205u8, 116u8, 4u8, 111u8, 243u8, 55u8, 240u8, 167u8, 191u8, 44u8, 142u8, 3u8, 225u8, 15u8, 100u8, 44u8, 24u8, 134u8, 121u8, 141u8, 113u8, 128u8, 106u8, 177u8, 232u8, 136u8, 217u8, 229u8, 238u8, 135u8, 208u8, ], [ 131u8, 140u8, 86u8, 85u8, 203u8, 33u8, 198u8, 203u8, 131u8, 49u8, 59u8, 90u8, 99u8, 
17u8, 117u8, 223u8, 244u8, 150u8, 55u8, 114u8, 204u8, 233u8, 16u8, 129u8, 136u8, 179u8, 74u8, 200u8, 124u8, 129u8, 196u8, 30u8, ], [ 102u8, 46u8, 228u8, 221u8, 45u8, 215u8, 178u8, 188u8, 112u8, 121u8, 97u8, 177u8, 230u8, 70u8, 196u8, 4u8, 118u8, 105u8, 220u8, 182u8, 88u8, 79u8, 13u8, 141u8, 119u8, 13u8, 175u8, 93u8, 126u8, 125u8, 235u8, 46u8, ], [ 56u8, 138u8, 178u8, 14u8, 37u8, 115u8, 209u8, 113u8, 168u8, 129u8, 8u8, 231u8, 157u8, 130u8, 14u8, 152u8, 242u8, 108u8, 11u8, 132u8, 170u8, 139u8, 47u8, 74u8, 164u8, 150u8, 141u8, 187u8, 129u8, 142u8, 163u8, 34u8, ], [ 147u8, 35u8, 124u8, 80u8, 186u8, 117u8, 238u8, 72u8, 95u8, 76u8, 34u8, 173u8, 242u8, 247u8, 65u8, 64u8, 11u8, 223u8, 141u8, 106u8, 156u8, 199u8, 223u8, 126u8, 202u8, 229u8, 118u8, 34u8, 22u8, 101u8, 215u8, 53u8, ], [ 132u8, 72u8, 129u8, 139u8, 180u8, 174u8, 69u8, 98u8, 132u8, 158u8, 148u8, 158u8, 23u8, 172u8, 22u8, 224u8, 190u8, 22u8, 104u8, 142u8, 21u8, 107u8, 92u8, 241u8, 94u8, 9u8, 140u8, 98u8, 124u8, 0u8, 86u8, 169u8, ], [ 39u8, 174u8, 91u8, 160u8, 141u8, 114u8, 145u8, 201u8, 108u8, 140u8, 189u8, 220u8, 193u8, 72u8, 191u8, 72u8, 166u8, 214u8, 140u8, 121u8, 116u8, 185u8, 67u8, 86u8, 245u8, 55u8, 84u8, 239u8, 97u8, 113u8, 215u8, 87u8, ], ];
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_bytes/mod.rs
pub mod keccak;
pub mod poseidon;
pub mod sha256;

/// Maximum Merkle tree height for which zero-byte tables are generated.
pub const MAX_HEIGHT: usize = 32;

/// Table of precomputed hashes of all-zero subtrees, one 32-byte entry per
/// tree level (`MAX_HEIGHT + 1` entries; entry 0 is the all-zero leaf, as
/// seen in the generated per-hash tables).
pub type ZeroBytes = [[u8; 32]; MAX_HEIGHT + 1];
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_bytes/poseidon.rs
// This file is generated by xtask. Do not edit it manually. use super::ZeroBytes; pub const ZERO_BYTES: ZeroBytes = [ [ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, ], [ 32u8, 152u8, 245u8, 251u8, 158u8, 35u8, 158u8, 171u8, 60u8, 234u8, 195u8, 242u8, 123u8, 129u8, 228u8, 129u8, 220u8, 49u8, 36u8, 213u8, 95u8, 254u8, 213u8, 35u8, 168u8, 57u8, 238u8, 132u8, 70u8, 182u8, 72u8, 100u8, ], [ 16u8, 105u8, 103u8, 61u8, 205u8, 177u8, 34u8, 99u8, 223u8, 48u8, 26u8, 111u8, 245u8, 132u8, 167u8, 236u8, 38u8, 26u8, 68u8, 203u8, 157u8, 198u8, 141u8, 240u8, 103u8, 164u8, 119u8, 68u8, 96u8, 177u8, 241u8, 225u8, ], [ 24u8, 244u8, 51u8, 49u8, 83u8, 126u8, 226u8, 175u8, 46u8, 61u8, 117u8, 141u8, 80u8, 247u8, 33u8, 6u8, 70u8, 124u8, 110u8, 234u8, 80u8, 55u8, 29u8, 213u8, 40u8, 213u8, 126u8, 178u8, 184u8, 86u8, 210u8, 56u8, ], [ 7u8, 249u8, 216u8, 55u8, 203u8, 23u8, 176u8, 211u8, 99u8, 32u8, 255u8, 233u8, 59u8, 165u8, 35u8, 69u8, 241u8, 183u8, 40u8, 87u8, 26u8, 86u8, 130u8, 101u8, 202u8, 172u8, 151u8, 85u8, 157u8, 188u8, 149u8, 42u8, ], [ 43u8, 148u8, 207u8, 94u8, 135u8, 70u8, 179u8, 245u8, 201u8, 99u8, 31u8, 76u8, 93u8, 243u8, 41u8, 7u8, 166u8, 153u8, 197u8, 140u8, 148u8, 178u8, 173u8, 77u8, 123u8, 92u8, 236u8, 22u8, 57u8, 24u8, 63u8, 85u8, ], [ 45u8, 238u8, 147u8, 197u8, 166u8, 102u8, 69u8, 150u8, 70u8, 234u8, 125u8, 34u8, 204u8, 169u8, 225u8, 188u8, 254u8, 215u8, 30u8, 105u8, 81u8, 185u8, 83u8, 97u8, 29u8, 17u8, 221u8, 163u8, 46u8, 160u8, 157u8, 120u8, ], [ 7u8, 130u8, 149u8, 229u8, 162u8, 43u8, 132u8, 233u8, 130u8, 207u8, 96u8, 30u8, 182u8, 57u8, 89u8, 123u8, 139u8, 5u8, 21u8, 168u8, 140u8, 181u8, 172u8, 127u8, 168u8, 164u8, 170u8, 190u8, 60u8, 135u8, 52u8, 157u8, ], [ 47u8, 165u8, 229u8, 241u8, 143u8, 96u8, 39u8, 166u8, 80u8, 27u8, 236u8, 134u8, 69u8, 100u8, 71u8, 42u8, 97u8, 107u8, 46u8, 39u8, 74u8, 65u8, 33u8, 26u8, 68u8, 76u8, 190u8, 58u8, 153u8, 243u8, 204u8, 
97u8, ], [ 14u8, 136u8, 67u8, 118u8, 208u8, 216u8, 253u8, 33u8, 236u8, 183u8, 128u8, 56u8, 158u8, 148u8, 31u8, 102u8, 228u8, 94u8, 122u8, 204u8, 227u8, 226u8, 40u8, 171u8, 62u8, 33u8, 86u8, 166u8, 20u8, 252u8, 215u8, 71u8, ], [ 27u8, 114u8, 1u8, 218u8, 114u8, 73u8, 79u8, 30u8, 40u8, 113u8, 122u8, 209u8, 165u8, 46u8, 180u8, 105u8, 249u8, 88u8, 146u8, 249u8, 87u8, 113u8, 53u8, 51u8, 222u8, 97u8, 117u8, 229u8, 218u8, 25u8, 10u8, 242u8, ], [ 31u8, 141u8, 136u8, 34u8, 114u8, 94u8, 54u8, 56u8, 82u8, 0u8, 192u8, 178u8, 1u8, 36u8, 152u8, 25u8, 166u8, 230u8, 225u8, 228u8, 101u8, 8u8, 8u8, 181u8, 190u8, 188u8, 107u8, 250u8, 206u8, 125u8, 118u8, 54u8, ], [ 44u8, 93u8, 130u8, 246u8, 108u8, 145u8, 75u8, 175u8, 185u8, 112u8, 21u8, 137u8, 186u8, 140u8, 252u8, 251u8, 97u8, 98u8, 176u8, 161u8, 42u8, 207u8, 136u8, 168u8, 208u8, 135u8, 154u8, 4u8, 113u8, 181u8, 248u8, 90u8, ], [ 20u8, 197u8, 65u8, 72u8, 160u8, 148u8, 11u8, 184u8, 32u8, 149u8, 127u8, 90u8, 223u8, 63u8, 161u8, 19u8, 78u8, 245u8, 196u8, 170u8, 161u8, 19u8, 244u8, 100u8, 100u8, 88u8, 242u8, 112u8, 224u8, 191u8, 191u8, 208u8, ], [ 25u8, 13u8, 51u8, 177u8, 47u8, 152u8, 111u8, 150u8, 30u8, 16u8, 192u8, 238u8, 68u8, 216u8, 185u8, 175u8, 17u8, 190u8, 37u8, 88u8, 140u8, 173u8, 137u8, 212u8, 22u8, 17u8, 142u8, 75u8, 244u8, 235u8, 232u8, 12u8, ], [ 34u8, 249u8, 138u8, 169u8, 206u8, 112u8, 65u8, 82u8, 172u8, 23u8, 53u8, 73u8, 20u8, 173u8, 115u8, 237u8, 17u8, 103u8, 174u8, 101u8, 150u8, 175u8, 81u8, 10u8, 165u8, 179u8, 100u8, 147u8, 37u8, 224u8, 108u8, 146u8, ], [ 42u8, 124u8, 124u8, 155u8, 108u8, 229u8, 136u8, 11u8, 159u8, 111u8, 34u8, 141u8, 114u8, 191u8, 106u8, 87u8, 90u8, 82u8, 111u8, 41u8, 198u8, 110u8, 204u8, 238u8, 248u8, 183u8, 83u8, 211u8, 139u8, 186u8, 115u8, 35u8, ], [ 46u8, 129u8, 134u8, 229u8, 88u8, 105u8, 142u8, 193u8, 198u8, 122u8, 249u8, 193u8, 77u8, 70u8, 63u8, 252u8, 71u8, 0u8, 67u8, 201u8, 194u8, 152u8, 139u8, 149u8, 77u8, 117u8, 221u8, 100u8, 63u8, 54u8, 185u8, 146u8, ], [ 15u8, 87u8, 197u8, 87u8, 30u8, 154u8, 
78u8, 171u8, 73u8, 226u8, 200u8, 207u8, 5u8, 13u8, 174u8, 148u8, 138u8, 239u8, 110u8, 173u8, 100u8, 115u8, 146u8, 39u8, 53u8, 70u8, 36u8, 157u8, 28u8, 31u8, 241u8, 15u8, ], [ 24u8, 48u8, 238u8, 103u8, 181u8, 251u8, 85u8, 74u8, 213u8, 246u8, 61u8, 67u8, 136u8, 128u8, 14u8, 28u8, 254u8, 120u8, 227u8, 16u8, 105u8, 125u8, 70u8, 228u8, 60u8, 156u8, 227u8, 97u8, 52u8, 247u8, 44u8, 202u8, ], [ 33u8, 52u8, 231u8, 106u8, 197u8, 210u8, 26u8, 171u8, 24u8, 108u8, 43u8, 225u8, 221u8, 143u8, 132u8, 238u8, 136u8, 10u8, 30u8, 70u8, 234u8, 247u8, 18u8, 249u8, 211u8, 113u8, 182u8, 223u8, 34u8, 25u8, 31u8, 62u8, ], [ 25u8, 223u8, 144u8, 236u8, 132u8, 78u8, 188u8, 79u8, 254u8, 235u8, 216u8, 102u8, 243u8, 56u8, 89u8, 176u8, 192u8, 81u8, 216u8, 201u8, 88u8, 238u8, 58u8, 168u8, 143u8, 143u8, 141u8, 243u8, 219u8, 145u8, 165u8, 177u8, ], [ 24u8, 204u8, 162u8, 166u8, 107u8, 92u8, 7u8, 135u8, 152u8, 30u8, 105u8, 174u8, 253u8, 132u8, 133u8, 45u8, 116u8, 175u8, 14u8, 147u8, 239u8, 73u8, 18u8, 180u8, 100u8, 140u8, 5u8, 247u8, 34u8, 239u8, 229u8, 43u8, ], [ 35u8, 136u8, 144u8, 148u8, 21u8, 35u8, 13u8, 27u8, 77u8, 19u8, 4u8, 210u8, 213u8, 79u8, 71u8, 58u8, 98u8, 131u8, 56u8, 242u8, 239u8, 173u8, 131u8, 250u8, 223u8, 5u8, 100u8, 69u8, 73u8, 210u8, 83u8, 141u8, ], [ 39u8, 23u8, 31u8, 180u8, 169u8, 123u8, 108u8, 192u8, 233u8, 232u8, 245u8, 67u8, 181u8, 41u8, 77u8, 232u8, 102u8, 162u8, 175u8, 44u8, 156u8, 141u8, 11u8, 29u8, 150u8, 230u8, 115u8, 228u8, 82u8, 158u8, 213u8, 64u8, ], [ 47u8, 246u8, 101u8, 5u8, 64u8, 246u8, 41u8, 253u8, 87u8, 17u8, 160u8, 188u8, 116u8, 252u8, 13u8, 40u8, 220u8, 178u8, 48u8, 185u8, 57u8, 37u8, 131u8, 229u8, 248u8, 213u8, 150u8, 150u8, 221u8, 230u8, 174u8, 33u8, ], [ 18u8, 12u8, 88u8, 241u8, 67u8, 212u8, 145u8, 233u8, 89u8, 2u8, 247u8, 245u8, 39u8, 119u8, 120u8, 162u8, 224u8, 173u8, 81u8, 104u8, 246u8, 173u8, 215u8, 86u8, 105u8, 147u8, 38u8, 48u8, 206u8, 97u8, 21u8, 24u8, ], [ 31u8, 33u8, 254u8, 183u8, 13u8, 63u8, 33u8, 176u8, 123u8, 248u8, 83u8, 213u8, 229u8, 219u8, 3u8, 
7u8, 30u8, 196u8, 149u8, 160u8, 165u8, 101u8, 162u8, 29u8, 162u8, 214u8, 101u8, 210u8, 121u8, 72u8, 55u8, 149u8, ], [ 36u8, 190u8, 144u8, 95u8, 167u8, 19u8, 53u8, 225u8, 76u8, 99u8, 140u8, 192u8, 246u8, 106u8, 134u8, 35u8, 168u8, 38u8, 231u8, 104u8, 6u8, 138u8, 158u8, 150u8, 139u8, 177u8, 161u8, 221u8, 225u8, 138u8, 114u8, 210u8, ], [ 15u8, 134u8, 102u8, 182u8, 46u8, 209u8, 116u8, 145u8, 197u8, 12u8, 234u8, 222u8, 173u8, 87u8, 212u8, 205u8, 89u8, 126u8, 243u8, 130u8, 29u8, 101u8, 195u8, 40u8, 116u8, 76u8, 116u8, 229u8, 83u8, 218u8, 194u8, 109u8, ], [ 9u8, 24u8, 212u8, 107u8, 245u8, 45u8, 152u8, 176u8, 52u8, 65u8, 63u8, 74u8, 26u8, 28u8, 65u8, 89u8, 78u8, 122u8, 122u8, 63u8, 106u8, 224u8, 140u8, 180u8, 61u8, 26u8, 42u8, 35u8, 14u8, 25u8, 89u8, 239u8, ], [ 27u8, 190u8, 176u8, 27u8, 76u8, 71u8, 158u8, 205u8, 231u8, 105u8, 23u8, 100u8, 94u8, 64u8, 77u8, 250u8, 46u8, 38u8, 249u8, 13u8, 10u8, 252u8, 90u8, 101u8, 18u8, 133u8, 19u8, 173u8, 55u8, 92u8, 95u8, 242u8, ], [ 47u8, 104u8, 161u8, 197u8, 142u8, 37u8, 126u8, 66u8, 161u8, 122u8, 108u8, 97u8, 223u8, 245u8, 85u8, 30u8, 213u8, 96u8, 185u8, 146u8, 42u8, 177u8, 25u8, 213u8, 172u8, 142u8, 24u8, 76u8, 151u8, 52u8, 234u8, 217u8, ], ];
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/syscalls/definitions.rs
//! This module is a partial copy from //! [solana-program](https://github.com/solana-labs/solana/blob/master/sdk/program/src/syscalls/definitions.rs), //! which is licensed under Apache License 2.0. #[cfg(target_feature = "static-syscalls")] macro_rules! define_syscall { (fn $name:ident($($arg:ident: $typ:ty),*) -> $ret:ty) => { #[inline] pub unsafe fn $name($($arg: $typ),*) -> $ret { // this enum is used to force the hash to be computed in a const context #[repr(usize)] enum Syscall { Code = sys_hash(stringify!($name)), } let syscall: extern "C" fn($($arg: $typ),*) -> $ret = core::mem::transmute(Syscall::Code); syscall($($arg),*) } }; (fn $name:ident($($arg:ident: $typ:ty),*)) => { define_syscall!(fn $name($($arg: $typ),*) -> ()); } } #[cfg(not(target_feature = "static-syscalls"))] macro_rules! define_syscall { (fn $name:ident($($arg:ident: $typ:ty),*) -> $ret:ty) => { extern "C" { pub fn $name($($arg: $typ),*) -> $ret; } }; (fn $name:ident($($arg:ident: $typ:ty),*)) => { define_syscall!(fn $name($($arg: $typ),*) -> ()); } } define_syscall!(fn sol_sha256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); define_syscall!(fn sol_keccak256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); define_syscall!(fn sol_poseidon(parameters: u64, endianness: u64, vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64);
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/syscalls/mod.rs
//! This module is a partial copy from //! [solana-program](https://github.com/solana-labs/solana/blob/master/sdk/program/src/syscalls/definitions.rs), //! which is licensed under Apache License 2.0. //! //! The purpose of the module is to provide definition of Poseidon syscall //! without upgrading solana-program and Anchor just yet. #[cfg(target_os = "solana")] mod definitions; #[cfg(target_os = "solana")] pub use definitions::*;
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set/Cargo.toml
[package] name = "light-hash-set" version = "1.2.0" description = "Hash set which can be stored on a Solana account" repository = "https://github.com/Lightprotocol/light-protocol" license = "Apache-2.0" edition = "2021" [features] solana = ["solana-program"] [dependencies] light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" } light-utils = { path = "../../utils", version = "1.1.0" } memoffset = "0.9" num-bigint = "0.4" num-traits = "0.2" solana-program = { workspace = true, optional = true } thiserror = "1.0" [target.'cfg(target_os = "solana")'.dependencies] light-heap = { path = "../../heap", version = "1.1.0" } [dev-dependencies] ark-bn254 = "0.4" ark-ff = "0.4" rand = "0.8"
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set/src/zero_copy.rs
use std::{ marker::PhantomData, mem, ops::{Deref, DerefMut}, ptr::NonNull, }; use crate::{HashSet, HashSetCell, HashSetError}; /// A `HashSet` wrapper which can be instantiated from Solana account bytes /// without copying them. #[derive(Debug)] pub struct HashSetZeroCopy<'a> { pub hash_set: mem::ManuallyDrop<HashSet>, _marker: PhantomData<&'a ()>, } impl<'a> HashSetZeroCopy<'a> { // TODO(vadorovsky): Add a non-mut method: `from_bytes_zero_copy`. /// Casts a byte slice into `HashSet`. /// /// # Purpose /// /// This method is meant to be used mostly in Solana programs, where memory /// constraints are tight and we want to make sure no data is copied. /// /// # Safety /// /// This is highly unsafe. Ensuring the alignment and that the slice /// provides actual data of the hash set is the caller's responsibility. /// /// Calling it in async context (or anyhwere where the underlying data can /// be moved in the memory) is certainly going to cause undefined behavior. pub unsafe fn from_bytes_zero_copy_mut(bytes: &'a mut [u8]) -> Result<Self, HashSetError> { if bytes.len() < HashSet::non_dyn_fields_size() { return Err(HashSetError::BufferSize( HashSet::non_dyn_fields_size(), bytes.len(), )); } let capacity_values = usize::from_le_bytes(bytes[0..8].try_into().unwrap()); let sequence_threshold = usize::from_le_bytes(bytes[8..16].try_into().unwrap()); let offset = HashSet::non_dyn_fields_size() + mem::size_of::<usize>(); let values_size = mem::size_of::<Option<HashSetCell>>() * capacity_values; let expected_size = HashSet::non_dyn_fields_size() + values_size; if bytes.len() < expected_size { return Err(HashSetError::BufferSize(expected_size, bytes.len())); } let buckets = NonNull::new(bytes.as_mut_ptr().add(offset) as *mut Option<HashSetCell>).unwrap(); Ok(Self { hash_set: mem::ManuallyDrop::new(HashSet { capacity: capacity_values, sequence_threshold, buckets, }), _marker: PhantomData, }) } /// Casts a byte slice into `HashSet` and then initializes it. 
/// /// * `bytes` is casted into a reference of `HashSet` and used as /// storage for the struct. /// * `capacity_indices` indicates the size of the indices table. It should /// already include a desired load factor and be greater than the expected /// number of elements to avoid filling the set too early and avoid /// creating clusters. /// * `capacity_values` indicates the size of the values array. It should be /// equal to the number of expected elements, without load factor. /// * `sequence_threshold` indicates a difference of sequence numbers which /// make elements of the has set expired. Expiration means that they can /// be replaced during insertion of new elements with sequence numbers /// higher by at least a threshold. /// /// # Purpose /// /// This method is meant to be used mostly in Solana programs to initialize /// a new account which is supposed to store the hash set. /// /// # Safety /// /// This is highly unsafe. Ensuring the alignment and that the slice has /// a correct size, which is able to fit the hash set, is the caller's /// responsibility. /// /// Calling it in async context (or anywhere where the underlying data can /// be moved in memory) is certainly going to cause undefined behavior. pub unsafe fn from_bytes_zero_copy_init( bytes: &'a mut [u8], capacity_values: usize, sequence_threshold: usize, ) -> Result<Self, HashSetError> { if bytes.len() < HashSet::non_dyn_fields_size() { return Err(HashSetError::BufferSize( HashSet::non_dyn_fields_size(), bytes.len(), )); } bytes[0..8].copy_from_slice(&capacity_values.to_le_bytes()); bytes[8..16].copy_from_slice(&sequence_threshold.to_le_bytes()); bytes[16..24].copy_from_slice(&0_usize.to_le_bytes()); let hash_set = Self::from_bytes_zero_copy_mut(bytes)?; for i in 0..capacity_values { std::ptr::write(hash_set.hash_set.buckets.as_ptr().add(i), None); } Ok(hash_set) } } impl<'a> Drop for HashSetZeroCopy<'a> { fn drop(&mut self) { // SAFETY: Don't do anything here! Why? 
// // * Primitive fields of `HashSet` implement `Copy`, therefore `drop()` // has no effect on them - Rust drops them when they go out of scope. // * Don't drop the dynamic fields (`indices` and `values`). In // `HashSetZeroCopy`, they are backed by buffers provided by the // caller. These buffers are going to be eventually deallocated. // Performing an another `drop()` here would result double `free()` // which would result in aborting the program (either with `SIGABRT` // or `SIGSEGV`). } } impl<'a> Deref for HashSetZeroCopy<'a> { type Target = HashSet; fn deref(&self) -> &Self::Target { &self.hash_set } } impl<'a> DerefMut for HashSetZeroCopy<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.hash_set } } #[cfg(test)] mod test { use ark_bn254::Fr; use ark_ff::UniformRand; use num_bigint::BigUint; use rand::{thread_rng, Rng}; use super::*; #[test] fn test_load_from_bytes() { const VALUES: usize = 4800; const SEQUENCE_THRESHOLD: usize = 2400; // Create a buffer with random bytes. let mut bytes = vec![0u8; HashSet::size_in_account(VALUES)]; thread_rng().fill(bytes.as_mut_slice()); // Create random nullifiers. let mut rng = thread_rng(); let nullifiers: [BigUint; 2400] = std::array::from_fn(|_| BigUint::from(Fr::rand(&mut rng))); // Initialize a hash set on top of a byte slice. { let mut hs = unsafe { HashSetZeroCopy::from_bytes_zero_copy_init( bytes.as_mut_slice(), VALUES, SEQUENCE_THRESHOLD, ) .unwrap() }; // Ensure that the underlying data were properly initialized. assert_eq!(hs.hash_set.get_capacity(), VALUES); assert_eq!(hs.hash_set.sequence_threshold, SEQUENCE_THRESHOLD); for i in 0..VALUES { assert!(unsafe { &*hs.hash_set.buckets.as_ptr().add(i) }.is_none()); } for (seq, nullifier) in nullifiers.iter().enumerate() { let index = hs.insert(&nullifier, seq).unwrap(); hs.mark_with_sequence_number(index, seq).unwrap(); } } // Read the hash set from buffers again. 
        {
            let mut hs = unsafe {
                HashSetZeroCopy::from_bytes_zero_copy_mut(bytes.as_mut_slice()).unwrap()
            };

            // Everything inserted before reopening must still be present.
            for (seq, nullifier) in nullifiers.iter().enumerate() {
                assert_eq!(hs.contains(nullifier, Some(seq)).unwrap(), true);
            }
            // Past the sequence threshold, re-inserting the same values
            // should succeed (the old cells are expired).
            for (seq, nullifier) in nullifiers.iter().enumerate() {
                hs.insert(&nullifier, 2400 + seq as usize).unwrap();
            }

            drop(hs);
        }

        // Make a copy of hash set from the same buffers.
        {
            let hs = unsafe { HashSet::from_bytes_copy(bytes.as_mut_slice()).unwrap() };

            for (seq, nullifier) in nullifiers.iter().enumerate() {
                assert_eq!(
                    hs.contains(nullifier, Some(2400 + seq as usize)).unwrap(),
                    true
                );
            }
        }
    }

    // An undersized buffer must be rejected with `BufferSize`, not UB.
    #[test]
    fn test_buffer_size_error() {
        const VALUES: usize = 4800;
        const SEQUENCE_THRESHOLD: usize = 2400;

        let mut invalid_bytes = vec![0_u8; 256];

        let res = unsafe {
            HashSetZeroCopy::from_bytes_zero_copy_init(
                invalid_bytes.as_mut_slice(),
                VALUES,
                SEQUENCE_THRESHOLD,
            )
        };
        assert!(matches!(res, Err(HashSetError::BufferSize(_, _))));
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set/src/lib.rs
use light_utils::{bigint::bigint_to_be_bytes_array, UtilsError};
use num_bigint::{BigUint, ToBigUint};
use num_traits::{FromBytes, ToPrimitive};
use std::{
    alloc::{self, handle_alloc_error, Layout},
    cmp::Ordering,
    marker::Send,
    mem,
    ptr::NonNull,
};
use thiserror::Error;

pub mod zero_copy;

/// Maximum number of quadratic-probing attempts per lookup/insertion.
pub const ITERATIONS: usize = 20;

/// Errors returned by `HashSet` operations.
#[derive(Debug, Error, PartialEq)]
pub enum HashSetError {
    #[error("The hash set is full, cannot add any new elements")]
    Full,
    #[error("The provided element is already in the hash set")]
    ElementAlreadyExists,
    #[error("The provided element doesn't exist in the hash set")]
    ElementDoesNotExist,
    #[error("Could not convert the index from/to usize")]
    UsizeConv,
    #[error("Integer overflow")]
    IntegerOverflow,
    #[error("Invalid buffer size, expected {0}, got {1}")]
    BufferSize(usize, usize),
    #[error("Utils: big integer conversion error")]
    Utils(#[from] UtilsError),
}

// Maps each error to a stable custom error code (9001-9006) for on-chain use.
#[cfg(feature = "solana")]
impl From<HashSetError> for u32 {
    fn from(e: HashSetError) -> u32 {
        match e {
            HashSetError::Full => 9001,
            HashSetError::ElementAlreadyExists => 9002,
            HashSetError::ElementDoesNotExist => 9003,
            HashSetError::UsizeConv => 9004,
            HashSetError::IntegerOverflow => 9005,
            HashSetError::BufferSize(_, _) => 9006,
            HashSetError::Utils(e) => e.into(),
        }
    }
}

#[cfg(feature = "solana")]
impl From<HashSetError> for solana_program::program_error::ProgramError {
    fn from(e: HashSetError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}

/// A single occupied slot of the hash set: a 32-byte big-endian value plus an
/// optional expiry marker (sequence number).
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct HashSetCell {
    // Big-endian byte representation of the stored big integer.
    pub value: [u8; 32],
    // `None` until the cell is marked via `mark_with_sequence_number`.
    pub sequence_number: Option<usize>,
}

// SAFETY: `HashSet` owns the allocation behind the raw `buckets` pointer and
// hands out references only through `&self`/`&mut self`.
// NOTE(review): this impl sits next to `HashSetCell`'s definition while
// `unsafe impl Send for HashSetCell` sits next to `HashSet` below — the two
// look swapped; consider moving each next to its own type.
unsafe impl Send for HashSet {}

impl HashSetCell {
    /// Returns the value as a byte array.
    pub fn value_bytes(&self) -> [u8; 32] {
        self.value
    }

    /// Returns the value as a big number.
    pub fn value_biguint(&self) -> BigUint {
        BigUint::from_bytes_be(self.value.as_slice())
    }

    /// Returns the associated sequence number.
    pub fn sequence_number(&self) -> Option<usize> {
        self.sequence_number
    }

    /// Checks whether the value is marked with a sequence number.
    pub fn is_marked(&self) -> bool {
        self.sequence_number.is_some()
    }

    /// Checks whether the value is valid according to the provided
    /// `current_sequence_number` (which usually should be a sequence number
    /// associated with the Merkle tree).
    ///
    /// The value is valid if:
    ///
    /// * It was not annotated with a sequence number.
    /// * Its sequence number is **greater** than the provided
    ///   `current_sequence_number`.
    ///
    /// The value is invalid (expired, replaceable) if its sequence number is
    /// lower than or equal to the provided `current_sequence_number`.
    pub fn is_valid(&self, current_sequence_number: usize) -> bool {
        match self.sequence_number {
            Some(sequence_number) => match sequence_number.cmp(&current_sequence_number) {
                Ordering::Less | Ordering::Equal => false,
                Ordering::Greater => true,
            },
            None => true,
        }
    }
}

#[derive(Debug)]
pub struct HashSet {
    /// Capacity of the buckets.
    capacity: usize,
    /// Difference of sequence numbers, after which the given element can be
    /// replaced by an another one (with a sequence number higher than the
    /// threshold).
    pub sequence_threshold: usize,
    /// An array of buckets. It has a size equal to the expected number of
    /// elements.
    buckets: NonNull<Option<HashSetCell>>,
}

// SAFETY: `HashSetCell` is plain `Copy` data with no interior pointers.
unsafe impl Send for HashSetCell {}

impl HashSet {
    /// Size of the struct **without** dynamically sized fields.
    pub fn non_dyn_fields_size() -> usize {
        // capacity
        mem::size_of::<usize>()
        // sequence_threshold
        + mem::size_of::<usize>()
    }

    /// Size which needs to be allocated on Solana account to fit the hash set.
    pub fn size_in_account(capacity_values: usize) -> usize {
        let dyn_fields_size = Self::non_dyn_fields_size();

        let buckets_size_unaligned = mem::size_of::<Option<HashSetCell>>() * capacity_values;
        // Make sure that alignment of `values` matches the alignment of `usize`.
        let buckets_size = buckets_size_unaligned + mem::align_of::<usize>()
            - (buckets_size_unaligned % mem::align_of::<usize>());

        dyn_fields_size + buckets_size
    }

    // Create a new hash set with the given capacity
    pub fn new(capacity_values: usize, sequence_threshold: usize) -> Result<Self, HashSetError> {
        // PANICS: `Layout::array` fails only if the total allocation size
        // overflows `isize::MAX`.
        let layout = Layout::array::<Option<HashSetCell>>(capacity_values).unwrap();
        // SAFETY: It's just a regular allocation with the layout built above.
        let values_ptr = unsafe { alloc::alloc(layout) as *mut Option<HashSetCell> };
        if values_ptr.is_null() {
            handle_alloc_error(layout);
        }
        let values = NonNull::new(values_ptr).unwrap();
        // `alloc` returns uninitialized memory — set every bucket to `None`.
        for i in 0..capacity_values {
            unsafe {
                std::ptr::write(values_ptr.add(i), None);
            }
        }

        Ok(HashSet {
            sequence_threshold,
            capacity: capacity_values,
            buckets: values,
        })
    }

    /// Creates a copy of `HashSet` from the given byte slice.
    ///
    /// # Purpose
    ///
    /// This method is meant to be used mostly in the SDK code, to convert
    /// fetched Solana accounts to actual hash sets. Creating a copy is the
    /// safest way of conversion in async Rust.
    ///
    /// # Safety
    ///
    /// This is highly unsafe. Ensuring the alignment and that the slice
    /// provides actual data of the hash set is the caller's
    /// responsibility.
    pub unsafe fn from_bytes_copy(bytes: &mut [u8]) -> Result<Self, HashSetError> {
        if bytes.len() < Self::non_dyn_fields_size() {
            return Err(HashSetError::BufferSize(
                Self::non_dyn_fields_size(),
                bytes.len(),
            ));
        }

        // Header layout (little-endian): [capacity][sequence_threshold].
        let capacity = usize::from_le_bytes(bytes[0..8].try_into().unwrap());
        let sequence_threshold = usize::from_le_bytes(bytes[8..16].try_into().unwrap());

        let expected_size = Self::size_in_account(capacity);
        if bytes.len() != expected_size {
            return Err(HashSetError::BufferSize(expected_size, bytes.len()));
        }

        let buckets_layout = Layout::array::<Option<HashSetCell>>(capacity).unwrap();
        // PANICS: `Layout::array` fails only if the total size overflows
        // `isize::MAX`; `capacity` was validated against the buffer size
        // above, so the unwrap cannot fire here.
        let buckets_dst_ptr = unsafe { alloc::alloc(buckets_layout) as *mut Option<HashSetCell> };
        if buckets_dst_ptr.is_null() {
            handle_alloc_error(buckets_layout);
        }
        let buckets = NonNull::new(buckets_dst_ptr).unwrap();
        for i in 0..capacity {
            std::ptr::write(buckets_dst_ptr.add(i), None);
        }
        // Skip the header plus one reserved word to reach the bucket area.
        let offset = Self::non_dyn_fields_size() + mem::size_of::<usize>();
        let buckets_src_ptr = bytes.as_ptr().add(offset) as *const Option<HashSetCell>;
        std::ptr::copy(buckets_src_ptr, buckets_dst_ptr, capacity);

        Ok(Self {
            capacity,
            sequence_threshold,
            buckets,
        })
    }

    // Quadratic probing: index = (value + i^2) mod capacity, with the
    // iteration counter offset by capacity / 10.
    fn probe_index(&self, value: &BigUint, iteration: usize) -> usize {
        // Increase stepsize over the capacity of the hash set.
        let iteration = iteration + self.capacity / 10;
        let probe_index = (value
            + iteration.to_biguint().unwrap() * iteration.to_biguint().unwrap())
            % self.capacity.to_biguint().unwrap();
        probe_index.to_usize().unwrap()
    }

    /// Returns a reference to a bucket under the given `index`. Does not check
    /// the validity.
    pub fn get_bucket(&self, index: usize) -> Option<&Option<HashSetCell>> {
        if index >= self.capacity {
            return None;
        }
        // SAFETY: `index < capacity` (checked above) and `buckets` holds
        // `capacity` initialized elements.
        let bucket = unsafe { &*self.buckets.as_ptr().add(index) };
        Some(bucket)
    }

    /// Returns a mutable reference to a bucket under the given `index`. Does
    /// not check the validity.
    pub fn get_bucket_mut(&mut self, index: usize) -> Option<&mut Option<HashSetCell>> {
        if index >= self.capacity {
            return None;
        }
        // SAFETY: `index < capacity` (checked above); `&mut self` guarantees
        // exclusive access to the buckets.
        let bucket = unsafe { &mut *self.buckets.as_ptr().add(index) };
        Some(bucket)
    }

    /// Returns a reference to an unmarked bucket under the given index. If the
    /// bucket is marked, returns `None`.
    pub fn get_unmarked_bucket(&self, index: usize) -> Option<&Option<HashSetCell>> {
        let bucket = self.get_bucket(index);
        let is_unmarked = match bucket {
            Some(Some(bucket)) => !bucket.is_marked(),
            Some(None) => false,
            None => false,
        };
        if is_unmarked {
            bucket
        } else {
            None
        }
    }

    /// Returns the number of buckets.
    pub fn get_capacity(&self) -> usize {
        self.capacity
    }

    /// Attempts to store `value` in the already-occupied cell at
    /// `value_index`.
    ///
    /// Returns `Ok(true)` if the occupant was expired and has been replaced,
    /// `Ok(false)` if the occupant is a different, still-valid element, and
    /// `Err(ElementAlreadyExists)` if the same valid `value` already lives
    /// there.
    fn insert_into_occupied_cell(
        &mut self,
        value_index: usize,
        value: &BigUint,
        current_sequence_number: usize,
    ) -> Result<bool, HashSetError> {
        // PANICS: We trust the bounds of `value_index` here.
        let bucket = self.get_bucket_mut(value_index).unwrap();
        match bucket {
            // The cell in the value array is already taken.
            Some(bucket) => {
                // We can overwrite that cell only if the element
                // is expired - when its stored expiry sequence number is
                // lower than or equal to the provided current sequence
                // number.
                if let Some(element_sequence_number) = bucket.sequence_number {
                    if current_sequence_number >= element_sequence_number {
                        *bucket = HashSetCell {
                            value: bigint_to_be_bytes_array(value)?,
                            sequence_number: None,
                        };
                        return Ok(true);
                    }
                }
                // Otherwise, we need to prevent having multiple valid
                // elements with the same value.
                if &BigUint::from_be_bytes(bucket.value.as_slice()) == value {
                    return Err(HashSetError::ElementAlreadyExists);
                }
            }
            // Panics: If there is a hash set cell pointing to a `None` value,
            // it means we really screwed up in the implementation...
            // That should never happen.
            None => unreachable!(),
        }
        Ok(false)
    }

    /// Inserts a value into the hash set, with up to `ITERATIONS` attempts.
    ///
    /// Every attempt uses quadratic probing to find an empty cell or a cell
    /// which can be overwritten.
    ///
    /// `current_sequence_number` is used to check whether existing values can
    /// be overwritten.
    pub fn insert(
        &mut self,
        value: &BigUint,
        current_sequence_number: usize,
    ) -> Result<usize, HashSetError> {
        let index_bucket = self.find_element_iter(value, current_sequence_number, 0, ITERATIONS)?;
        let (index, is_new) = match index_bucket {
            Some(index) => index,
            None => {
                return Err(HashSetError::Full);
            }
        };
        match is_new {
            // The visited hash set cell points to a value in the array.
            false => {
                if self.insert_into_occupied_cell(index, value, current_sequence_number)? {
                    return Ok(index);
                }
            }
            true => {
                // PANICS: We trust the bounds of `index`.
                let bucket = self.get_bucket_mut(index).unwrap();
                *bucket = Some(HashSetCell {
                    value: bigint_to_be_bytes_array(value)?,
                    sequence_number: None,
                });

                return Ok(index);
            }
        }

        Err(HashSetError::Full)
    }

    /// Finds an index of the provided `value` inside `buckets`.
    ///
    /// Uses the optional `current_sequence_number` argument for checking the
    /// validity of the element.
    pub fn find_element_index(
        &self,
        value: &BigUint,
        current_sequence_number: Option<usize>,
    ) -> Result<Option<usize>, HashSetError> {
        for i in 0..ITERATIONS {
            let probe_index = self.probe_index(value, i);
            // PANICS: `probe_index()` ensures the bounds.
            let bucket = self.get_bucket(probe_index).unwrap();
            match bucket {
                Some(bucket) => {
                    if &bucket.value_biguint() == value {
                        match current_sequence_number {
                            // If the caller provided `current_sequence_number`,
                            // check the validity of the bucket.
                            Some(current_sequence_number) => {
                                if bucket.is_valid(current_sequence_number) {
                                    return Ok(Some(probe_index));
                                }
                                continue;
                            }
                            None => return Ok(Some(probe_index)),
                        }
                    }
                    continue;
                }
                // If we found an empty bucket, it means that there is no
                // chance of our element existing in the hash set.
                None => {
                    return Ok(None);
                }
            }
        }
        Ok(None)
    }

    /// Returns the cell holding `value` (and its bucket index), if present.
    pub fn find_element(
        &self,
        value: &BigUint,
        current_sequence_number: Option<usize>,
    ) -> Result<Option<(&HashSetCell, usize)>, HashSetError> {
        let index = self.find_element_index(value, current_sequence_number)?;
        match index {
            Some(index) => {
                let bucket = self.get_bucket(index).unwrap();
                match bucket {
                    Some(bucket) => Ok(Some((bucket, index))),
                    None => Ok(None),
                }
            }
            None => Ok(None),
        }
    }

    /// Mutable variant of `find_element`.
    pub fn find_element_mut(
        &mut self,
        value: &BigUint,
        current_sequence_number: Option<usize>,
    ) -> Result<Option<(&mut HashSetCell, usize)>, HashSetError> {
        let index = self.find_element_index(value, current_sequence_number)?;
        match index {
            Some(index) => {
                let bucket = self.get_bucket_mut(index).unwrap();
                match bucket {
                    Some(bucket) => Ok(Some((bucket, index))),
                    None => Ok(None),
                }
            }
            None => Ok(None),
        }
    }

    /// find_element_iter iterates over a fixed range of elements
    /// in the hash set.
    /// We always have to iterate over the whole range
    /// to make sure that the value is not in the hash-set.
    /// Returns the position of the first free value:
    /// `(index, true)` for a never-used bucket, `(index, false)` for an
    /// expired (overwritable) one, `None` if no bucket is available.
    pub fn find_element_iter(
        &mut self,
        value: &BigUint,
        current_sequence_number: usize,
        start_iter: usize,
        num_iterations: usize,
    ) -> Result<Option<(usize, bool)>, HashSetError> {
        let mut first_free_element: Option<(usize, bool)> = None;
        for i in start_iter..start_iter + num_iterations {
            let probe_index = self.probe_index(value, i);
            let bucket = self.get_bucket(probe_index).unwrap();
            match bucket {
                Some(bucket) => {
                    let is_valid = bucket.is_valid(current_sequence_number);
                    if first_free_element.is_none() && !is_valid {
                        first_free_element = Some((probe_index, false));
                    }
                    if is_valid && &bucket.value_biguint() == value {
                        return Err(HashSetError::ElementAlreadyExists);
                    } else {
                        continue;
                    }
                }
                None => {
                    // A previous bucket could have been freed already even
                    // though the whole hash set has not been used yet.
                    if first_free_element.is_none() {
                        first_free_element = Some((probe_index, true));
                    }
                    // Since we encountered an empty bucket we know for sure
                    // that the element is not in a bucket with higher probe
                    // index.
                    break;
                }
            }
        }
        Ok(first_free_element)
    }

    /// Returns a first available element.
    pub fn first(
        &self,
        current_sequence_number: usize,
    ) -> Result<Option<&HashSetCell>, HashSetError> {
        for i in 0..self.capacity {
            // PANICS: The loop ensures the bounds.
            let bucket = self.get_bucket(i).unwrap();
            if let Some(bucket) = bucket {
                if bucket.is_valid(current_sequence_number) {
                    return Ok(Some(bucket));
                }
            }
        }
        Ok(None)
    }

    /// Returns a first available element that does not have a sequence number.
    pub fn first_no_seq(&self) -> Result<Option<(HashSetCell, u16)>, HashSetError> {
        for i in 0..self.capacity {
            // PANICS: The loop ensures the bounds.
            let bucket = self.get_bucket(i).unwrap();
            if let Some(bucket) = bucket {
                if bucket.sequence_number.is_none() {
                    return Ok(Some((*bucket, i as u16)));
                }
            }
        }
        Ok(None)
    }

    /// Checks if the hash set contains a value.
    pub fn contains(
        &self,
        value: &BigUint,
        sequence_number: Option<usize>,
    ) -> Result<bool, HashSetError> {
        let element = self.find_element(value, sequence_number)?;
        Ok(element.is_some())
    }

    /// Marks the given element with a given sequence number.
    ///
    /// The stored expiry is `sequence_number + sequence_threshold`: the cell
    /// becomes replaceable once the current sequence number reaches that
    /// value.
    pub fn mark_with_sequence_number(
        &mut self,
        index: usize,
        sequence_number: usize,
    ) -> Result<(), HashSetError> {
        let sequence_threshold = self.sequence_threshold;
        let element = self
            .get_bucket_mut(index)
            .ok_or(HashSetError::ElementDoesNotExist)?;

        match element {
            Some(element) => {
                element.sequence_number = Some(sequence_number + sequence_threshold);
                Ok(())
            }
            None => Err(HashSetError::ElementDoesNotExist),
        }
    }

    /// Returns an iterator over elements.
    pub fn iter(&self) -> HashSetIterator {
        HashSetIterator {
            hash_set: self,
            current: 0,
        }
    }
}

impl Drop for HashSet {
    fn drop(&mut self) {
        // SAFETY: `buckets` was allocated in `new`/`from_bytes_copy` with a
        // layout derived from `capacity`, so rebuilding the same layout here
        // makes the deallocation sound. (`HashSetZeroCopy` wraps `HashSet` in
        // `ManuallyDrop`, so this never runs for caller-owned buffers.)
        unsafe {
            let layout = Layout::array::<Option<HashSetCell>>(self.capacity).unwrap();
            alloc::dealloc(self.buckets.as_ptr() as *mut u8, layout);
        }
    }
}

impl PartialEq for HashSet {
    fn eq(&self, other: &Self) -> bool {
        self.capacity.eq(&other.capacity)
            && self.sequence_threshold.eq(&other.sequence_threshold)
            && self.iter().eq(other.iter())
    }
}

/// Iterator over the occupied buckets of a `HashSet`, yielding
/// `(index, cell)` pairs.
pub struct HashSetIterator<'a> {
    hash_set: &'a HashSet,
    current: usize,
}

impl<'a> Iterator for HashSetIterator<'a> {
    type Item = (usize, &'a HashSetCell);

    fn next(&mut self) -> Option<Self::Item> {
        // Skip empty buckets; stop once every bucket has been visited.
        while self.current < self.hash_set.get_capacity() {
            let element_index = self.current;
            self.current += 1;
            if let Some(Some(cur_element)) = self.hash_set.get_bucket(element_index) {
                return Some((element_index, cur_element));
            }
        }
        None
    }
}

#[cfg(test)]
mod test {
    use ark_bn254::Fr;
    use ark_ff::UniformRand;
    use rand::{thread_rng, Rng};

    use crate::zero_copy::HashSetZeroCopy;

    use super::*;

    #[test]
    fn test_is_valid() {
        let mut rng = thread_rng();

        let cell = HashSetCell {
            value: [0u8; 32],
            sequence_number: None,
        };
        // It should be always valid, no matter the sequence number.
        assert_eq!(cell.is_valid(0), true);
        for _ in 0..100 {
            let seq: usize = rng.gen();
            assert_eq!(cell.is_valid(seq), true);
        }

        let cell = HashSetCell {
            value: [0u8; 32],
            sequence_number: Some(2400),
        };
        // Sequence numbers up to 2400 should succeed.
        for i in 0..2400 {
            assert_eq!(cell.is_valid(i), true);
        }
        for i in 2400..10000 {
            assert_eq!(cell.is_valid(i), false);
        }
    }

    /// Manual test cases. A simple check whether basic properties of the hash
    /// set work.
    #[test]
    fn test_hash_set_manual() {
        let mut hs = HashSet::new(256, 4).unwrap();

        // Insert an element and immediately mark it with a sequence number.
        // An equivalent to a single insertion in Light Protocol
        let element_1_1 = 1.to_biguint().unwrap();
        let index_1_1 = hs.insert(&element_1_1, 0).unwrap();
        hs.mark_with_sequence_number(index_1_1, 1).unwrap();

        // Check if element exists in the set.
        assert_eq!(hs.contains(&element_1_1, Some(1)).unwrap(), true);
        // Try inserting the same element, even though we didn't reach the
        // threshold.
        assert!(matches!(
            hs.insert(&element_1_1, 1),
            Err(HashSetError::ElementAlreadyExists)
        ));

        // Insert multiple elements and mark them with one sequence number.
        // An equivalent to a batched insertion in Light Protocol.
        let element_2_3 = 3.to_biguint().unwrap();
        let element_2_6 = 6.to_biguint().unwrap();
        let element_2_8 = 8.to_biguint().unwrap();
        let element_2_9 = 9.to_biguint().unwrap();
        let index_2_3 = hs.insert(&element_2_3, 1).unwrap();
        let index_2_6 = hs.insert(&element_2_6, 1).unwrap();
        let index_2_8 = hs.insert(&element_2_8, 1).unwrap();
        let index_2_9 = hs.insert(&element_2_9, 1).unwrap();

        assert_eq!(hs.contains(&element_2_3, Some(2)).unwrap(), true);
        assert_eq!(hs.contains(&element_2_6, Some(2)).unwrap(), true);
        assert_eq!(hs.contains(&element_2_8, Some(2)).unwrap(), true);
        assert_eq!(hs.contains(&element_2_9, Some(2)).unwrap(), true);

        hs.mark_with_sequence_number(index_2_3, 2).unwrap();
        hs.mark_with_sequence_number(index_2_6, 2).unwrap();
        hs.mark_with_sequence_number(index_2_8, 2).unwrap();
        hs.mark_with_sequence_number(index_2_9, 2).unwrap();

        // Re-insertion before expiry must fail for the whole batch.
        assert!(matches!(
            hs.insert(&element_2_3, 2),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_6, 2),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_8, 2),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_9, 2),
            Err(HashSetError::ElementAlreadyExists)
        ));

        let element_3_11 = 11.to_biguint().unwrap();
        let element_3_13 = 13.to_biguint().unwrap();
        let element_3_21 = 21.to_biguint().unwrap();
        let element_3_29 = 29.to_biguint().unwrap();
        let index_3_11 = hs.insert(&element_3_11, 2).unwrap();
        let index_3_13 = hs.insert(&element_3_13, 2).unwrap();
        let index_3_21 = hs.insert(&element_3_21, 2).unwrap();
        let index_3_29 = hs.insert(&element_3_29, 2).unwrap();

        assert_eq!(hs.contains(&element_3_11, Some(3)).unwrap(), true);
        assert_eq!(hs.contains(&element_3_13, Some(3)).unwrap(), true);
        assert_eq!(hs.contains(&element_3_21, Some(3)).unwrap(), true);
        assert_eq!(hs.contains(&element_3_29, Some(3)).unwrap(), true);

        hs.mark_with_sequence_number(index_3_11, 3).unwrap();
        hs.mark_with_sequence_number(index_3_13, 3).unwrap();
        hs.mark_with_sequence_number(index_3_21, 3).unwrap();
        hs.mark_with_sequence_number(index_3_29, 3).unwrap();

        assert!(matches!(
            hs.insert(&element_3_11, 3),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_3_13, 3),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_3_21, 3),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_3_29, 3),
            Err(HashSetError::ElementAlreadyExists)
        ));

        let element_4_93 = 93.to_biguint().unwrap();
        let element_4_65 = 64.to_biguint().unwrap();
        let element_4_72 = 72.to_biguint().unwrap();
        let element_4_15 = 15.to_biguint().unwrap();
        let index_4_93 = hs.insert(&element_4_93, 3).unwrap();
        let index_4_65 = hs.insert(&element_4_65, 3).unwrap();
        let index_4_72 = hs.insert(&element_4_72, 3).unwrap();
        let index_4_15 = hs.insert(&element_4_15, 3).unwrap();

        assert_eq!(hs.contains(&element_4_93, Some(4)).unwrap(), true);
        assert_eq!(hs.contains(&element_4_65, Some(4)).unwrap(), true);
        assert_eq!(hs.contains(&element_4_72, Some(4)).unwrap(), true);
        assert_eq!(hs.contains(&element_4_15, Some(4)).unwrap(), true);

        hs.mark_with_sequence_number(index_4_93, 4).unwrap();
        hs.mark_with_sequence_number(index_4_65, 4).unwrap();
        hs.mark_with_sequence_number(index_4_72, 4).unwrap();
        hs.mark_with_sequence_number(index_4_15, 4).unwrap();

        // Try inserting the same elements we inserted before.
        //
        // Ones with the sequence number difference lower or equal to the
        // sequence threshold (4) will fail.
        //
        // Ones with the higher dif will succeed.
        assert!(matches!(
            hs.insert(&element_1_1, 4),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_3, 5),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_6, 5),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_8, 5),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_9, 5),
            Err(HashSetError::ElementAlreadyExists)
        ));
        hs.insert(&element_1_1, 5).unwrap();
        hs.insert(&element_2_3, 6).unwrap();
        hs.insert(&element_2_6, 6).unwrap();
        hs.insert(&element_2_8, 6).unwrap();
        hs.insert(&element_2_9, 6).unwrap();
    }

    /// Test cases with random prime field elements.
    #[test]
    fn test_hash_set_random() {
        let mut hs = HashSet::new(6857, 2400).unwrap();

        // The hash set should be empty.
        assert_eq!(hs.first(0).unwrap(), None);

        let mut rng = thread_rng();
        let mut seq = 0;
        let nullifiers: [BigUint; 24000] =
            std::array::from_fn(|_| BigUint::from(Fr::rand(&mut rng)));
        for nf_chunk in nullifiers.chunks(2400) {
            for nullifier in nf_chunk.iter() {
                assert_eq!(hs.contains(&nullifier, Some(seq)).unwrap(), false);
                let index = hs.insert(&nullifier, seq as usize).unwrap();
                assert_eq!(hs.contains(&nullifier, Some(seq)).unwrap(), true);

                let nullifier_bytes = bigint_to_be_bytes_array(&nullifier).unwrap();

                // Freshly inserted cells are unmarked (no sequence number).
                let element = hs
                    .find_element(&nullifier, Some(seq))
                    .unwrap()
                    .unwrap()
                    .0
                    .clone();
                assert_eq!(
                    element,
                    HashSetCell {
                        value: bigint_to_be_bytes_array(&nullifier).unwrap(),
                        sequence_number: None,
                    }
                );
                assert_eq!(element.value_bytes(), nullifier_bytes);
                assert_eq!(&element.value_biguint(), nullifier);
                assert_eq!(element.sequence_number(), None);
                assert!(!element.is_marked());
                assert!(element.is_valid(seq));

                hs.mark_with_sequence_number(index, seq).unwrap();
                // After marking, the cell carries seq + threshold (2400).
                let element = hs
                    .find_element(&nullifier, Some(seq))
                    .unwrap()
                    .unwrap()
                    .0
                    .clone();
                assert_eq!(
                    element,
                    HashSetCell {
                        value: nullifier_bytes,
                        sequence_number: Some(2400 + seq)
                    }
                );
                assert_eq!(element.value_bytes(), nullifier_bytes);
                assert_eq!(&element.value_biguint(), nullifier);
                assert_eq!(element.sequence_number(), Some(2400 + seq));
                assert!(element.is_marked());
                assert!(element.is_valid(seq));

                // Trying to insert the same nullifier, before reaching the
                // sequence threshold, should fail.
                assert!(matches!(
                    hs.insert(&nullifier, seq as usize + 2399),
                    Err(HashSetError::ElementAlreadyExists),
                ));

                seq += 1;
            }
            seq += 2400;
        }
    }

    /// Checks that a `HashSet` built directly and one built through the
    /// zero-copy byte-slice wrappers end up identical.
    fn hash_set_from_bytes_copy<
        const CAPACITY: usize,
        const SEQUENCE_THRESHOLD: usize,
        const OPERATIONS: usize,
    >() {
        let mut hs_1 = HashSet::new(CAPACITY, SEQUENCE_THRESHOLD).unwrap();

        let mut rng = thread_rng();

        // Create a buffer with random bytes.
        let mut bytes = vec![0u8; HashSet::size_in_account(CAPACITY)];
        rng.fill(bytes.as_mut_slice());

        // Initialize a hash set on top of a byte slice.
        {
            let mut hs_2 = unsafe {
                HashSetZeroCopy::from_bytes_zero_copy_init(&mut bytes, CAPACITY, SEQUENCE_THRESHOLD)
                    .unwrap()
            };

            for seq in 0..OPERATIONS {
                let value = BigUint::from(Fr::rand(&mut rng));
                hs_1.insert(&value, seq).unwrap();
                hs_2.insert(&value, seq).unwrap();
            }

            assert_eq!(hs_1, *hs_2);
        }

        // Create a copy on top of a byte slice.
        {
            let hs_2 = unsafe { HashSet::from_bytes_copy(&mut bytes).unwrap() };
            assert_eq!(hs_1, hs_2);
        }
    }

    #[test]
    fn test_hash_set_from_bytes_copy_6857_2400_3600() {
        hash_set_from_bytes_copy::<6857, 2400, 3600>()
    }

    #[test]
    fn test_hash_set_from_bytes_copy_9601_2400_5000() {
        hash_set_from_bytes_copy::<9601, 2400, 5000>()
    }

    fn hash_set_full<const CAPACITY: usize, const SEQUENCE_THRESHOLD: usize>() {
        for _ in 0..100 {
            let mut hs = HashSet::new(CAPACITY, SEQUENCE_THRESHOLD).unwrap();
            let mut rng = rand::thread_rng();

            // Insert as many values as possible.
The important point is to // encounter the `HashSetError::Full` at some point for i in 0..CAPACITY { let value = BigUint::from(Fr::rand(&mut rng)); match hs.insert(&value, 0) { Ok(index) => hs.mark_with_sequence_number(index, 0).unwrap(), Err(e) => { assert!(matches!(e, HashSetError::Full)); println!("initial insertions: {i}: failed, stopping"); break; } } } // Keep inserting. It should mostly fail, although there might be // also some successful insertions - there might be values which // will end up in unused buckets. for i in 0..1000 { let value = BigUint::from(Fr::rand(&mut rng)); let res = hs.insert(&value, 0); if res.is_err() { assert!(matches!(res, Err(HashSetError::Full))); } else { println!("secondary insertions: {i}: apparent success with value: {value:?}"); } } // Try again with defined sequence numbers, but still too small to // vacate any cell. for i in 0..1000 { let value = BigUint::from(Fr::rand(&mut rng)); // Sequence numbers lower than the threshold should not vacate // any cell. let sequence_number = rng.gen_range(0..hs.sequence_threshold); let res = hs.insert(&value, sequence_number); if res.is_err() { assert!(matches!(res, Err(HashSetError::Full))); } else { println!("tertiary insertions: {i}: surprising success with value: {value:?}"); } } // Use sequence numbers which are going to vacate cells. All // insertions should be successful now. 
for i in 0..CAPACITY { let value = BigUint::from(Fr::rand(&mut rng)); if let Err(e) = hs.insert(&value, SEQUENCE_THRESHOLD + i) { assert!(matches!(e, HashSetError::Full)); println!("insertions after fillup: {i}: failed, stopping"); break; } } } } #[test] fn test_hash_set_full_6857_2400() { hash_set_full::<6857, 2400>() } #[test] fn test_hash_set_full_9601_2400() { hash_set_full::<9601, 2400>() } #[test] fn test_hash_set_element_does_not_exist() { let mut hs = HashSet::new(4800, 2400).unwrap(); let mut rng = thread_rng(); for _ in 0..1000 { let index = rng.gen_range(0..4800); // Assert `ElementDoesNotExist` error. let res = hs.mark_with_sequence_number(index, 0); assert!(matches!(res, Err(HashSetError::ElementDoesNotExist))); } for _ in 0..1000 { // After actually appending the value, the same operation should be // possible let value = BigUint::from(Fr::rand(&mut rng)); let index = hs.insert(&value, 0).unwrap(); hs.mark_with_sequence_number(index, 1).unwrap(); } } #[test] fn test_hash_set_iter_manual() { let mut hs = HashSet::new(6857, 2400).unwrap(); let nullifier_1 = 945635_u32.to_biguint().unwrap(); let nullifier_2 = 3546656654734254353455_u128.to_biguint().unwrap(); let nullifier_3 = 543543656564_u64.to_biguint().unwrap(); let nullifier_4 = 43_u8.to_biguint().unwrap(); let nullifier_5 = 0_u8.to_biguint().unwrap(); let nullifier_6 = 65423_u32.to_biguint().unwrap(); let nullifier_7 = 745654665_u32.to_biguint().unwrap(); let nullifier_8 = 97664353453465354645645465_u128.to_biguint().unwrap(); let nullifier_9 = 453565465464565635475_u128.to_biguint().unwrap(); let nullifier_10 = 543645654645_u64.to_biguint().unwrap(); hs.insert(&nullifier_1, 0).unwrap(); hs.insert(&nullifier_2, 0).unwrap(); hs.insert(&nullifier_3, 0).unwrap(); hs.insert(&nullifier_4, 0).unwrap(); hs.insert(&nullifier_5, 0).unwrap(); hs.insert(&nullifier_6, 0).unwrap(); hs.insert(&nullifier_7, 0).unwrap(); hs.insert(&nullifier_8, 0).unwrap(); hs.insert(&nullifier_9, 0).unwrap(); 
hs.insert(&nullifier_10, 0).unwrap(); let inserted_nullifiers = hs .iter() .map(|(_, nullifier)| nullifier.value_biguint()) .collect::<Vec<_>>(); assert_eq!(inserted_nullifiers.len(), 10); assert_eq!(inserted_nullifiers[0], nullifier_7); assert_eq!(inserted_nullifiers[1], nullifier_3); assert_eq!(inserted_nullifiers[2], nullifier_10); assert_eq!(inserted_nullifiers[3], nullifier_1); assert_eq!(inserted_nullifiers[4], nullifier_8); assert_eq!(inserted_nullifiers[5], nullifier_5); assert_eq!(inserted_nullifiers[6], nullifier_4); assert_eq!(inserted_nullifiers[7], nullifier_2); assert_eq!(inserted_nullifiers[8], nullifier_9); assert_eq!(inserted_nullifiers[9], nullifier_6); } fn hash_set_iter_random< const INSERTIONS: usize, const CAPACITY: usize, const SEQUENCE_THRESHOLD: usize, >() { let mut hs = HashSet::new(CAPACITY, SEQUENCE_THRESHOLD).unwrap(); let mut rng = thread_rng(); let nullifiers: [BigUint; INSERTIONS] = std::array::from_fn(|_| BigUint::from(Fr::rand(&mut rng))); for nullifier in nullifiers.iter() { hs.insert(&nullifier, 0).unwrap(); } let mut sorted_nullifiers = nullifiers.iter().collect::<Vec<_>>(); let mut inserted_nullifiers = hs .iter() .map(|(_, nullifier)| nullifier.value_biguint()) .collect::<Vec<_>>(); sorted_nullifiers.sort(); inserted_nullifiers.sort(); let inserted_nullifiers = inserted_nullifiers.iter().collect::<Vec<&BigUint>>(); assert_eq!(inserted_nullifiers.len(), INSERTIONS); assert_eq!(sorted_nullifiers.as_slice(), inserted_nullifiers.as_slice()); } #[test] fn test_hash_set_iter_random_6857_2400() { hash_set_iter_random::<3500, 6857, 2400>() } #[test] fn test_hash_set_iter_random_9601_2400() { hash_set_iter_random::<5000, 9601, 2400>() } #[test] fn test_hash_set_get_bucket() { let mut hs = HashSet::new(6857, 2400).unwrap(); for i in 0..3600 { let bn_i = i.to_biguint().unwrap(); hs.insert(&bn_i, i).unwrap(); } let mut unused_indices = vec![true; 6857]; for i in 0..3600 { let bn_i = i.to_biguint().unwrap(); let i = 
hs.find_element_index(&bn_i, None).unwrap().unwrap(); let element = hs.get_bucket(i).unwrap().unwrap(); assert_eq!(element.value_biguint(), bn_i); unused_indices[i] = false; } // Unused cells within the capacity should be `Some(None)`. for i in unused_indices.iter().enumerate() { if *i.1 { assert!(hs.get_bucket(i.0).unwrap().is_none()); } } // Cells over the capacity should be `None`. for i in 6857..10_000 { assert!(hs.get_bucket(i).is_none()); } } #[test] fn test_hash_set_get_bucket_mut() { let mut hs = HashSet::new(6857, 2400).unwrap(); for i in 0..3600 { let bn_i = i.to_biguint().unwrap(); hs.insert(&bn_i, i).unwrap(); } let mut unused_indices = vec![false; 6857]; for i in 0..3600 { let bn_i = i.to_biguint().unwrap(); let i = hs.find_element_index(&bn_i, None).unwrap().unwrap(); let element = hs.get_bucket_mut(i).unwrap(); assert_eq!(element.unwrap().value_biguint(), bn_i); unused_indices[i] = true; // "Nullify" the element. *element = Some(HashSetCell { value: [0_u8; 32], sequence_number: None, }); } for (i, is_used) in unused_indices.iter().enumerate() { if *is_used { let element = hs.get_bucket_mut(i).unwrap().unwrap(); assert_eq!(element.value_bytes(), [0_u8; 32]); } } // Unused cells within the capacity should be `Some(None)`. for (i, is_used) in unused_indices.iter().enumerate() { if !*is_used { assert!(hs.get_bucket_mut(i).unwrap().is_none()); } } // Cells over the capacity should be `None`. for i in 6857..10_000 { assert!(hs.get_bucket_mut(i).is_none()); } } #[test] fn test_hash_set_get_unmarked_bucket() { let mut hs = HashSet::new(6857, 2400).unwrap(); // Insert incremental elements, so they end up being in the same // sequence in the hash set. (0..3600).for_each(|i| { let bn_i = i.to_biguint().unwrap(); hs.insert(&bn_i, i).unwrap(); }); for i in 0..3600 { let i = hs .find_element_index(&i.to_biguint().unwrap(), None) .unwrap() .unwrap(); let element = hs.get_unmarked_bucket(i); assert!(element.is_some()); } // Mark the elements. 
for i in 0..3600 { let index = hs .find_element_index(&i.to_biguint().unwrap(), None) .unwrap() .unwrap(); hs.mark_with_sequence_number(index, i).unwrap(); } for i in 0..3600 { let i = hs .find_element_index(&i.to_biguint().unwrap(), None) .unwrap() .unwrap(); let element = hs.get_unmarked_bucket(i); assert!(element.is_none()); } } #[test] fn test_hash_set_first_no_seq() { let mut hs = HashSet::new(6857, 2400).unwrap(); // Insert incremental elements, so they end up being in the same // sequence in the hash set. for i in 0..3600 { let bn_i = i.to_biguint().unwrap(); hs.insert(&bn_i, i).unwrap(); let element = hs.first_no_seq().unwrap().unwrap(); assert_eq!(element.0.value_biguint(), 0.to_biguint().unwrap()); } } }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/Cargo.toml
[package] name = "light-indexed-merkle-tree" version = "1.1.0" description = "Implementation of indexed (and concurrent) Merkle tree in Rust" repository = "https://github.com/Lightprotocol/light-protocol" license = "Apache-2.0" edition = "2021" [features] solana = [ "light-concurrent-merkle-tree/solana", "solana-program" ] [dependencies] borsh = { version = "0.10" } light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" } light-hasher = { path = "../hasher", version = "1.1.0" } light-concurrent-merkle-tree = { path = "../concurrent", version = "1.1.0" } light-merkle-tree-reference = { path = "../reference", version = "1.1.0" } light-utils = { path = "../../utils", version = "1.1.0" } memoffset = "0.9" num-bigint = "0.4" num-traits = "0.2" solana-program = { workspace = true, optional = true } thiserror = "1.0" [dev-dependencies] light-hash-set = { workspace = true } thiserror = "1.0" rand = "0.8" hex = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0"
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/tests/tests.rs
use std::cell::{Ref, RefCell, RefMut}; use light_bounded_vec::BoundedVec; use light_concurrent_merkle_tree::{ errors::ConcurrentMerkleTreeError, event::IndexedMerkleTreeUpdate, light_hasher::{Hasher, Poseidon}, }; use light_hash_set::{HashSet, HashSetError}; use light_indexed_merkle_tree::{ array::{IndexedArray, IndexedElement}, errors::IndexedMerkleTreeError, reference, IndexedMerkleTree, HIGHEST_ADDRESS_PLUS_ONE, }; use light_utils::bigint::bigint_to_be_bytes_array; use num_bigint::{BigUint, RandBigInt, ToBigUint}; use num_traits::{FromBytes, Num}; use rand::thread_rng; use thiserror::Error; const MERKLE_TREE_HEIGHT: usize = 4; const MERKLE_TREE_CHANGELOG: usize = 256; const MERKLE_TREE_ROOTS: usize = 1024; const MERKLE_TREE_CANOPY: usize = 0; const MERKLE_TREE_INDEXED_CHANGELOG: usize = 64; const NET_HEIGHT: usize = MERKLE_TREE_HEIGHT - MERKLE_TREE_CANOPY; const QUEUE_ELEMENTS: usize = 1024; const SAFETY_MARGIN: usize = 10; const NR_NULLIFIERS: usize = 2; /// A mock function which imitates a Merkle tree program instruction for /// inserting nullifiers into the queue. fn program_insert<H>( // PDA mut queue: RefMut<'_, HashSet>, merkle_tree: Ref<'_, IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>>, // Instruction data nullifiers: [[u8; 32]; NR_NULLIFIERS], ) -> Result<(), HashSetError> where H: Hasher, { for i in 0..NR_NULLIFIERS { let nullifier = BigUint::from_be_bytes(nullifiers[i].as_slice()); queue.insert(&nullifier, merkle_tree.sequence_number())?; } Ok(()) } #[derive(Error, Debug)] enum RelayerUpdateError { #[error("Updating Merkle tree failed, {0:?}")] MerkleTreeUpdate(Vec<IndexedMerkleTreeError>), } /// A mock function which imitates a Merkle tree program instruction for /// inserting nullifiers from the queue to the tree. 
fn program_update<H>( // PDAs queue: &mut RefMut<'_, HashSet>, merkle_tree: &mut RefMut<'_, IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>>, // Instruction data changelog_index: u16, indexed_changelog_index: u16, queue_index: u16, low_nullifier: IndexedElement<usize>, low_nullifier_next_value: &BigUint, low_nullifier_proof: &mut BoundedVec<[u8; 32]>, ) -> Result<IndexedMerkleTreeUpdate<usize>, IndexedMerkleTreeError> where H: Hasher, { // Get the nullifier from the queue. let nullifier = queue .get_unmarked_bucket(queue_index as usize) .unwrap() .unwrap(); // Update the Merkle tree. let update = merkle_tree.update( usize::from(changelog_index), usize::from(indexed_changelog_index), nullifier.value_biguint(), low_nullifier.clone(), low_nullifier_next_value.clone(), low_nullifier_proof, )?; // Mark the nullifier. queue .mark_with_sequence_number(queue_index as usize, merkle_tree.sequence_number()) .unwrap(); Ok(update) } // TODO: unify these helpers with MockIndexer /// A mock function which imitates a relayer endpoint for updating the /// nullifier Merkle tree. fn relayer_update<H>( // PDAs queue: &mut RefMut<'_, HashSet>, merkle_tree: &mut RefMut<'_, IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>>, ) -> Result<(), RelayerUpdateError> where H: Hasher, { let mut relayer_indexing_array = IndexedArray::<H, usize>::default(); let mut relayer_merkle_tree = reference::IndexedMerkleTree::<H, usize>::new(MERKLE_TREE_HEIGHT, MERKLE_TREE_CANOPY) .unwrap(); let mut update_errors: Vec<IndexedMerkleTreeError> = Vec::new(); let queue_indices = queue.iter().map(|(index, _)| index).collect::<Vec<_>>(); for queue_index in queue_indices { let changelog_index = merkle_tree.changelog_index(); let indexed_changelog_index = merkle_tree.indexed_changelog_index(); let queue_element = queue.get_unmarked_bucket(queue_index).unwrap().unwrap(); // Create new element from the dequeued value. 
let (old_low_nullifier, old_low_nullifier_next_value) = relayer_indexing_array .find_low_element_for_nonexistent(&queue_element.value_biguint()) .unwrap(); let nullifier_bundle = relayer_indexing_array .new_element_with_low_element_index( old_low_nullifier.index, &queue_element.value_biguint(), ) .unwrap(); let mut low_nullifier_proof = relayer_merkle_tree .get_proof_of_leaf(usize::from(old_low_nullifier.index), false) .unwrap(); // Update on-chain tree. let update_successful = match program_update( queue, merkle_tree, changelog_index as u16, indexed_changelog_index as u16, queue_index as u16, old_low_nullifier, &old_low_nullifier_next_value, &mut low_nullifier_proof, ) { Ok(event) => { assert_eq!( event.new_low_element.index, nullifier_bundle.new_low_element.index ); assert_eq!( event.new_low_element.next_index, nullifier_bundle.new_low_element.next_index ); assert_eq!( event.new_low_element.value, bigint_to_be_bytes_array::<32>(&nullifier_bundle.new_low_element.value) .unwrap() ); assert_eq!( event.new_low_element.next_value, bigint_to_be_bytes_array::<32>(&nullifier_bundle.new_element.value).unwrap() ); let leaf_hash = nullifier_bundle .new_low_element .hash::<H>(&nullifier_bundle.new_element.value) .unwrap(); assert_eq!(event.new_low_element_hash, leaf_hash); let leaf_hash = nullifier_bundle .new_element .hash::<H>(&nullifier_bundle.new_element_next_value) .unwrap(); assert_eq!(event.new_high_element_hash, leaf_hash); assert_eq!( event.new_high_element.index, nullifier_bundle.new_element.index ); assert_eq!( event.new_high_element.next_index, nullifier_bundle.new_element.next_index ); assert_eq!( event.new_high_element.value, bigint_to_be_bytes_array::<32>(&nullifier_bundle.new_element.value).unwrap() ); assert_eq!( event.new_high_element.next_value, bigint_to_be_bytes_array::<32>(&nullifier_bundle.new_element_next_value) .unwrap() ); true } Err(e) => { update_errors.push(e); false } }; // Check if the on-chain Merkle tree was really updated. 
if update_successful { // Update off-chain tree. relayer_merkle_tree .update( &nullifier_bundle.new_low_element, &nullifier_bundle.new_element, &nullifier_bundle.new_element_next_value, ) .unwrap(); let low_nullifier_leaf = nullifier_bundle .new_low_element .hash::<H>(&nullifier_bundle.new_element.value) .unwrap(); let low_nullifier_proof = relayer_merkle_tree .get_proof_of_leaf(nullifier_bundle.new_low_element.index(), false) .unwrap(); merkle_tree .validate_proof( &low_nullifier_leaf, nullifier_bundle.new_low_element.index(), &low_nullifier_proof, ) .unwrap(); let new_nullifier_leaf = nullifier_bundle .new_element .hash::<H>(&nullifier_bundle.new_element_next_value) .unwrap(); let new_nullifier_proof = relayer_merkle_tree .get_proof_of_leaf(nullifier_bundle.new_element.index(), false) .unwrap(); merkle_tree .validate_proof( &new_nullifier_leaf, nullifier_bundle.new_element.index(), &new_nullifier_proof, ) .unwrap(); // Insert the element to the indexing array. relayer_indexing_array .append_with_low_element_index( nullifier_bundle.new_low_element.index, &nullifier_bundle.new_element.value, ) .unwrap(); } } if update_errors.is_empty() { Ok(()) } else { Err(RelayerUpdateError::MerkleTreeUpdate(update_errors)) } } /// Tests the valid case of: /// /// * Inserting nullifiers to the queue. /// * Calling the relayer to update the on-chain nullifier Merkle tree. fn insert_and_update<H>() where H: Hasher, { // On-chain PDAs. let onchain_queue: RefCell<HashSet> = RefCell::new(HashSet::new(QUEUE_ELEMENTS, MERKLE_TREE_ROOTS + SAFETY_MARGIN).unwrap()); let onchain_tree: RefCell<IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>> = RefCell::new( IndexedMerkleTree::new( MERKLE_TREE_HEIGHT, MERKLE_TREE_CHANGELOG, MERKLE_TREE_ROOTS, MERKLE_TREE_CANOPY, MERKLE_TREE_INDEXED_CHANGELOG, ) .unwrap(), ); onchain_tree.borrow_mut().init().unwrap(); // Insert a pair of nullifiers. 
let nullifier1 = 30_u32.to_biguint().unwrap(); let nullifier2 = 10_u32.to_biguint().unwrap(); program_insert::<H>( onchain_queue.borrow_mut(), onchain_tree.borrow(), [ bigint_to_be_bytes_array(&nullifier1).unwrap(), bigint_to_be_bytes_array(&nullifier2).unwrap(), ], ) .unwrap(); // Insert an another pair of nullifiers. let nullifier3 = 20_u32.to_biguint().unwrap(); let nullifier4 = 50_u32.to_biguint().unwrap(); program_insert::<H>( onchain_queue.borrow_mut(), onchain_tree.borrow(), [ bigint_to_be_bytes_array(&nullifier3).unwrap(), bigint_to_be_bytes_array(&nullifier4).unwrap(), ], ) .unwrap(); // Call relayer to update the tree. relayer_update::<H>( &mut onchain_queue.borrow_mut(), &mut onchain_tree.borrow_mut(), ) .unwrap(); } #[test] pub fn test_insert_and_update_poseidon() { insert_and_update::<Poseidon>() } /// Tests the invalid case of inserting the same nullifiers multiple times into /// the queue and Merkle tree - an attempt of double spending. fn double_spend<H>() where H: Hasher, { // On-chain PDAs. let onchain_queue: RefCell<HashSet> = RefCell::new(HashSet::new(20, 0).unwrap()); let onchain_tree: RefCell<IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>> = RefCell::new( IndexedMerkleTree::new( MERKLE_TREE_HEIGHT, MERKLE_TREE_CHANGELOG, MERKLE_TREE_ROOTS, MERKLE_TREE_CANOPY, MERKLE_TREE_INDEXED_CHANGELOG, ) .unwrap(), ); onchain_tree.borrow_mut().init().unwrap(); // Insert a pair of nulifiers. let nullifier1 = 30_u32.to_biguint().unwrap(); let nullifier1: [u8; 32] = bigint_to_be_bytes_array(&nullifier1).unwrap(); let nullifier2 = 10_u32.to_biguint().unwrap(); let nullifier2: [u8; 32] = bigint_to_be_bytes_array(&nullifier2).unwrap(); program_insert::<H>( onchain_queue.borrow_mut(), onchain_tree.borrow(), [nullifier1, nullifier2], ) .unwrap(); // Try inserting the same pair into the queue. It should fail with an error. 
let res = program_insert::<H>( onchain_queue.borrow_mut(), onchain_tree.borrow(), [nullifier1, nullifier2], ); assert!(matches!(res, Err(HashSetError::ElementAlreadyExists))); // Update the on-chain tree (so it contains the nullifiers we inserted). relayer_update::<H>( &mut onchain_queue.borrow_mut(), &mut onchain_tree.borrow_mut(), ) .unwrap(); // The nullifiers are in the tree and not in the queue anymore. We can try // our luck with double-spending again. program_insert::<H>( onchain_queue.borrow_mut(), onchain_tree.borrow(), [nullifier1, nullifier2], ) .unwrap(); // At the same time, insert also some new nullifiers which aren't spent // yet. We want to make sure that they will be processed successfully and // only the invalid nullifiers will produce errors. let nullifier3 = 25_u32.to_biguint().unwrap(); let nullifier4 = 5_u32.to_biguint().unwrap(); program_insert::<H>( onchain_queue.borrow_mut(), onchain_tree.borrow(), [ bigint_to_be_bytes_array(&nullifier3).unwrap(), bigint_to_be_bytes_array(&nullifier4).unwrap(), ], ) .unwrap(); // We expect exactly two errors (for the invalid nullifiers). No more, no // less. let res = relayer_update::<H>( &mut onchain_queue.borrow_mut(), &mut onchain_tree.borrow_mut(), ); assert!(matches!(res, Err(RelayerUpdateError::MerkleTreeUpdate(_)))); } #[test] pub fn test_double_spend_queue_poseidon() { double_spend::<Poseidon>() } /// Try to insert a nullifier to the tree while pointing to an invalid low /// nullifier. /// /// Such invalid insertion needs to be performed manually, without relayer's /// help (which would always insert that nullifier correctly). fn insert_invalid_low_element<H>() where H: Hasher, { // On-chain PDAs. 
let onchain_queue: RefCell<HashSet> = RefCell::new(HashSet::new(QUEUE_ELEMENTS, MERKLE_TREE_ROOTS + SAFETY_MARGIN).unwrap()); let onchain_tree: RefCell<IndexedMerkleTree<H, usize, MERKLE_TREE_HEIGHT, NET_HEIGHT>> = RefCell::new( IndexedMerkleTree::new( MERKLE_TREE_HEIGHT, MERKLE_TREE_CHANGELOG, MERKLE_TREE_ROOTS, MERKLE_TREE_CANOPY, MERKLE_TREE_INDEXED_CHANGELOG, ) .unwrap(), ); onchain_tree.borrow_mut().init().unwrap(); // Local artifacts. let mut local_indexed_array = IndexedArray::<H, usize>::default(); let mut local_merkle_tree = reference::IndexedMerkleTree::<H, usize>::new(MERKLE_TREE_HEIGHT, MERKLE_TREE_CANOPY) .unwrap(); // Insert a pair of nullifiers, correctly. Just do it with relayer. let nullifier1 = 30_u32.to_biguint().unwrap(); let nullifier2 = 10_u32.to_biguint().unwrap(); onchain_queue .borrow_mut() .insert(&nullifier1, onchain_tree.borrow().sequence_number()) .unwrap(); onchain_queue .borrow_mut() .insert(&nullifier2, onchain_tree.borrow().sequence_number()) .unwrap(); let nullifier_bundle = local_indexed_array.append(&nullifier1).unwrap(); local_merkle_tree .update( &nullifier_bundle.new_low_element, &nullifier_bundle.new_element, &nullifier_bundle.new_element_next_value, ) .unwrap(); let nullifier_bundle = local_indexed_array.append(&nullifier2).unwrap(); local_merkle_tree .update( &nullifier_bundle.new_low_element, &nullifier_bundle.new_element, &nullifier_bundle.new_element_next_value, ) .unwrap(); relayer_update( &mut onchain_queue.borrow_mut(), &mut onchain_tree.borrow_mut(), ) .unwrap(); // Try inserting nullifier 20, while pointing to index 1 (value 30) as low // nullifier. Point to index 2 (value 10) as next value. // Therefore, the new element is lowe than the supposed low element. 
let nullifier3 = 20_u32.to_biguint().unwrap(); onchain_queue .borrow_mut() .insert(&nullifier3, onchain_tree.borrow().sequence_number()) .unwrap(); let changelog_index = onchain_tree.borrow().changelog_index(); let indexed_changelog_index = onchain_tree.borrow().indexed_changelog_index(); // Index of our new nullifier in the queue. let queue_index = onchain_queue .borrow() .find_element_index(&nullifier3, None) .unwrap() .unwrap(); // (Invalid) low nullifier. let low_nullifier = local_indexed_array.get(1).cloned().unwrap(); let low_nullifier_next_value = local_indexed_array .get(usize::from(low_nullifier.next_index)) .cloned() .unwrap() .value; let mut low_nullifier_proof = local_merkle_tree.get_proof_of_leaf(1, false).unwrap(); assert!(matches!( program_update( &mut onchain_queue.borrow_mut(), &mut onchain_tree.borrow_mut(), changelog_index as u16, indexed_changelog_index as u16, queue_index as u16, low_nullifier, &low_nullifier_next_value, &mut low_nullifier_proof, ), Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement) )); // Try inserting nullifier 50, while pointing to index 0 as low nullifier. // Therefore, the new element is greate than next element. let nullifier3 = 50_u32.to_biguint().unwrap(); onchain_queue .borrow_mut() .insert(&nullifier3, onchain_tree.borrow().sequence_number()) .unwrap(); let changelog_index = onchain_tree.borrow().changelog_index(); let indexed_changelog_index = onchain_tree.borrow().indexed_changelog_index(); // Index of our new nullifier in the queue. let queue_index = onchain_queue .borrow() .find_element_index(&nullifier3, None) .unwrap() .unwrap(); // (Invalid) low nullifier. 
let low_nullifier = local_indexed_array.get(0).cloned().unwrap(); let low_nullifier_next_value = local_indexed_array .get(usize::from(low_nullifier.next_index)) .cloned() .unwrap() .value; let mut low_nullifier_proof = local_merkle_tree.get_proof_of_leaf(0, false).unwrap(); assert!(matches!( program_update( &mut onchain_queue.borrow_mut(), &mut onchain_tree.borrow_mut(), changelog_index as u16, indexed_changelog_index as u16, queue_index as u16, low_nullifier, &low_nullifier_next_value, &mut low_nullifier_proof, ), Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement) )); let nullifier4 = 45_u32.to_biguint().unwrap(); onchain_queue .borrow_mut() .insert(&nullifier4, onchain_tree.borrow().sequence_number()) .unwrap(); let changelog_index = onchain_tree.borrow().changelog_index(); let indexed_changelog_index = onchain_tree.borrow().indexed_changelog_index(); let (low_nullifier, low_nullifier_next_value) = local_indexed_array .find_low_element_for_nonexistent(&nullifier4) .unwrap(); let mut low_nullifier_proof = local_merkle_tree .get_proof_of_leaf(low_nullifier.index(), false) .unwrap(); let result = program_update( &mut onchain_queue.borrow_mut(), &mut onchain_tree.borrow_mut(), changelog_index as u16, indexed_changelog_index as u16, queue_index as u16, low_nullifier, &low_nullifier_next_value, &mut low_nullifier_proof, ); println!("result {:?}", result); assert!(matches!( result, Err(IndexedMerkleTreeError::ConcurrentMerkleTree( ConcurrentMerkleTreeError::InvalidProof(_, _) )) )); } #[test] pub fn test_insert_invalid_low_element_poseidon() { insert_invalid_low_element::<Poseidon>() } #[test] pub fn hash_reference_indexed_element() { let element = IndexedElement::<usize> { value: 0.to_biguint().unwrap(), index: 0, next_index: 1, }; let next_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10).unwrap(); let hash = element.hash::<Poseidon>(&next_value).unwrap(); assert_eq!( hash, [ 40, 8, 192, 134, 75, 198, 77, 187, 129, 249, 133, 121, 54, 189, 
242, 28, 117, 71, 255, 32, 155, 52, 136, 196, 99, 146, 204, 174, 160, 238, 0, 110 ] ); } #[test] pub fn functional_non_inclusion_test() { let mut relayer_indexing_array = IndexedArray::<Poseidon, usize>::default(); // appends the first element let mut relayer_merkle_tree = reference::IndexedMerkleTree::<Poseidon, usize>::new( MERKLE_TREE_HEIGHT, MERKLE_TREE_CANOPY, ) .unwrap(); let nullifier1 = 30_u32.to_biguint().unwrap(); relayer_merkle_tree .append(&nullifier1, &mut relayer_indexing_array) .unwrap(); // indexed array: // element: 0 // value: 0 // next_value: 30 // index: 0 // element: 1 // value: 30 // next_value: 0 // index: 1 // merkle tree: // leaf index: 0 = H(0, 1, 30) //Hash(value, next_index, next_value) // leaf index: 1 = H(30, 0, 0) let indexed_array_element_0 = relayer_indexing_array.get(0).unwrap(); assert_eq!(indexed_array_element_0.value, 0_u32.to_biguint().unwrap()); assert_eq!(indexed_array_element_0.next_index, 1); assert_eq!(indexed_array_element_0.index, 0); let indexed_array_element_1 = relayer_indexing_array.get(1).unwrap(); assert_eq!(indexed_array_element_1.value, 30_u32.to_biguint().unwrap()); assert_eq!(indexed_array_element_1.next_index, 0); assert_eq!(indexed_array_element_1.index, 1); let leaf_0 = relayer_merkle_tree.merkle_tree.get_leaf(0); let leaf_1 = relayer_merkle_tree.merkle_tree.get_leaf(1); assert_eq!( leaf_0, Poseidon::hashv(&[ &0_u32.to_biguint().unwrap().to_bytes_be(), &1_u32.to_biguint().unwrap().to_bytes_be(), &30_u32.to_biguint().unwrap().to_bytes_be() ]) .unwrap() ); assert_eq!( leaf_1, Poseidon::hashv(&[ &30_u32.to_biguint().unwrap().to_bytes_be(), &0_u32.to_biguint().unwrap().to_bytes_be(), &0_u32.to_biguint().unwrap().to_bytes_be() ]) .unwrap() ); let non_inclusion_proof = relayer_merkle_tree .get_non_inclusion_proof(&10_u32.to_biguint().unwrap(), &relayer_indexing_array) .unwrap(); assert_eq!(non_inclusion_proof.root, relayer_merkle_tree.root()); assert_eq!( non_inclusion_proof.value, 
bigint_to_be_bytes_array::<32>(&10_u32.to_biguint().unwrap()).unwrap() ); assert_eq!(non_inclusion_proof.leaf_lower_range_value, [0; 32]); assert_eq!( non_inclusion_proof.leaf_higher_range_value, bigint_to_be_bytes_array::<32>(&30_u32.to_biguint().unwrap()).unwrap() ); assert_eq!(non_inclusion_proof.leaf_index, 0); relayer_merkle_tree .verify_non_inclusion_proof(&non_inclusion_proof) .unwrap(); } // /** // * // * Range Hash (value, next_index, next_value) -> need next value not next value index // * Update of a range: // * 1. Find the low element, low element points to the next hight element // * 2. update low element with H (low_value, new_inserted_value_index, new_inserted_value) // * 3. append the tree with H(new_inserted_value,index_of_next_value, next_value) // * // */ // /// This test is generating a situation where the low element has to be patched. // /// Scenario: // /// 1. two parties start with the initialized indexing array // /// 2. both parties compute their values with the empty indexed Merkle tree state // /// 3. party one inserts first // /// 4. party two needs to patch the low element because the low element has changed // /// 5. 
party two inserts // Commented because the test is not working // TODO: figure out address Merkle tree changelog // #[test] // pub fn functional_changelog_test() { // let address_1 = 30_u32.to_biguint().unwrap(); // let address_2 = 10_u32.to_biguint().unwrap(); // cargo test -- --nocapture print_test_data #[test] #[ignore = "only used to generate test data"] pub fn print_test_data() { let mut relayer_indexing_array = IndexedArray::<Poseidon, usize>::default(); relayer_indexing_array.init().unwrap(); let mut relayer_merkle_tree = reference::IndexedMerkleTree::<Poseidon, usize>::new(4, 0).unwrap(); relayer_merkle_tree.init().unwrap(); let root = relayer_merkle_tree.root(); let root_bn = BigUint::from_bytes_be(&root); println!("root {:?}", root_bn); println!("indexed mt inited root {:?}", relayer_merkle_tree.root()); let address1 = 30_u32.to_biguint().unwrap(); let test_address: BigUint = BigUint::from_bytes_be(&[ 171, 159, 63, 33, 62, 94, 156, 27, 61, 216, 203, 164, 91, 229, 110, 16, 230, 124, 129, 133, 222, 159, 99, 235, 50, 181, 94, 203, 105, 23, 82, ]); let non_inclusion_proof_0 = relayer_merkle_tree .get_non_inclusion_proof(&test_address, &relayer_indexing_array) .unwrap(); println!("non inclusion proof init {:?}", non_inclusion_proof_0); relayer_merkle_tree .append(&address1, &mut relayer_indexing_array) .unwrap(); println!( "indexed mt with one append {:?}", relayer_merkle_tree.root() ); let root_bn = BigUint::from_bytes_be(&relayer_merkle_tree.root()); println!("indexed mt with one append {:?}", root_bn); let proof = relayer_merkle_tree.get_proof_of_leaf(2, true).unwrap(); let leaf = relayer_merkle_tree.merkle_tree.get_leaf(2); let leaf_bn = BigUint::from_bytes_be(&leaf); println!("(30) leaf_hash[2] = {:?}", leaf_bn); let subtrees = relayer_merkle_tree.merkle_tree.get_subtrees(); for subtree in subtrees { let subtree_bn = BigUint::from_bytes_be(&subtree); println!("subtree = {:?}", subtree_bn); } let res = relayer_merkle_tree.merkle_tree.verify(&leaf, &proof, 
2); println!("verify leaf 2 {:?}", res); println!( "indexed array state element 0 {:?}", relayer_indexing_array.get(0).unwrap() ); println!( "indexed array state element 1 {:?}", relayer_indexing_array.get(1).unwrap() ); println!( "indexed array state element 2 {:?}", relayer_indexing_array.get(2).unwrap() ); let address2 = 42_u32.to_biguint().unwrap(); let non_inclusion_proof = relayer_merkle_tree .get_non_inclusion_proof(&address2, &relayer_indexing_array) .unwrap(); println!("non inclusion proof address 2 {:?}", non_inclusion_proof); relayer_merkle_tree .append(&address2, &mut relayer_indexing_array) .unwrap(); println!( "indexed mt with two appends {:?}", relayer_merkle_tree.root() ); let root_bn = BigUint::from_bytes_be(&relayer_merkle_tree.root()); println!("indexed mt with two appends {:?}", root_bn); println!( "indexed array state element 0 {:?}", relayer_indexing_array.get(0).unwrap() ); println!( "indexed array state element 1 {:?}", relayer_indexing_array.get(1).unwrap() ); println!( "indexed array state element 2 {:?}", relayer_indexing_array.get(2).unwrap() ); println!( "indexed array state element 3 {:?}", relayer_indexing_array.get(3).unwrap() ); let address3 = 12_u32.to_biguint().unwrap(); let non_inclusion_proof = relayer_merkle_tree .get_non_inclusion_proof(&address3, &relayer_indexing_array) .unwrap(); relayer_merkle_tree .append(&address3, &mut relayer_indexing_array) .unwrap(); println!( "indexed mt with three appends {:?}", relayer_merkle_tree.root() ); let root_bn = BigUint::from_bytes_be(&relayer_merkle_tree.root()); println!("indexed mt with three appends {:?}", root_bn); println!("non inclusion proof address 3 {:?}", non_inclusion_proof); println!( "indexed array state element 0 {:?}", relayer_indexing_array.get(0).unwrap() ); println!( "indexed array state element 1 {:?}", relayer_indexing_array.get(1).unwrap() ); println!( "indexed array state element 2 {:?}", relayer_indexing_array.get(2).unwrap() ); println!( "indexed array state 
element 3 {:?}", relayer_indexing_array.get(3).unwrap() ); println!( "indexed array state element 4 {:?}", relayer_indexing_array.get(4).unwrap() ); // // indexed array: // // element: 0 // // value: 0 // // next_value: 30 // // index: 0 // // element: 1 // // value: 30 // // next_value: 0 // // index: 1 // // merkle tree: // // leaf index: 0 = H(0, 1, 30) //Hash(value, next_index, next_value) // // leaf index: 1 = H(30, 0, 0) // let indexed_array_element_0 = relayer_indexing_array.get(0).unwrap(); // assert_eq!(indexed_array_element_0.value, 0_u32.to_biguint().unwrap()); // assert_eq!(indexed_array_element_0.next_index, 1); // assert_eq!(indexed_array_element_0.index, 0); // let indexed_array_element_1 = relayer_indexing_array.get(1).unwrap(); // assert_eq!(indexed_array_element_1.value, 30_u32.to_biguint().unwrap()); // assert_eq!(indexed_array_element_1.next_index, 0); // assert_eq!(indexed_array_element_1.index, 1); // let leaf_0 = relayer_merkle_tree.merkle_tree.leaf(0); // let leaf_1 = relayer_merkle_tree.merkle_tree.leaf(1); // assert_eq!( // leaf_0, // Poseidon::hashv(&[ // &0_u32.to_biguint().unwrap().to_bytes_be(), // &1_u32.to_biguint().unwrap().to_bytes_be(), // &30_u32.to_biguint().unwrap().to_bytes_be() // ]) // .unwrap() // ); // assert_eq!( // leaf_1, // Poseidon::hashv(&[ // &30_u32.to_biguint().unwrap().to_bytes_be(), // &0_u32.to_biguint().unwrap().to_bytes_be(), // &0_u32.to_biguint().unwrap().to_bytes_be() // ]) // .unwrap() // ); // let non_inclusion_proof = relayer_merkle_tree // .get_non_inclusion_proof(&10_u32.to_biguint().unwrap(), &relayer_indexing_array) // .unwrap(); // assert_eq!(non_inclusion_proof.root, relayer_merkle_tree.root()); // assert_eq!( // non_inclusion_proof.value, // bigint_to_be_bytes_array::<32>(&10_u32.to_biguint().unwrap()).unwrap() // ); // assert_eq!(non_inclusion_proof.leaf_lower_range_value, [0; 32]); // assert_eq!( // non_inclusion_proof.leaf_higher_range_value, // 
bigint_to_be_bytes_array::<32>(&30_u32.to_biguint().unwrap()).unwrap() // ); // assert_eq!(non_inclusion_proof.leaf_index, 0); // relayer_merkle_tree // .verify_non_inclusion_proof(&non_inclusion_proof) // .unwrap(); } /// Performs conflicting Merkle tree updates where: /// /// 1. Party one inserts 30. /// 2. Party two inserts 10. /// /// In this case, party two needs to update: /// /// * The inserted element (10) to point to 30 as the next one. #[test] fn functional_changelog_test_1() { let address_1 = 30_u32.to_biguint().unwrap(); let address_2 = 10_u32.to_biguint().unwrap(); let address_3 = 11_u32.to_biguint().unwrap(); const HEIGHT: usize = 10; perform_change_log_test::<false, false, HEIGHT, 16, 16, 0, 16, HEIGHT>(&[ address_1, address_2, address_3, ]); } /// Performs conflicting Merkle tree updates where: /// /// 1. Party one inserts 10. /// 2. Party two inserts 30. /// /// In this case, party two needs to update: /// /// * The low element from 0 to 10. #[test] fn functional_changelog_test_2() { let address_1 = 10_u32.to_biguint().unwrap(); let address_2 = 30_u32.to_biguint().unwrap(); const HEIGHT: usize = 10; perform_change_log_test::<false, false, HEIGHT, 16, 16, 0, 16, HEIGHT>(&[address_1, address_2]); } /// Performs conflicting Merkle tree updates where: /// /// 1. Party one inserts 30. /// 2. Party two inserts 10. /// 3. Party three inserts 20. /// /// In this case: /// /// * Party one: /// * Updates the inserted element (10) to point to 30 as the next one. /// * Party two: /// * Updates the low element from 0 to 10. #[test] fn functional_changelog_test_3() { let address_1 = 30_u32.to_biguint().unwrap(); let address_2 = 10_u32.to_biguint().unwrap(); let address_3 = 20_u32.to_biguint().unwrap(); const HEIGHT: usize = 10; perform_change_log_test::<false, false, HEIGHT, 16, 16, 0, 16, HEIGHT>(&[ address_1, address_2, address_3, ]); } /// Performs conflicting Merkle tree updates where two parties try to insert /// the same element. 
#[test] fn functional_changelog_test_double_spend() { let address = 10_u32.to_biguint().unwrap(); const HEIGHT: usize = 10; perform_change_log_test::<true, false, HEIGHT, 16, 16, 0, 16, HEIGHT>(&[ address.clone(), address.clone(), ]); } #[test] fn functional_changelog_test_random_8_512_512_0_512() { const HEIGHT: usize = 8; const CHANGELOG: usize = 512; const ROOTS: usize = 512; const CANOPY: usize = 0; const INDEXED_CHANGELOG: usize = 512; const N_OPERATIONS: usize = (1 << HEIGHT) / 2; const NET_HEIGHT: usize = HEIGHT - CANOPY; functional_changelog_test_random::< false, HEIGHT, CHANGELOG, ROOTS, CANOPY, INDEXED_CHANGELOG, N_OPERATIONS, NET_HEIGHT, >() } /// Performs concurrent updates, where the indexed changelog eventually wraps /// around. Updates with an old proof and old changelog index are expected to /// fail. #[test] fn functional_changelog_test_random_wrap_around_8_128_512_0_512() { const HEIGHT: usize = 8; const CHANGELOG: usize = 512; const ROOTS: usize = 512; const CANOPY: usize = 0; const INDEXED_CHANGELOG: usize = 128; const N_OPERATIONS: usize = (1 << HEIGHT) / 2; const NET_HEIGHT: usize = HEIGHT - CANOPY; for _ in 0..100 { functional_changelog_test_random::< true, HEIGHT, CHANGELOG, ROOTS, CANOPY, INDEXED_CHANGELOG, N_OPERATIONS, NET_HEIGHT, >() } } /// Performs `N_OPERATIONS` concurrent updates with random elements. All of them without /// updating the changelog indices. All of them should result in using indexed changelog /// for patching the proof. 
fn functional_changelog_test_random< const WRAP_AROUND: bool, const HEIGHT: usize, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize, const INDEXED_CHANGELOG: usize, const N_OPERATIONS: usize, const NET_HEIGHT: usize, >() { let mut rng = thread_rng(); let leaves: Vec<BigUint> = (0..N_OPERATIONS).map(|_| rng.gen_biguint(248)).collect(); perform_change_log_test::< false, WRAP_AROUND, HEIGHT, CHANGELOG, ROOTS, CANOPY, INDEXED_CHANGELOG, NET_HEIGHT, >(&leaves); } /// Performs conflicting Merkle tree updates where multiple actors try to add /// add new ranges when using the same (for the most of actors - outdated) /// Merkle proofs and changelog indices. /// /// Scenario: /// /// 1. Two paries start with the same indexed array state. /// 2. Both parties compute their values with the same indexed Merkle tree /// state. /// 3. Party one inserts first. /// 4. Party two needs to patch the low element, because the low element has /// changed. /// 5. Party two inserts. /// 6. Party N needs to patch the low element, because the low element has /// changed. /// 7. Party N inserts. /// /// `DOUBLE_SPEND` indicates whether the provided addresses are an attempt to /// double-spend by the subsequent parties. When set to `true`, we expect /// subsequent updates to fail. fn perform_change_log_test< const DOUBLE_SPEND: bool, const WRAP_AROUND: bool, const HEIGHT: usize, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize, const INDEXED_CHANGELOG: usize, const NET_HEIGHT: usize, >( addresses: &[BigUint], ) { // Initialize the trees and indexed array. 
let mut relayer_indexed_array = IndexedArray::<Poseidon, usize>::default(); relayer_indexed_array.init().unwrap(); let mut relayer_merkle_tree = reference::IndexedMerkleTree::<Poseidon, usize>::new(HEIGHT, CANOPY).unwrap(); let mut onchain_indexed_merkle_tree = IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::new( HEIGHT, CHANGELOG, ROOTS, CANOPY, INDEXED_CHANGELOG, ) .unwrap(); onchain_indexed_merkle_tree.init().unwrap(); onchain_indexed_merkle_tree.add_highest_element().unwrap(); relayer_merkle_tree.init().unwrap(); assert_eq!( relayer_merkle_tree.root(), onchain_indexed_merkle_tree.root(), "environment setup failed relayer and onchain indexed Merkle tree roots are inconsistent" ); // Perform updates for each actor, where every of them is using the same // changelog indices, generating a conflict which needs to be solved by // patching from changelog. let mut indexed_arrays = vec![relayer_indexed_array.clone(); addresses.len()]; let changelog_index = onchain_indexed_merkle_tree.changelog_index(); let indexed_changelog_index = onchain_indexed_merkle_tree.indexed_changelog_index(); for (i, (address, indexed_array)) in addresses.iter().zip(indexed_arrays.iter_mut()).enumerate() { let (old_low_address, old_low_address_next_value) = indexed_array .find_low_element_for_nonexistent(&address) .unwrap(); let address_bundle = indexed_array .new_element_with_low_element_index(old_low_address.index, address) .unwrap(); let mut low_element_proof = relayer_merkle_tree .get_proof_of_leaf(old_low_address.index, false) .unwrap(); if DOUBLE_SPEND && i > 0 { let res = onchain_indexed_merkle_tree.update( changelog_index, indexed_changelog_index, address_bundle.new_element.value, old_low_address, old_low_address_next_value, &mut low_element_proof, ); assert!(matches!( res, Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement) )); } else if WRAP_AROUND && (i + 1) * 2 > INDEXED_CHANGELOG { // After a wrap-around of the indexed changelog, we expect leaf // updates 
to break immediately. let res = onchain_indexed_merkle_tree.update( changelog_index, indexed_changelog_index, address_bundle.new_element.value.clone(), old_low_address.clone(), old_low_address_next_value, &mut low_element_proof, ); println!("changelog_index {:?}", changelog_index); println!("indexed_changelog_index {:?}", indexed_changelog_index); println!( "address_bundle new_element_next_value{:?}", address_bundle.new_element_next_value ); println!( "address_bundle new_element {:?}", address_bundle.new_element ); println!("old_low_address {:?}", old_low_address); println!("res {:?}", res); assert!(matches!( res, Err(IndexedMerkleTreeError::ConcurrentMerkleTree( ConcurrentMerkleTreeError::CannotUpdateLeaf )) )); } else { onchain_indexed_merkle_tree .update( changelog_index, indexed_changelog_index, address_bundle.new_element.value, old_low_address, old_low_address_next_value, &mut low_element_proof, ) .unwrap(); for i in onchain_indexed_merkle_tree.changelog.iter() { println!("indexed array state element {:?} ", i); } } } }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/copy.rs
use std::{fmt, marker::PhantomData, ops::Deref}; use crate::{errors::IndexedMerkleTreeError, IndexedMerkleTree}; use light_bounded_vec::CyclicBoundedVecMetadata; use light_concurrent_merkle_tree::{ copy::ConcurrentMerkleTreeCopy, errors::ConcurrentMerkleTreeError, }; use light_hasher::Hasher; use light_utils::offset::copy::{read_cyclic_bounded_vec_at, read_value_at}; use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned}; #[derive(Debug)] pub struct IndexedMerkleTreeCopy<H, I, const HEIGHT: usize, const NET_HEIGHT: usize>( IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>, ) where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>; impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> IndexedMerkleTreeCopy<H, I, HEIGHT, NET_HEIGHT> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { /// Casts a byte slice into wrapped `IndexedMerkleTree` structure reference, /// including dynamic fields. /// /// # Purpose /// /// This method is meant to be used mostly in Solana programs, where memory /// constraints are tight and we want to make sure no data is copied. 
pub fn from_bytes_copy(bytes: &[u8]) -> Result<Self, IndexedMerkleTreeError> { let (merkle_tree, mut offset) = ConcurrentMerkleTreeCopy::<H, HEIGHT>::struct_from_bytes_copy(bytes)?; let indexed_changelog_metadata: CyclicBoundedVecMetadata = unsafe { read_value_at(bytes, &mut offset) }; let expected_size = IndexedMerkleTree::<H, I, HEIGHT, NET_HEIGHT>::size_in_account( merkle_tree.height, merkle_tree.changelog.capacity(), merkle_tree.roots.capacity(), merkle_tree.canopy_depth, indexed_changelog_metadata.capacity(), ); if bytes.len() < expected_size { return Err(IndexedMerkleTreeError::ConcurrentMerkleTree( ConcurrentMerkleTreeError::BufferSize(expected_size, bytes.len()), )); } let indexed_changelog = unsafe { read_cyclic_bounded_vec_at(bytes, &mut offset, &indexed_changelog_metadata) }; Ok(Self(IndexedMerkleTree { merkle_tree, indexed_changelog, _index: PhantomData, })) } } impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> Deref for IndexedMerkleTreeCopy<H, I, HEIGHT, NET_HEIGHT> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { type Target = IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>; fn deref(&self) -> &Self::Target { &self.0 } } #[cfg(test)] mod test { use light_hasher::Poseidon; use light_utils::bigint::bigint_to_be_bytes_array; use num_bigint::RandBigInt; use rand::thread_rng; use crate::zero_copy::IndexedMerkleTreeZeroCopyMut; use super::*; fn from_bytes_copy< const HEIGHT: usize, const CHANGELOG_SIZE: usize, const ROOTS: usize, const CANOPY_DEPTH: usize, const INDEXED_CHANGELOG_SIZE: usize, const OPERATIONS: usize, const NET_HEIGHT: usize, >() { let mut mt_1 = IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::new( HEIGHT, CHANGELOG_SIZE, ROOTS, CANOPY_DEPTH, INDEXED_CHANGELOG_SIZE, ) .unwrap(); mt_1.init().unwrap(); let mut bytes = vec![ 0u8; IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::size_in_account( HEIGHT, CHANGELOG_SIZE, ROOTS, 
CANOPY_DEPTH, INDEXED_CHANGELOG_SIZE ) ]; { let mut mt_2 = IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, HEIGHT, NET_HEIGHT>::from_bytes_zero_copy_init( &mut bytes, HEIGHT, CANOPY_DEPTH, CHANGELOG_SIZE, ROOTS, INDEXED_CHANGELOG_SIZE, ) .unwrap(); mt_2.init().unwrap(); assert_eq!(mt_1, *mt_2); } let mut rng = thread_rng(); for _ in 0..OPERATIONS { // Reload the tree from bytes on each iteration. let mut mt_2 = IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, HEIGHT,NET_HEIGHT>::from_bytes_zero_copy_mut( &mut bytes, ) .unwrap(); let leaf: [u8; 32] = bigint_to_be_bytes_array::<32>(&rng.gen_biguint(248)).unwrap(); mt_1.append(&leaf).unwrap(); mt_2.append(&leaf).unwrap(); assert_eq!(mt_1, *mt_2); } // Read a copy of that Merkle tree. let mt_2 = IndexedMerkleTreeCopy::<Poseidon, usize, HEIGHT, NET_HEIGHT>::from_bytes_copy(&bytes) .unwrap(); assert_eq!(mt_1, *mt_2); } #[test] fn test_from_bytes_copy_26_1400_2400_10_256_1024() { const HEIGHT: usize = 26; const CHANGELOG_SIZE: usize = 1400; const ROOTS: usize = 2400; const CANOPY_DEPTH: usize = 10; const INDEXED_CHANGELOG_SIZE: usize = 256; const NET_HEIGHT: usize = 16; const OPERATIONS: usize = 1024; from_bytes_copy::< HEIGHT, CHANGELOG_SIZE, ROOTS, CANOPY_DEPTH, INDEXED_CHANGELOG_SIZE, OPERATIONS, NET_HEIGHT, >() } }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/zero_copy.rs
use std::{ fmt, marker::PhantomData, mem, ops::{Deref, DerefMut}, }; use light_bounded_vec::{CyclicBoundedVec, CyclicBoundedVecMetadata}; use light_concurrent_merkle_tree::{ errors::ConcurrentMerkleTreeError, zero_copy::{ConcurrentMerkleTreeZeroCopy, ConcurrentMerkleTreeZeroCopyMut}, ConcurrentMerkleTree, }; use light_hasher::Hasher; use light_utils::offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at}; use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned}; use crate::{errors::IndexedMerkleTreeError, IndexedMerkleTree}; #[derive(Debug)] pub struct IndexedMerkleTreeZeroCopy<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { pub merkle_tree: mem::ManuallyDrop<IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>>, // The purpose of this field is ensuring that the wrapper does not outlive // the buffer. _bytes: &'a [u8], } impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> IndexedMerkleTreeZeroCopy<'a, H, I, HEIGHT, NET_HEIGHT> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { /// Returns a zero-copy wrapper of `IndexedMerkleTree` created from the /// data in the provided `bytes` buffer. 
pub fn from_bytes_zero_copy(bytes: &'a [u8]) -> Result<Self, IndexedMerkleTreeError> { let (merkle_tree, mut offset) = ConcurrentMerkleTreeZeroCopy::struct_from_bytes_zero_copy(bytes)?; let indexed_changelog_metadata: *mut CyclicBoundedVecMetadata = unsafe { read_ptr_at(bytes, &mut offset) }; let expected_size = IndexedMerkleTree::<H, I, HEIGHT, NET_HEIGHT>::size_in_account( merkle_tree.height, merkle_tree.changelog.capacity(), merkle_tree.roots.capacity(), merkle_tree.canopy_depth, unsafe { (*indexed_changelog_metadata).capacity() }, ); if bytes.len() < expected_size { return Err(IndexedMerkleTreeError::ConcurrentMerkleTree( ConcurrentMerkleTreeError::BufferSize(expected_size, bytes.len()), )); } let indexed_changelog = unsafe { CyclicBoundedVec::from_raw_parts( indexed_changelog_metadata, read_array_like_ptr_at( bytes, &mut offset, (*indexed_changelog_metadata).capacity(), ), ) }; Ok(Self { merkle_tree: mem::ManuallyDrop::new(IndexedMerkleTree { merkle_tree, indexed_changelog, _index: PhantomData, }), _bytes: bytes, }) } } impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> Deref for IndexedMerkleTreeZeroCopy<'a, H, I, HEIGHT, NET_HEIGHT> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { type Target = IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>; fn deref(&self) -> &Self::Target { &self.merkle_tree } } #[derive(Debug)] pub struct IndexedMerkleTreeZeroCopyMut<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize>( IndexedMerkleTreeZeroCopy<'a, H, I, HEIGHT, NET_HEIGHT>, ) where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>; impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> IndexedMerkleTreeZeroCopyMut<'a, H, I, HEIGHT, NET_HEIGHT> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, 
{ pub fn from_bytes_zero_copy_mut(bytes: &'a mut [u8]) -> Result<Self, IndexedMerkleTreeError> { Ok(Self(IndexedMerkleTreeZeroCopy::from_bytes_zero_copy( bytes, )?)) } pub fn from_bytes_zero_copy_init( bytes: &'a mut [u8], height: usize, canopy_depth: usize, changelog_capacity: usize, roots_capacity: usize, indexed_changelog_capacity: usize, ) -> Result<Self, IndexedMerkleTreeError> { let _ = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::fill_non_dyn_fields_in_buffer( bytes, height, canopy_depth, changelog_capacity, roots_capacity, )?; let expected_size = IndexedMerkleTree::<H, I, HEIGHT, NET_HEIGHT>::size_in_account( height, changelog_capacity, roots_capacity, canopy_depth, indexed_changelog_capacity, ); if bytes.len() < expected_size { return Err(IndexedMerkleTreeError::ConcurrentMerkleTree( ConcurrentMerkleTreeError::BufferSize(expected_size, bytes.len()), )); } let mut offset = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account( height, changelog_capacity, roots_capacity, canopy_depth, ); let indexed_changelog_metadata = CyclicBoundedVecMetadata::new(indexed_changelog_capacity); write_at::<CyclicBoundedVecMetadata>( bytes, &indexed_changelog_metadata.to_le_bytes(), &mut offset, ); Self::from_bytes_zero_copy_mut(bytes) } } impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> Deref for IndexedMerkleTreeZeroCopyMut<'a, H, I, HEIGHT, NET_HEIGHT> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { type Target = IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>; fn deref(&self) -> &Self::Target { &self.0.merkle_tree } } impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> DerefMut for IndexedMerkleTreeZeroCopyMut<'a, H, I, HEIGHT, NET_HEIGHT> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + fmt::Debug + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0.merkle_tree } } #[cfg(test)] 
mod test { use light_hasher::Poseidon; use light_utils::bigint::bigint_to_be_bytes_array; use num_bigint::RandBigInt; use rand::thread_rng; use super::*; fn from_bytes_zero_copy< const HEIGHT: usize, const NET_HEIGHT: usize, const CHANGELOG_SIZE: usize, const ROOTS: usize, const CANOPY_DEPTH: usize, const INDEXED_CHANGELOG_SIZE: usize, const OPERATIONS: usize, >() { let mut mt_1 = IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::new( HEIGHT, CHANGELOG_SIZE, ROOTS, CANOPY_DEPTH, INDEXED_CHANGELOG_SIZE, ) .unwrap(); mt_1.init().unwrap(); let mut bytes = vec![ 0u8; IndexedMerkleTree::<Poseidon, usize, HEIGHT, NET_HEIGHT>::size_in_account( HEIGHT, CHANGELOG_SIZE, ROOTS, CANOPY_DEPTH, INDEXED_CHANGELOG_SIZE ) ]; { let mut mt_2 = IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, HEIGHT, NET_HEIGHT>::from_bytes_zero_copy_init( &mut bytes, HEIGHT, CANOPY_DEPTH, CHANGELOG_SIZE, ROOTS, INDEXED_CHANGELOG_SIZE, ) .unwrap(); mt_2.init().unwrap(); assert_eq!(mt_1, *mt_2); } let mut rng = thread_rng(); for _ in 0..OPERATIONS { // Reload the tree from bytes on each iteration. let mut mt_2 = IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, HEIGHT,NET_HEIGHT>::from_bytes_zero_copy_mut( &mut bytes, ) .unwrap(); let leaf: [u8; 32] = bigint_to_be_bytes_array::<32>(&rng.gen_biguint(248)).unwrap(); mt_1.append(&leaf).unwrap(); mt_2.append(&leaf).unwrap(); assert_eq!(mt_1, *mt_2); } } #[test] fn test_from_bytes_zero_copy_26_1400_2400_10_256_1024() { const HEIGHT: usize = 26; const NET_HEIGHT: usize = 16; const CHANGELOG_SIZE: usize = 1400; const ROOTS: usize = 2400; const CANOPY_DEPTH: usize = 10; const INDEXED_CHANGELOG_SIZE: usize = 256; const OPERATIONS: usize = 1024; from_bytes_zero_copy::< HEIGHT, NET_HEIGHT, CHANGELOG_SIZE, ROOTS, CANOPY_DEPTH, INDEXED_CHANGELOG_SIZE, OPERATIONS, >() } }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/lib.rs
use std::{
    fmt,
    marker::PhantomData,
    mem,
    ops::{Deref, DerefMut},
};

use array::{IndexedArray, IndexedElement};
use changelog::IndexedChangelogEntry;
use light_bounded_vec::{BoundedVec, CyclicBoundedVec, CyclicBoundedVecMetadata};
use light_concurrent_merkle_tree::{
    errors::ConcurrentMerkleTreeError,
    event::{IndexedMerkleTreeUpdate, RawIndexedElement},
    light_hasher::Hasher,
    ConcurrentMerkleTree,
};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::BigUint;
use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned};

pub mod array;
pub mod changelog;
pub mod copy;
pub mod errors;
pub mod reference;
pub mod zero_copy;

use crate::errors::IndexedMerkleTreeError;

/// Decimal string used by `IndexedArray::init` to seed the highest element
/// of the address space (see `add_highest_element` below).
/// NOTE(review): the value appears to be 2^248 - 1 despite the `PLUS_ONE`
/// name — confirm against the circuit constraints before relying on it.
pub const HIGHEST_ADDRESS_PLUS_ONE: &str =
    "452312848583266388373324160190187140051835877600158453279131187530910662655";

/// An indexed Merkle tree: a concurrent Merkle tree whose leaves form a
/// sorted linked list of `IndexedElement`s, enabling non-inclusion proofs.
///
/// Type parameters:
/// * `H` - hasher used for leaves and internal nodes.
/// * `I` - integer type used for element indices (must round-trip with `usize`).
/// * `HEIGHT` - full tree height.
/// * `NET_HEIGHT` - height minus canopy depth; fixes the length of proofs
///   stored in the indexed changelog.
///
/// `#[repr(C)]` because the struct is reinterpreted from raw account bytes
/// by the zero-copy wrappers in the `zero_copy` module.
#[derive(Debug)]
#[repr(C)]
pub struct IndexedMerkleTree<H, I, const HEIGHT: usize, const NET_HEIGHT: usize>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    /// Underlying concurrent Merkle tree storing the hashed leaves.
    pub merkle_tree: ConcurrentMerkleTree<H, HEIGHT>,
    /// Cyclic log of recent low/new element updates, used to patch stale
    /// proofs supplied by concurrent callers.
    pub indexed_changelog: CyclicBoundedVec<IndexedChangelogEntry<I, NET_HEIGHT>>,
    _index: PhantomData<I>,
}

/// Convenience alias for the production configuration (height 26, canopy 10).
pub type IndexedMerkleTree26<H, I> = IndexedMerkleTree<H, I, 26, 16>;

impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize>
    IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    /// Size of the struct **without** dynamically sized fields (`BoundedVec`,
    /// `CyclicBoundedVec`).
    pub fn non_dyn_fields_size() -> usize {
        ConcurrentMerkleTree::<H, HEIGHT>::non_dyn_fields_size()
            // indexed_changelog (metadata)
            + mem::size_of::<CyclicBoundedVecMetadata>()
    }

    // TODO(vadorovsky): Make a macro for that.
pub fn size_in_account(
    height: usize,
    changelog_size: usize,
    roots_size: usize,
    canopy_depth: usize,
    indexed_changelog_size: usize,
) -> usize {
    // Base concurrent-tree footprint plus the indexed changelog's metadata
    // header and its `indexed_changelog_size` fixed-size entries. This must
    // match the write order in `zero_copy::from_bytes_zero_copy_init`.
    ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
        height,
        changelog_size,
        roots_size,
        canopy_depth,
    )
        // indexed_changelog (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
        // indexed_changelog
        + mem::size_of::<IndexedChangelogEntry<I, NET_HEIGHT>>() * indexed_changelog_size
}

/// Creates a heap-allocated indexed Merkle tree with the given dimensions.
/// Fails if the inner concurrent tree rejects the parameters.
pub fn new(
    height: usize,
    changelog_size: usize,
    roots_size: usize,
    canopy_depth: usize,
    indexed_changelog_size: usize,
) -> Result<Self, ConcurrentMerkleTreeError> {
    let merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(
        height,
        changelog_size,
        roots_size,
        canopy_depth,
    )?;
    Ok(Self {
        merkle_tree,
        indexed_changelog: CyclicBoundedVec::with_capacity(indexed_changelog_size),
        _index: PhantomData,
    })
}

/// Initializes the tree: sets up the inner concurrent tree, appends the
/// zero leaf (the mandatory lowest element of the linked list) and seeds
/// the indexed changelog.
pub fn init(&mut self) -> Result<(), IndexedMerkleTreeError> {
    self.merkle_tree.init()?;

    // Append the first low leaf, which has value 0 and does not point
    // to any other leaf yet.
    // This low leaf is going to be updated during the first `update`
    // operation.
    self.merkle_tree.append(&H::zero_indexed_leaf())?;

    // Emit first changelog entries. The all-zero element (value 0,
    // next_index 0) with a zero-bytes proof represents the initial leaf.
    let element = RawIndexedElement {
        value: [0_u8; 32],
        next_index: I::zero(),
        next_value: [0_u8; 32],
        index: I::zero(),
    };
    let changelog_entry = IndexedChangelogEntry {
        element,
        proof: H::zero_bytes()[..NET_HEIGHT].try_into().unwrap(),
        changelog_index: 0,
    };
    // Pushed twice on purpose — every later operation pushes a (low
    // element, new element) pair, keeping entries paired from the start.
    self.indexed_changelog.push(changelog_entry.clone());
    self.indexed_changelog.push(changelog_entry);
    Ok(())
}

/// Add the highest element with a maximum value allowed by the prime
/// field.
///
/// Initializing an indexed Merkle tree not only with the lowest element
/// (mandatory for the IMT algorithm to work), but also the highest element,
/// makes non-inclusion proofs easier - there is no special case needed for
/// the first insertion.
///
/// However, it comes with a tradeoff - the space available in the tree
/// becomes lower by 1.
pub fn add_highest_element(&mut self) -> Result<(), IndexedMerkleTreeError> {
    // Build the (low element, highest element) bundle off-chain style,
    // using a fresh reference array seeded from HIGHEST_ADDRESS_PLUS_ONE.
    let mut indexed_array = IndexedArray::<H, I>::default();
    let element_bundle = indexed_array.init()?;
    let new_low_leaf = element_bundle
        .new_low_element
        .hash::<H>(&element_bundle.new_element.value)?;

    // The current leaf is the zero leaf appended in `init`, so its proof is
    // exactly the zero-bytes path (canopy levels excluded).
    let mut proof = BoundedVec::with_capacity(self.merkle_tree.height);
    for i in 0..self.merkle_tree.height - self.merkle_tree.canopy_depth {
        // PANICS: Calling `unwrap()` pushing into this bounded vec
        // cannot panic since it has enough capacity.
        proof.push(H::zero_bytes()[i]).unwrap();
    }
    let (changelog_index, _) = self.merkle_tree.update(
        self.changelog_index(),
        &H::zero_indexed_leaf(),
        &new_low_leaf,
        0,
        &mut proof,
    )?;

    // Emit changelog for low element.
    let low_element = RawIndexedElement {
        value: bigint_to_be_bytes_array::<32>(&element_bundle.new_low_element.value)?,
        next_index: element_bundle.new_low_element.next_index,
        next_value: bigint_to_be_bytes_array::<32>(&element_bundle.new_element.value)?,
        index: element_bundle.new_low_element.index,
    };
    let low_element_changelog_entry = IndexedChangelogEntry {
        element: low_element,
        proof: H::zero_bytes()[..NET_HEIGHT].try_into().unwrap(),
        changelog_index,
    };
    self.indexed_changelog.push(low_element_changelog_entry);

    // Append the highest element; `append_with_proof` fills `proof` with
    // the new leaf's Merkle path.
    let new_leaf = element_bundle
        .new_element
        .hash::<H>(&element_bundle.new_element_next_value)?;
    let mut proof = BoundedVec::with_capacity(self.height);
    let (changelog_index, _) = self.merkle_tree.append_with_proof(&new_leaf, &mut proof)?;

    // Emit changelog for new element.
    let new_element = RawIndexedElement {
        value: bigint_to_be_bytes_array::<32>(&element_bundle.new_element.value)?,
        next_index: element_bundle.new_element.next_index,
        next_value: [0_u8; 32],
        index: element_bundle.new_element.index,
    };
    let new_element_changelog_entry = IndexedChangelogEntry {
        element: new_element,
        proof: proof.as_slice()[..NET_HEIGHT].try_into().unwrap(),
        changelog_index,
    };
    self.indexed_changelog.push(new_element_changelog_entry);

    Ok(())
}

/// Index of the most recent indexed changelog entry.
pub fn indexed_changelog_index(&self) -> usize {
    self.indexed_changelog.last_index()
}

/// Checks whether the given Merkle `proof` for the given `leaf` (at
/// `leaf_index`) is valid. The proof is valid when computing parent node
/// hashes using the whole path of the proof gives the same result as the
/// current root.
pub fn validate_proof(
    &self,
    leaf: &[u8; 32],
    leaf_index: usize,
    proof: &BoundedVec<[u8; 32]>,
) -> Result<(), IndexedMerkleTreeError> {
    self.merkle_tree.validate_proof(leaf, leaf_index, proof)?;
    Ok(())
}

/// Iterates over indexed changelog and every time an entry corresponding
/// to the provided `low_element` is found, it patches:
///
/// * Changelog index - indexed changelog entries contain corresponding
///   changelog indices.
/// * New element - changes might impact the `next_index` field, which in
///   such case is updated.
/// * Low element - it might completely change if a change introduced an
///   element in our range.
/// * Merkle proof.
#[allow(clippy::type_complexity)]
pub fn patch_elements_and_proof(
    &mut self,
    indexed_changelog_index: usize,
    changelog_index: &mut usize,
    new_element: &mut IndexedElement<I>,
    low_element: &mut IndexedElement<I>,
    low_element_next_value: &mut BigUint,
    low_leaf_proof: &mut BoundedVec<[u8; 32]>,
) -> Result<(), IndexedMerkleTreeError> {
    // Collect (cyclic) positions of all newer changelog entries that touched
    // the same low element. Indices are collected up-front because the loop
    // below needs mutable access to the changelog.
    let next_indexed_changelog_indices: Vec<usize> = self
        .indexed_changelog
        .iter_from(indexed_changelog_index)?
        .skip(1)
        .enumerate()
        .filter_map(|(index, changelog_entry)| {
            if changelog_entry.element.index == low_element.index {
                Some((indexed_changelog_index + 1 + index) % self.indexed_changelog.len())
            } else {
                None
            }
        })
        .collect();

    let mut new_low_element = None;

    for next_indexed_changelog_index in next_indexed_changelog_indices {
        let changelog_entry = &mut self.indexed_changelog[next_indexed_changelog_index];

        let next_element_value = BigUint::from_bytes_be(&changelog_entry.element.next_value);
        if next_element_value < new_element.value {
            // If the next element is lower than the current element, it means
            // that it should become the low element.
            //
            // Save it and break the loop.
            new_low_element = Some((
                (next_indexed_changelog_index + 1) % self.indexed_changelog.len(),
                next_element_value,
            ));
            break;
        }

        // Patch the changelog index.
        *changelog_index = changelog_entry.changelog_index;

        // Patch the `next_index` of `new_element`.
        new_element.next_index = changelog_entry.element.next_index;
        // Patch the element.
        low_element.update_from_raw_element(&changelog_entry.element);
        // Patch the next value.
        *low_element_next_value = BigUint::from_bytes_be(&changelog_entry.element.next_value);
        // Patch the proof.
        for i in 0..low_leaf_proof.len() {
            low_leaf_proof[i] = changelog_entry.proof[i];
        }
    }

    // If we found a new low element.
    if let Some((new_low_element_changelog_index, new_low_element)) = new_low_element {
        let new_low_element_changelog_entry =
            &self.indexed_changelog[new_low_element_changelog_index];
        *changelog_index = new_low_element_changelog_entry.changelog_index;
        *low_element = IndexedElement {
            index: new_low_element_changelog_entry.element.index,
            value: new_low_element.clone(),
            next_index: new_low_element_changelog_entry.element.next_index,
        };

        for i in 0..low_leaf_proof.len() {
            low_leaf_proof[i] = new_low_element_changelog_entry.proof[i];
        }

        new_element.next_index = low_element.next_index;

        // Start the patching process from scratch for the new low element.
        return self.patch_elements_and_proof(
            new_low_element_changelog_index,
            changelog_index,
            new_element,
            low_element,
            low_element_next_value,
            low_leaf_proof,
        );
    }

    Ok(())
}

/// Inserts `new_element_value` into the tree: patches the caller-supplied
/// (possibly stale) `low_element`/`low_leaf_proof` against the indexed
/// changelog, validates the range, rewrites the low leaf to point at the
/// new element and appends the new leaf.
///
/// Returns the update event describing both rewritten leaves; pushes one
/// indexed changelog entry for each of them.
pub fn update(
    &mut self,
    mut changelog_index: usize,
    indexed_changelog_index: usize,
    new_element_value: BigUint,
    mut low_element: IndexedElement<I>,
    mut low_element_next_value: BigUint,
    low_leaf_proof: &mut BoundedVec<[u8; 32]>,
) -> Result<IndexedMerkleTreeUpdate<I>, IndexedMerkleTreeError> {
    let mut new_element = IndexedElement {
        index: I::try_from(self.merkle_tree.next_index())
            .map_err(|_| IndexedMerkleTreeError::IntegerOverflow)?,
        value: new_element_value,
        next_index: low_element.next_index,
    };

    // Reconcile the provided element and proof with any concurrent updates
    // recorded in the indexed changelog.
    self.patch_elements_and_proof(
        indexed_changelog_index,
        &mut changelog_index,
        &mut new_element,
        &mut low_element,
        &mut low_element_next_value,
        low_leaf_proof,
    )?;

    // Check that the value of `new_element` belongs to the range
    // of `old_low_element`.
    if low_element.next_index == I::zero() {
        // In this case, the `old_low_element` is the greatest element.
        // The value of `new_element` needs to be greater than the value of
        // `old_low_element` (and therefore, be the greatest).
        if new_element.value <= low_element.value {
            return Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement);
        }
    } else {
        // The value of `new_element` needs to be greater than the value of
        // `old_low_element` (and therefore, be the greatest).
        if new_element.value <= low_element.value {
            return Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement);
        }
        // The value of `new_element` needs to be lower than the value of
        // next element pointed by `old_low_element`.
        if new_element.value >= low_element_next_value {
            return Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement);
        }
    }

    // Instantiate `new_low_element` - the low element with updated values.
    let new_low_element = IndexedElement {
        index: low_element.index,
        value: low_element.value.clone(),
        next_index: new_element.index,
    };

    // Update low element. If the `old_low_element` does not belong to the
    // tree, validating the proof is going to fail.
    let old_low_leaf = low_element.hash::<H>(&low_element_next_value)?;
    let new_low_leaf = new_low_element.hash::<H>(&new_element.value)?;
    let (new_changelog_index, _) = self.merkle_tree.update(
        changelog_index,
        &old_low_leaf,
        &new_low_leaf,
        low_element.index.into(),
        low_leaf_proof,
    )?;
    // Emit changelog entry for low element.
    let new_low_element = RawIndexedElement {
        value: bigint_to_be_bytes_array::<32>(&new_low_element.value).unwrap(),
        next_index: new_low_element.next_index,
        next_value: bigint_to_be_bytes_array::<32>(&new_element.value)?,
        index: new_low_element.index,
    };
    let low_element_changelog_entry = IndexedChangelogEntry {
        element: new_low_element,
        proof: low_leaf_proof.as_slice()[..NET_HEIGHT].try_into().unwrap(),
        changelog_index: new_changelog_index,
    };
    self.indexed_changelog.push(low_element_changelog_entry);

    // New element is always the newest one in the tree. Since we
    // support concurrent updates, the index provided by the caller
    // might be outdated. Let's just use the latest index indicated
    // by the tree.
    new_element.index =
        I::try_from(self.next_index()).map_err(|_| IndexedMerkleTreeError::IntegerOverflow)?;

    // Append new element.
    let mut proof = BoundedVec::with_capacity(self.height);
    let new_leaf = new_element.hash::<H>(&low_element_next_value)?;
    let (new_changelog_index, _) = self.merkle_tree.append_with_proof(&new_leaf, &mut proof)?;

    // Prepare raw new element to save in changelog.
    let raw_new_element = RawIndexedElement {
        value: bigint_to_be_bytes_array::<32>(&new_element.value).unwrap(),
        next_index: new_element.next_index,
        next_value: bigint_to_be_bytes_array::<32>(&low_element_next_value)?,
        index: new_element.index,
    };
    // Emit changelog entry for new element.
    let new_element_changelog_entry = IndexedChangelogEntry {
        element: raw_new_element,
        proof: proof.as_slice()[..NET_HEIGHT].try_into().unwrap(),
        changelog_index: new_changelog_index,
    };
    self.indexed_changelog.push(new_element_changelog_entry);

    let output = IndexedMerkleTreeUpdate {
        new_low_element,
        new_low_element_hash: new_low_leaf,
        new_high_element: raw_new_element,
        new_high_element_hash: new_leaf,
    };

    Ok(output)
}
}

// Dereferences to the inner concurrent tree so callers can use its API
// (e.g. `changelog_index()`, `next_index()`) directly.
impl<'a, H, I, const HEIGHT: usize, const NET_HEIGHT: usize> Deref
    for IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    fn deref(&self) -> &Self::Target {
        &self.merkle_tree
    }
}

impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> DerefMut
    for IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.merkle_tree
    }
}

// Equality compares the inner tree plus the indexed changelog's metadata
// (capacity and cursor positions) and contents — used by the zero-copy
// round-trip tests.
impl<H, I, const HEIGHT: usize, const NET_HEIGHT: usize> PartialEq
    for IndexedMerkleTree<H, I, HEIGHT, NET_HEIGHT>
where
    H: Hasher,
    I: CheckedAdd
        + CheckedSub
        + Copy
        + Clone
        + fmt::Debug
        + PartialOrd
        + ToBytes
        + TryFrom<usize>
        + Unsigned,
    usize: From<I>,
{
    fn eq(&self, other: &Self) -> bool {
        self.merkle_tree.eq(&other.merkle_tree)
            && self
                .indexed_changelog
                .capacity()
                .eq(&other.indexed_changelog.capacity())
            && self
                .indexed_changelog
                .len()
                .eq(&other.indexed_changelog.len())
            && self
                .indexed_changelog
                .first_index()
                .eq(&other.indexed_changelog.first_index())
            && self
                .indexed_changelog
                .last_index()
                .eq(&other.indexed_changelog.last_index())
            && self.indexed_changelog.eq(&other.indexed_changelog)
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/errors.rs
use light_bounded_vec::BoundedVecError;
use light_concurrent_merkle_tree::{
    errors::ConcurrentMerkleTreeError, light_hasher::errors::HasherError,
};
use light_utils::UtilsError;
use thiserror::Error;

/// Errors returned by indexed Merkle tree and indexed array operations.
/// Wrapped errors (`Hasher`, `ConcurrentMerkleTree`, `Utils`, `BoundedVec`)
/// are converted automatically via `#[from]`; the numeric on-chain codes
/// live in the `From<IndexedMerkleTreeError> for u32` impl below.
#[derive(Debug, Error)]
pub enum IndexedMerkleTreeError {
    #[error("Integer overflow")]
    IntegerOverflow,
    #[error("Invalid index, it exceeds the number of elements.")]
    IndexHigherThanMax,
    #[error("Could not find the low element.")]
    LowElementNotFound,
    #[error("Low element is greater or equal to the provided new element.")]
    LowElementGreaterOrEqualToNewElement,
    #[error("The provided new element is greater or equal to the next element.")]
    NewElementGreaterOrEqualToNextElement,
    #[error("The element already exists, but was expected to be absent.")]
    ElementAlreadyExists,
    #[error("The element does not exist, but was expected to be present.")]
    ElementDoesNotExist,
    #[error("Invalid changelog buffer size, expected {0}, got {1}")]
    ChangelogBufferSize(usize, usize),
    #[error("Hasher error: {0}")]
    Hasher(#[from] HasherError),
    #[error("Concurrent Merkle tree error: {0}")]
    ConcurrentMerkleTree(#[from] ConcurrentMerkleTreeError),
    #[error("Utils error {0}")]
    Utils(#[from] UtilsError),
    #[error("Bounded vector error: {0}")]
    BoundedVec(#[from] BoundedVecError),
    #[error("Indexed array is full, cannot append more elements")]
    ArrayFull,
}

// NOTE(vadorovsky): Unfortunately, we need to do it by hand. `num_derive::ToPrimitive`
// doesn't support data-carrying enums.
#[cfg(feature = "solana")]
impl From<IndexedMerkleTreeError> for u32 {
    /// Maps each error to its stable on-chain error code. This crate owns
    /// the 11001-11009 range; wrapped errors delegate to their own crates'
    /// code ranges. Do not renumber existing variants — programs and
    /// clients match on these values.
    fn from(e: IndexedMerkleTreeError) -> u32 {
        match e {
            IndexedMerkleTreeError::IntegerOverflow => 11001,
            IndexedMerkleTreeError::IndexHigherThanMax => 11002,
            IndexedMerkleTreeError::LowElementNotFound => 11003,
            IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement => 11004,
            IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement => 11005,
            IndexedMerkleTreeError::ElementAlreadyExists => 11006,
            IndexedMerkleTreeError::ElementDoesNotExist => 11007,
            IndexedMerkleTreeError::ChangelogBufferSize(_, _) => 11008,
            IndexedMerkleTreeError::ArrayFull => 11009,
            // Wrapped errors carry their own numeric codes.
            IndexedMerkleTreeError::Hasher(e) => e.into(),
            IndexedMerkleTreeError::ConcurrentMerkleTree(e) => e.into(),
            IndexedMerkleTreeError::Utils(e) => e.into(),
            IndexedMerkleTreeError::BoundedVec(e) => e.into(),
        }
    }
}

#[cfg(feature = "solana")]
impl From<IndexedMerkleTreeError> for solana_program::program_error::ProgramError {
    /// Wraps the numeric code above into a custom Solana program error.
    fn from(e: IndexedMerkleTreeError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/array.rs
use std::{cmp::Ordering, fmt::Debug, marker::PhantomData};

use crate::{errors::IndexedMerkleTreeError, HIGHEST_ADDRESS_PLUS_ONE};
use light_concurrent_merkle_tree::{event::RawIndexedElement, light_hasher::Hasher};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::BigUint;
use num_traits::Zero;
use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned};

/// A node of the indexed Merkle tree's sorted linked list: its position in
/// the array (`index`), its `value` and the position of the next-greater
/// element (`next_index`; 0 marks the end of the list).
#[derive(Clone, Debug, Default)]
pub struct IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    pub index: I,
    pub value: BigUint,
    pub next_index: I,
}

impl<I> From<RawIndexedElement<I>> for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    // Decodes the fixed 32-byte big-endian value of the raw (event) form.
    fn from(value: RawIndexedElement<I>) -> Self {
        IndexedElement {
            index: value.index,
            value: BigUint::from_bytes_be(&value.value),
            next_index: value.next_index,
        }
    }
}

// Equality and ordering deliberately compare only `value` — `index` and
// `next_index` describe list position, not identity.
impl<I> PartialEq for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    fn eq(&self, other: &Self) -> bool {
        self.value == other.value
    }
}

impl<I> Eq for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
}

impl<I> PartialOrd for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl<I> Ord for IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    fn cmp(&self, other: &Self) -> Ordering {
        self.value.cmp(&other.value)
    }
}

impl<I> IndexedElement<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Position of this element, widened to `usize`.
    pub fn index(&self) -> usize {
        self.index.into()
    }

    /// Position of the next-greater element, widened to `usize`.
    pub fn next_index(&self) -> usize {
        self.next_index.into()
    }

    /// Leaf hash of this element: H(value, next_index, next_value), with the
    /// big integers serialized as 32-byte big-endian arrays.
    pub fn hash<H>(&self, next_value: &BigUint) -> Result<[u8; 32], IndexedMerkleTreeError>
    where
        H: Hasher,
    {
        let hash = H::hashv(&[
            bigint_to_be_bytes_array::<32>(&self.value)?.as_ref(),
            self.next_index.to_be_bytes().as_ref(),
            bigint_to_be_bytes_array::<32>(next_value)?.as_ref(),
        ])?;
        Ok(hash)
    }

    /// Overwrites all fields from the raw (event) representation.
    pub fn update_from_raw_element(&mut self, raw_element: &RawIndexedElement<I>) {
        self.index = raw_element.index;
        self.value = BigUint::from_bytes_be(&raw_element.value);
        self.next_index = raw_element.next_index;
    }
}

/// Result of preparing an insertion: the rewritten low element, the new
/// element, and the value of the element the new one will point to.
#[derive(Clone, Debug)]
pub struct IndexedElementBundle<I>
where
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    pub new_low_element: IndexedElement<I>,
    pub new_element: IndexedElement<I>,
    pub new_element_next_value: BigUint,
}

/// In-memory reference implementation of the indexed array backing an
/// indexed Merkle tree: a vector of linked `IndexedElement`s kept in
/// insertion order, linked in value order.
#[derive(Clone, Debug)]
pub struct IndexedArray<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    pub elements: Vec<IndexedElement<I>>,
    // Index of the most recently inserted element; doubles as the element
    // count excluding the implicit zero element at position 0.
    pub current_node_index: I,
    // Index of the element with the greatest value (end of the linked list).
    pub highest_element_index: I,
    _hasher: PhantomData<H>,
}

impl<H, I> Default for IndexedArray<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    // Starts with the mandatory zero element (value 0, pointing nowhere).
    fn default() -> Self {
        Self {
            elements: vec![IndexedElement {
                index: I::zero(),
                value: BigUint::zero(),
                next_index: I::zero(),
            }],
            current_node_index: I::zero(),
            highest_element_index: I::zero(),
            _hasher: PhantomData,
        }
    }
}

impl<H, I> IndexedArray<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Element at `index`, or `None` when out of bounds.
    pub fn get(&self, index: usize) -> Option<&IndexedElement<I>> {
        self.elements.get(index)
    }

    /// Number of inserted elements, excluding the implicit zero element —
    /// `elements.len()` is always `len() + 1`.
    pub fn len(&self) -> usize {
        self.current_node_index.into()
    }

    pub fn is_empty(&self) -> bool {
        self.current_node_index == I::zero()
    }

    pub fn iter(&self) -> IndexingArrayIter<H, I> {
        IndexingArrayIter {
            indexing_array: self,
            front: 0,
            back: self.current_node_index.into(),
        }
    }

    /// Linear scan for an element with exactly the given `value`.
    pub fn find_element(&self, value: &BigUint) -> Option<&IndexedElement<I>> {
        self.elements[..self.len() + 1]
            .iter()
            .find(|&node| node.value == *value)
    }

    /// Seeds the array with the highest element of the address space
    /// (`HIGHEST_ADDRESS_PLUS_ONE`); see `IndexedMerkleTree::add_highest_element`.
    pub fn init(&mut self) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> {
        use num_traits::Num;
        let init_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10)
            .map_err(|_| IndexedMerkleTreeError::IntegerOverflow)?;
        self.append(&init_value)
    }

    /// Returns the index of the low element for the given `value`, which is
    /// not yet the part of the array.
    ///
    /// Low element is the greatest element which still has lower value than
    /// the provided one.
    ///
    /// Low elements are used in non-membership proofs.
    pub fn find_low_element_index_for_nonexistent(
        &self,
        value: &BigUint,
    ) -> Result<I, IndexedMerkleTreeError> {
        // Try to find element whose next element is higher than the provided
        // value.
        for (i, node) in self.elements.iter().enumerate() {
            if node.value == *value {
                return Err(IndexedMerkleTreeError::ElementAlreadyExists);
            }
            if self.elements[node.next_index()].value > *value && node.value < *value {
                return i
                    .try_into()
                    .map_err(|_| IndexedMerkleTreeError::IntegerOverflow);
            }
        }
        // If no such element was found, it means that our value is going to be
        // the greatest in the array. This means that the currently greatest
        // element is going to be the low element of our value.
        Ok(self.highest_element_index)
    }

    /// Returns the:
    ///
    /// * Low element for the given value.
    /// * Next value for that low element.
    ///
    /// For the given `value`, which is not yet the part of the array.
    ///
    /// Low element is the greatest element which still has lower value than
    /// the provided one.
    ///
    /// Low elements are used in non-membership proofs.
pub fn find_low_element_for_nonexistent(
    &self,
    value: &BigUint,
) -> Result<(IndexedElement<I>, BigUint), IndexedMerkleTreeError> {
    let low_element_index = self.find_low_element_index_for_nonexistent(value)?;
    let low_element = self.elements[usize::from(low_element_index)].clone();
    // Read the next element's value before moving `low_element` into the
    // return tuple — this drops a second, redundant clone of the whole
    // element (and its heap-allocated `BigUint`) that the previous version
    // performed.
    let low_element_next_value = self.elements[low_element.next_index()].value.clone();
    Ok((low_element, low_element_next_value))
}

/// Returns the index of the low element for the given `value`, which is
/// already the part of the array.
///
/// Low element is the greatest element which still has lower value than
/// the provided one.
///
/// Low elements are used in non-membership proofs.
pub fn find_low_element_index_for_existent(
    &self,
    value: &BigUint,
) -> Result<I, IndexedMerkleTreeError> {
    // The low element is the one whose successor (`next_index`) carries the
    // searched value.
    for (i, node) in self.elements[..self.len() + 1].iter().enumerate() {
        if self.elements[usize::from(node.next_index)].value == *value {
            let i = i
                .try_into()
                .map_err(|_| IndexedMerkleTreeError::IntegerOverflow)?;
            return Ok(i);
        }
    }
    Err(IndexedMerkleTreeError::ElementDoesNotExist)
}

/// Returns the low element for the given `value`, which is already the
/// part of the array.
///
/// Low element is the greatest element which still has lower value than
/// the provided one.
///
/// Low elements are used in non-membership proofs.
pub fn find_low_element_for_existent(
    &self,
    value: &BigUint,
) -> Result<IndexedElement<I>, IndexedMerkleTreeError> {
    let low_element_index = self.find_low_element_index_for_existent(value)?;
    let low_element = self.elements[usize::from(low_element_index)].clone();
    Ok(low_element)
}

/// Returns the hash of the given element. That hash consists of:
///
/// * The value of the given element.
/// * The `next_index` of the given element.
/// * The value of the element pointed by `next_index`.
pub fn hash_element(&self, index: I) -> Result<[u8; 32], IndexedMerkleTreeError> { let element = self .elements .get(usize::from(index)) .ok_or(IndexedMerkleTreeError::IndexHigherThanMax)?; let next_element = self .elements .get(usize::from(element.next_index)) .ok_or(IndexedMerkleTreeError::IndexHigherThanMax)?; element.hash::<H>(&next_element.value) } /// Returns an updated low element and a new element, created based on the /// provided `low_element_index` and `value`. pub fn new_element_with_low_element_index( &self, low_element_index: I, value: &BigUint, ) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> { let mut new_low_element = self.elements[usize::from(low_element_index)].clone(); let new_element_index = self .current_node_index .checked_add(&I::one()) .ok_or(IndexedMerkleTreeError::IntegerOverflow)?; let new_element = IndexedElement { index: new_element_index, value: value.clone(), next_index: new_low_element.next_index, }; new_low_element.next_index = new_element_index; let new_element_next_value = self.elements[usize::from(new_element.next_index)] .value .clone(); Ok(IndexedElementBundle { new_low_element, new_element, new_element_next_value, }) } pub fn new_element( &self, value: &BigUint, ) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> { let low_element_index = self.find_low_element_index_for_nonexistent(value)?; let element = self.new_element_with_low_element_index(low_element_index, value)?; Ok(element) } /// Appends the given `value` to the indexing array. pub fn append_with_low_element_index( &mut self, low_element_index: I, value: &BigUint, ) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> { // TOD0: add length check, and add field to with tree height here let old_low_element = &self.elements[usize::from(low_element_index)]; // Check that the `value` belongs to the range of `old_low_element`. if old_low_element.next_index == I::zero() { // In this case, the `old_low_element` is the greatest element. 
// The value of `new_element` needs to be greater than the value of // `old_low_element` (and therefore, be the greatest). if value <= &old_low_element.value { return Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement); } } else { // The value of `new_element` needs to be greater than the value of // `old_low_element` (and therefore, be the greatest). if value <= &old_low_element.value { return Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement); } // The value of `new_element` needs to be lower than the value of // next element pointed by `old_low_element`. if value >= &self.elements[usize::from(old_low_element.next_index)].value { return Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement); } } // Create new node. let new_element_bundle = self.new_element_with_low_element_index(low_element_index, value)?; // If the old low element wasn't pointing to any element, it means that: // // * It used to be the highest element. // * Our new element, which we are appending, is going the be the // highest element. // // Therefore, we need to save the new element index as the highest // index. if old_low_element.next_index == I::zero() { self.highest_element_index = new_element_bundle.new_element.index; } // Insert new node. self.current_node_index = new_element_bundle.new_element.index; self.elements.push(new_element_bundle.new_element.clone()); // Update low element. 
self.elements[usize::from(low_element_index)] = new_element_bundle.new_low_element.clone(); Ok(new_element_bundle) } pub fn append( &mut self, value: &BigUint, ) -> Result<IndexedElementBundle<I>, IndexedMerkleTreeError> { let low_element_index = self.find_low_element_index_for_nonexistent(value)?; self.append_with_low_element_index(low_element_index, value) } pub fn lowest(&self) -> Option<IndexedElement<I>> { if self.current_node_index < I::one() { None } else { self.elements.get(1).cloned() } } } pub struct IndexingArrayIter<'a, H, I> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { indexing_array: &'a IndexedArray<H, I>, front: usize, back: usize, } impl<'a, H, I> Iterator for IndexingArrayIter<'a, H, I> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { type Item = &'a IndexedElement<I>; fn next(&mut self) -> Option<Self::Item> { if self.front <= self.back { let result = self.indexing_array.elements.get(self.front); self.front += 1; result } else { None } } } impl<'a, H, I> DoubleEndedIterator for IndexingArrayIter<'a, H, I> where H: Hasher, I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned, usize: From<I>, { fn next_back(&mut self) -> Option<Self::Item> { if self.back >= self.front { let result = self.indexing_array.elements.get(self.back); self.back -= 1; result } else { None } } } #[cfg(test)] mod test { use light_concurrent_merkle_tree::light_hasher::Poseidon; use num_bigint::{RandBigInt, ToBigUint}; use rand::thread_rng; use super::*; #[test] fn test_indexed_element_cmp() { let mut rng = thread_rng(); for _ in 0..1000 { let value = rng.gen_biguint(128); let element_1 = IndexedElement::<u16> { index: 0, value: value.clone(), next_index: 1, }; let element_2 = IndexedElement::<u16> { index: 1, value, next_index: 2, }; assert_eq!(element_1, element_2); 
assert_eq!(element_2, element_1); assert!(matches!(element_1.cmp(&element_2), Ordering::Equal)); assert!(matches!(element_2.cmp(&element_1), Ordering::Equal)); let value_higher = rng.gen_biguint(128); if value_higher == 0.to_biguint().unwrap() { continue; } let value_lower = rng.gen_biguint_below(&value_higher); let element_lower = IndexedElement::<u16> { index: 0, value: value_lower, next_index: 1, }; let element_higher = IndexedElement::<u16> { index: 1, value: value_higher, next_index: 2, }; assert_ne!(element_lower, element_higher); assert_ne!(element_higher, element_lower); assert!(matches!(element_lower.cmp(&element_higher), Ordering::Less)); assert!(matches!( element_higher.cmp(&element_lower), Ordering::Greater )); assert!(matches!( element_lower.partial_cmp(&element_higher), Some(Ordering::Less) )); assert!(matches!( element_higher.partial_cmp(&element_lower), Some(Ordering::Greater) )); } } /// Tests the insertion of elements to the indexing array. #[test] fn test_append() { // The initial state of the array looks like: // // ``` // value = [0] [0] [0] [0] [0] [0] [0] [0] // next_index = [0] [0] [0] [0] [0] [0] [0] [0] // ``` let mut indexed_array: IndexedArray<Poseidon, usize> = IndexedArray::default(); let nullifier1 = 30_u32.to_biguint().unwrap(); let bundle1 = indexed_array.new_element(&nullifier1).unwrap(); assert!(indexed_array.find_element(&nullifier1).is_none()); indexed_array.append(&nullifier1).unwrap(); // After adding a new value 30, it should look like: // // ``` // value = [ 0] [30] [0] [0] [0] [0] [0] [0] // next_index = [ 1] [ 0] [0] [0] [0] [0] [0] [0] // ``` // // Because: // // * Low element is the first node, with index 0 and value 0. There is // no node with value greater as 30, so we found it as a one pointing to // node 0 (which will always have value 0). // * The new nullifier is inserted in index 1. // * `next_*` fields of the low nullifier are updated to point to the new // nullifier. 
assert_eq!( indexed_array.find_element(&nullifier1), Some(&bundle1.new_element), ); let expected_hash = Poseidon::hashv(&[ bigint_to_be_bytes_array::<32>(&nullifier1) .unwrap() .as_ref(), 0_usize.to_be_bytes().as_ref(), bigint_to_be_bytes_array::<32>(&(0.to_biguint().unwrap())) .unwrap() .as_ref(), ]) .unwrap(); assert_eq!(indexed_array.hash_element(1).unwrap(), expected_hash); assert_eq!( indexed_array.elements[0], IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 1, }, ); assert_eq!( indexed_array.elements[1], IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0, } ); assert_eq!( indexed_array.iter().collect::<Vec<_>>().as_slice(), &[ &IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 1, }, &IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0 } ] ); let nullifier2 = 10_u32.to_biguint().unwrap(); let bundle2 = indexed_array.new_element(&nullifier2).unwrap(); assert!(indexed_array.find_element(&nullifier2).is_none()); indexed_array.append(&nullifier2).unwrap(); // After adding an another value 10, it should look like: // // ``` // value = [ 0] [30] [10] [0] [0] [0] [0] [0] // next_index = [ 2] [ 0] [ 1] [0] [0] [0] [0] [0] // ``` // // Because: // // * Low nullifier is still the node 0, but this time for differen reason - // its `next_index` 2 contains value 30, whish is greater than 10. // * The new nullifier is inserted as node 2. // * Low nullifier is pointing to the index 1. We assign the 1st nullifier // as the next nullifier of our new nullifier. Therefore, our new nullifier // looks like: `[value = 10, next_index = 1]`. // * Low nullifier is updated to point to the new nullifier. Therefore, // after update it looks like: `[value = 0, next_index = 2]`. // * The previously inserted nullifier, the node 1, remains unchanged. 
assert_eq!( indexed_array.find_element(&nullifier2), Some(&bundle2.new_element), ); let expected_hash = Poseidon::hashv(&[ bigint_to_be_bytes_array::<32>(&nullifier2) .unwrap() .as_ref(), 1_usize.to_be_bytes().as_ref(), bigint_to_be_bytes_array::<32>(&(30.to_biguint().unwrap())) .unwrap() .as_ref(), ]) .unwrap(); assert_eq!(indexed_array.hash_element(2).unwrap(), expected_hash); assert_eq!( indexed_array.elements[0], IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, } ); assert_eq!( indexed_array.elements[1], IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0, } ); assert_eq!( indexed_array.elements[2], IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 1, } ); assert_eq!( indexed_array.iter().collect::<Vec<_>>().as_slice(), &[ &IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, }, &IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0, }, &IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 1, } ] ); let nullifier3 = 20_u32.to_biguint().unwrap(); let bundle3 = indexed_array.new_element(&nullifier3).unwrap(); assert!(indexed_array.find_element(&nullifier3).is_none()); indexed_array.append(&nullifier3).unwrap(); // After adding an another value 20, it should look like: // // ``` // value = [ 0] [30] [10] [20] [0] [0] [0] [0] // next_index = [ 2] [ 0] [ 3] [ 1] [0] [0] [0] [0] // ``` // // Because: // * Low nullifier is the node 2. // * The new nullifier is inserted as node 3. // * Low nullifier is pointing to the node 2. We assign the 1st nullifier // as the next nullifier of our new nullifier. Therefore, our new // nullifier looks like: // * Low nullifier is updated to point to the new nullifier. Therefore, // after update it looks like: `[value = 10, next_index = 3]`. 
assert_eq!( indexed_array.find_element(&nullifier3), Some(&bundle3.new_element), ); let expected_hash = Poseidon::hashv(&[ bigint_to_be_bytes_array::<32>(&nullifier3) .unwrap() .as_ref(), 1_usize.to_be_bytes().as_ref(), bigint_to_be_bytes_array::<32>(&(30.to_biguint().unwrap())) .unwrap() .as_ref(), ]) .unwrap(); assert_eq!(indexed_array.hash_element(3).unwrap(), expected_hash); assert_eq!( indexed_array.elements[0], IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, } ); assert_eq!( indexed_array.elements[1], IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0, } ); assert_eq!( indexed_array.elements[2], IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 3, } ); assert_eq!( indexed_array.elements[3], IndexedElement { index: 3, value: 20_u32.to_biguint().unwrap(), next_index: 1, } ); assert_eq!( indexed_array.iter().collect::<Vec<_>>().as_slice(), &[ &IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, }, &IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0, }, &IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 3, }, &IndexedElement { index: 2, value: 20_u32.to_biguint().unwrap(), next_index: 1 } ] ); let nullifier4 = 50_u32.to_biguint().unwrap(); let bundle4 = indexed_array.new_element(&nullifier4).unwrap(); assert!(indexed_array.find_element(&nullifier4).is_none()); indexed_array.append(&nullifier4).unwrap(); // After adding an another value 50, it should look like: // // ``` // value = [ 0] [30] [10] [20] [50] [0] [0] [0] // next_index = [ 2] [ 4] [ 3] [ 1] [0 ] [0] [0] [0] // ``` // // Because: // // * Low nullifier is the node 1 - there is no node with value greater // than 50, so we found it as a one having 0 as the `next_value`. // * The new nullifier is inserted as node 4. // * Low nullifier is not pointing to any node. So our new nullifier // is not going to point to any other node either. 
Therefore, the new // nullifier looks like: `[value = 50, next_index = 0]`. // * Low nullifier is updated to point to the new nullifier. Therefore, // after update it looks like: `[value = 30, next_index = 4]`. assert_eq!( indexed_array.find_element(&nullifier4), Some(&bundle4.new_element), ); let expected_hash = Poseidon::hashv(&[ bigint_to_be_bytes_array::<32>(&nullifier4) .unwrap() .as_ref(), 0_usize.to_be_bytes().as_ref(), bigint_to_be_bytes_array::<32>(&(0.to_biguint().unwrap())) .unwrap() .as_ref(), ]) .unwrap(); assert_eq!(indexed_array.hash_element(4).unwrap(), expected_hash); assert_eq!( indexed_array.elements[0], IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, } ); assert_eq!( indexed_array.elements[1], IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 4, } ); assert_eq!( indexed_array.elements[2], IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 3, } ); assert_eq!( indexed_array.elements[3], IndexedElement { index: 3, value: 20_u32.to_biguint().unwrap(), next_index: 1, } ); assert_eq!( indexed_array.elements[4], IndexedElement { index: 4, value: 50_u32.to_biguint().unwrap(), next_index: 0, } ); assert_eq!( indexed_array.iter().collect::<Vec<_>>().as_slice(), &[ &IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, }, &IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 4, }, &IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 3, }, &IndexedElement { index: 3, value: 20_u32.to_biguint().unwrap(), next_index: 1, }, &IndexedElement { index: 4, value: 50_u32.to_biguint().unwrap(), next_index: 0, } ] ); } #[test] fn test_append_with_low_element_index() { // The initial state of the array looks like: // // ``` // value = [0] [0] [0] [0] [0] [0] [0] [0] // next_index = [0] [0] [0] [0] [0] [0] [0] [0] // ``` let mut indexing_array: IndexedArray<Poseidon, usize> = IndexedArray::default(); let 
low_element_index = 0; let nullifier1 = 30_u32.to_biguint().unwrap(); indexing_array .append_with_low_element_index(low_element_index, &nullifier1) .unwrap(); // After adding a new value 30, it should look like: // // ``` // value = [ 0] [30] [0] [0] [0] [0] [0] [0] // next_index = [ 1] [ 0] [0] [0] [0] [0] [0] [0] // ``` // // Because: // // * Low element is the first node, with index 0 and value 0. There is // no node with value greater as 30, so we found it as a one pointing to // node 0 (which will always have value 0). // * The new nullifier is inserted in index 1. // * `next_*` fields of the low nullifier are updated to point to the new // nullifier. assert_eq!( indexing_array.elements[0], IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 1, }, ); assert_eq!( indexing_array.elements[1], IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0, } ); let low_element_index = 0; let nullifier2 = 10_u32.to_biguint().unwrap(); indexing_array .append_with_low_element_index(low_element_index, &nullifier2) .unwrap(); // After adding an another value 10, it should look like: // // ``` // value = [ 0] [30] [10] [0] [0] [0] [0] [0] // next_index = [ 2] [ 0] [ 1] [0] [0] [0] [0] [0] // ``` // // Because: // // * Low nullifier is still the node 0, but this time for differen reason - // its `next_index` 2 contains value 30, whish is greater than 10. // * The new nullifier is inserted as node 2. // * Low nullifier is pointing to the index 1. We assign the 1st nullifier // as the next nullifier of our new nullifier. Therefore, our new nullifier // looks like: `[value = 10, next_index = 1]`. // * Low nullifier is updated to point to the new nullifier. Therefore, // after update it looks like: `[value = 0, next_index = 2]`. // * The previously inserted nullifier, the node 1, remains unchanged. 
assert_eq!( indexing_array.elements[0], IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, } ); assert_eq!( indexing_array.elements[1], IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0, } ); assert_eq!( indexing_array.elements[2], IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 1, } ); let low_element_index = 2; let nullifier3 = 20_u32.to_biguint().unwrap(); indexing_array .append_with_low_element_index(low_element_index, &nullifier3) .unwrap(); // After adding an another value 20, it should look like: // // ``` // value = [ 0] [30] [10] [20] [0] [0] [0] [0] // next_index = [ 2] [ 0] [ 3] [ 1] [0] [0] [0] [0] // ``` // // Because: // * Low nullifier is the node 2. // * The new nullifier is inserted as node 3. // * Low nullifier is pointing to the node 2. We assign the 1st nullifier // as the next nullifier of our new nullifier. Therefore, our new // nullifier looks like: // * Low nullifier is updated to point to the new nullifier. Therefore, // after update it looks like: `[value = 10, next_index = 3]`. 
assert_eq!( indexing_array.elements[0], IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, } ); assert_eq!( indexing_array.elements[1], IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 0, } ); assert_eq!( indexing_array.elements[2], IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 3, } ); assert_eq!( indexing_array.elements[3], IndexedElement { index: 3, value: 20_u32.to_biguint().unwrap(), next_index: 1, } ); let low_element_index = 1; let nullifier4 = 50_u32.to_biguint().unwrap(); indexing_array .append_with_low_element_index(low_element_index, &nullifier4) .unwrap(); // After adding an another value 50, it should look like: // // ``` // value = [ 0] [30] [10] [20] [50] [0] [0] [0] // next_index = [ 2] [ 4] [ 3] [ 1] [0 ] [0] [0] [0] // ``` // // Because: // // * Low nullifier is the node 1 - there is no node with value greater // than 50, so we found it as a one having 0 as the `next_value`. // * The new nullifier is inserted as node 4. // * Low nullifier is not pointing to any node. So our new nullifier // is not going to point to any other node either. Therefore, the new // nullifier looks like: `[value = 50, next_index = 0]`. // * Low nullifier is updated to point to the new nullifier. Therefore, // after update it looks like: `[value = 30, next_index = 4]`. 
assert_eq!( indexing_array.elements[0], IndexedElement { index: 0, value: 0_u32.to_biguint().unwrap(), next_index: 2, } ); assert_eq!( indexing_array.elements[1], IndexedElement { index: 1, value: 30_u32.to_biguint().unwrap(), next_index: 4, } ); assert_eq!( indexing_array.elements[2], IndexedElement { index: 2, value: 10_u32.to_biguint().unwrap(), next_index: 3, } ); assert_eq!( indexing_array.elements[3], IndexedElement { index: 3, value: 20_u32.to_biguint().unwrap(), next_index: 1, } ); assert_eq!( indexing_array.elements[4], IndexedElement { index: 4, value: 50_u32.to_biguint().unwrap(), next_index: 0, } ); } /// Tries to violate the integrity of the array by pointing to invalid low /// nullifiers. Tests whether the range check works correctly and disallows /// the invalid appends from happening. #[test] fn test_append_with_low_element_index_invalid() { // The initial state of the array looks like: // // ``` // value = [0] [0] [0] [0] [0] [0] [0] [0] // next_index = [0] [0] [0] [0] [0] [0] [0] [0] // ``` let mut indexing_array: IndexedArray<Poseidon, usize> = IndexedArray::default(); // Append nullifier 30. The low nullifier is at index 0. The array // should look like: // // ``` // value = [ 0] [30] [0] [0] [0] [0] [0] [0] // next_index = [ 1] [ 0] [0] [0] [0] [0] [0] [0] // ``` let low_element_index = 0; let nullifier1 = 30_u32.to_biguint().unwrap(); indexing_array .append_with_low_element_index(low_element_index, &nullifier1) .unwrap(); // Try appending nullifier 20, while pointing to index 1 as low // nullifier. // Therefore, the new element is lower than the supposed low element. let low_element_index = 1; let nullifier2 = 20_u32.to_biguint().unwrap(); assert!(matches!( indexing_array.append_with_low_element_index(low_element_index, &nullifier2), Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement) )); // Try appending nullifier 50, while pointing to index 0 as low // nullifier. // Therefore, the new element is greater than next element. 
let low_element_index = 0; let nullifier2 = 50_u32.to_biguint().unwrap(); assert!(matches!( indexing_array.append_with_low_element_index(low_element_index, &nullifier2), Err(IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement), )); // Append nullifier 50 correctly, with 0 as low nullifier. The array // should look like: // // ``` // value = [ 0] [30] [50] [0] [0] [0] [0] [0] // next_index = [ 1] [ 2] [ 0] [0] [0] [0] [0] [0] // ``` let low_element_index = 1; let nullifier2 = 50_u32.to_biguint().unwrap(); indexing_array .append_with_low_element_index(low_element_index, &nullifier2) .unwrap(); // Try appending nullifier 40, while pointint to index 2 (value 50) as // low nullifier. // Therefore, the pointed low element is greater than the new element. let low_element_index = 2; let nullifier3 = 40_u32.to_biguint().unwrap(); assert!(matches!( indexing_array.append_with_low_element_index(low_element_index, &nullifier3), Err(IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement) )); } /// Tests whether `find_*_for_existent` elements return `None` when a /// nonexistent is provided. #[test] fn test_find_low_element_for_existent_element() { let mut indexed_array: IndexedArray<Poseidon, usize> = IndexedArray::default(); // Append nullifiers 40 and 20. let low_element_index = 0; let nullifier_1 = 40_u32.to_biguint().unwrap(); indexed_array .append_with_low_element_index(low_element_index, &nullifier_1) .unwrap(); let low_element_index = 0; let nullifier_2 = 20_u32.to_biguint().unwrap(); indexed_array .append_with_low_element_index(low_element_index, &nullifier_2) .unwrap(); // Try finding a low element for nonexistent nullifier 30. let nonexistent_nullifier = 30_u32.to_biguint().unwrap(); // `*_existent` methods should fail. 
let res = indexed_array.find_low_element_index_for_existent(&nonexistent_nullifier); assert!(matches!( res, Err(IndexedMerkleTreeError::ElementDoesNotExist) )); let res = indexed_array.find_low_element_for_existent(&nonexistent_nullifier); assert!(matches!( res, Err(IndexedMerkleTreeError::ElementDoesNotExist) )); // `*_nonexistent` methods should succeed. let low_element_index = indexed_array .find_low_element_index_for_nonexistent(&nonexistent_nullifier) .unwrap(); assert_eq!(low_element_index, 2); let low_element = indexed_array .find_low_element_for_nonexistent(&nonexistent_nullifier) .unwrap(); assert_eq!( low_element, ( IndexedElement::<usize> { index: 2, value: 20_u32.to_biguint().unwrap(), next_index: 1, }, 40_u32.to_biguint().unwrap(), ) ); // Try finding a low element of existent nullifier 40. // `_existent` methods should succeed. let low_element_index = indexed_array .find_low_element_index_for_existent(&nullifier_1) .unwrap(); assert_eq!(low_element_index, 2); let low_element = indexed_array .find_low_element_for_existent(&nullifier_1) .unwrap(); assert_eq!( low_element, IndexedElement::<usize> { index: 2, value: 20_u32.to_biguint().unwrap(), next_index: 1, }, ); // `*_nonexistent` methods should fail. let res = indexed_array.find_low_element_index_for_nonexistent(&nullifier_1); assert!(matches!( res, Err(IndexedMerkleTreeError::ElementAlreadyExists) )); let res = indexed_array.find_low_element_for_nonexistent(&nullifier_1); assert!(matches!( res, Err(IndexedMerkleTreeError::ElementAlreadyExists) )); } }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/reference.rs
use std::marker::PhantomData;

use light_bounded_vec::{BoundedVec, BoundedVecError};
use light_concurrent_merkle_tree::light_hasher::{errors::HasherError, Hasher};
use light_merkle_tree_reference::{MerkleTree, ReferenceMerkleTreeError};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::BigUint;
use num_traits::{CheckedAdd, CheckedSub, Num, ToBytes, Unsigned};
use thiserror::Error;

use crate::{
    array::{IndexedArray, IndexedElement},
    errors::IndexedMerkleTreeError,
    HIGHEST_ADDRESS_PLUS_ONE,
};

/// Errors produced by the reference indexed Merkle tree.
#[derive(Debug, Error)]
pub enum IndexedReferenceMerkleTreeError {
    /// The value in a non-inclusion proof is not strictly greater than the
    /// lower range bound of the proof's leaf.
    #[error("NonInclusionProofFailedLowerBoundViolated")]
    NonInclusionProofFailedLowerBoundViolated,
    /// The value in a non-inclusion proof is not strictly lower than the
    /// higher range bound of the proof's leaf.
    #[error("NonInclusionProofFailedHigherBoundViolated")]
    NonInclusionProofFailedHigherBoundViolated,
    #[error(transparent)]
    Indexed(#[from] IndexedMerkleTreeError),
    #[error(transparent)]
    Reference(#[from] ReferenceMerkleTreeError),
    #[error(transparent)]
    Hasher(#[from] HasherError),
}

/// Reference (off-chain) indexed Merkle tree, built on top of the reference
/// `MerkleTree`. Used to mirror the on-chain indexed concurrent Merkle tree
/// for testing and proof generation.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct IndexedMerkleTree<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
{
    /// Underlying reference Merkle tree storing hashed indexed elements.
    pub merkle_tree: MerkleTree<H>,
    /// Marker tying the tree to the index type `I`; stores no data.
    _index: PhantomData<I>,
}

impl<H, I> IndexedMerkleTree<H, I>
where
    H: Hasher,
    I: CheckedAdd + CheckedSub + Copy + Clone + PartialOrd + ToBytes + TryFrom<usize> + Unsigned,
    usize: From<I>,
{
    /// Creates a tree of the given `height` and `canopy_depth` and appends
    /// the initial zero leaf.
    pub fn new(
        height: usize,
        canopy_depth: usize,
    ) -> Result<Self, IndexedReferenceMerkleTreeError> {
        let mut merkle_tree = MerkleTree::new(height, canopy_depth);

        // Append the first low leaf, which has value 0 and does not point
        // to any other leaf yet.
        // This low leaf is going to be updated during the first `update`
        // operation.
        merkle_tree.append(&H::zero_indexed_leaf())?;

        Ok(Self {
            merkle_tree,
            _index: PhantomData,
        })
    }

    /// Initializes the reference indexed merkle tree on par with the
    /// on-chain indexed concurrent merkle tree.
    /// Inserts the ranges 0 - BN254 Field Size - 1 into the tree.
    pub fn init(&mut self) -> Result<(), IndexedReferenceMerkleTreeError> {
        let mut indexed_array = IndexedArray::<H, I>::default();
        let init_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10).unwrap();
        let nullifier_bundle = indexed_array.append(&init_value)?;
        // Re-hash the updated low element and write it back into slot 0.
        let new_low_leaf = nullifier_bundle
            .new_low_element
            .hash::<H>(&nullifier_bundle.new_element.value)?;
        self.merkle_tree.update(&new_low_leaf, 0)?;
        // Append the leaf representing the newly inserted range element.
        let new_leaf = nullifier_bundle
            .new_element
            .hash::<H>(&nullifier_bundle.new_element_next_value)?;
        self.merkle_tree.append(&new_leaf)?;
        Ok(())
    }

    /// Returns the Merkle path of the leaf at `index` (delegates to the
    /// underlying tree).
    pub fn get_path_of_leaf(
        &self,
        index: usize,
        full: bool,
    ) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        self.merkle_tree.get_path_of_leaf(index, full)
    }

    /// Returns the Merkle proof of the leaf at `index` (delegates to the
    /// underlying tree).
    pub fn get_proof_of_leaf(
        &self,
        index: usize,
        full: bool,
    ) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        self.merkle_tree.get_proof_of_leaf(index, full)
    }

    /// Returns the current root of the tree.
    pub fn root(&self) -> [u8; 32] {
        self.merkle_tree.root()
    }

    // TODO: rename input values
    /// Applies one indexed insertion to the tree: re-hashes and updates the
    /// low element's leaf, then appends the new element's leaf.
    pub fn update(
        &mut self,
        new_low_element: &IndexedElement<I>,
        new_element: &IndexedElement<I>,
        new_element_next_value: &BigUint,
    ) -> Result<(), IndexedReferenceMerkleTreeError> {
        // Update the low element.
        let new_low_leaf = new_low_element.hash::<H>(&new_element.value)?;
        self.merkle_tree
            .update(&new_low_leaf, usize::from(new_low_element.index))?;

        // Append the new element.
        let new_leaf = new_element.hash::<H>(new_element_next_value)?;
        self.merkle_tree.append(&new_leaf)?;

        Ok(())
    }

    // TODO: add append with new value, so that we don't need to compute the lowlevel values manually
    /// Appends `value` to both the indexed array and this tree.
    // NOTE(review): the array append is unwrapped rather than propagated —
    // an invalid `value` (e.g. a duplicate) panics here; confirm intended.
    pub fn append(
        &mut self,
        value: &BigUint,
        indexed_array: &mut IndexedArray<H, I>,
    ) -> Result<(), IndexedReferenceMerkleTreeError> {
        let nullifier_bundle = indexed_array.append(value).unwrap();
        self.update(
            &nullifier_bundle.new_low_element,
            &nullifier_bundle.new_element,
            &nullifier_bundle.new_element_next_value,
        )?;
        Ok(())
    }

    /// Builds a non-inclusion proof for `value`: finds the low element whose
    /// range covers `value` and returns the Merkle proof of that leaf along
    /// with the range bounds (all values as 32-byte big-endian arrays).
    pub fn get_non_inclusion_proof(
        &self,
        value: &BigUint,
        indexed_array: &IndexedArray<H, I>,
    ) -> Result<NonInclusionProof, IndexedReferenceMerkleTreeError> {
        let (low_element, _next_value) = indexed_array.find_low_element_for_nonexistent(value)?;
        let merkle_proof = self
            .get_proof_of_leaf(usize::from(low_element.index), true)
            .unwrap();
        let higher_range_value = indexed_array
            .get(low_element.next_index())
            .unwrap()
            .value
            .clone();
        Ok(NonInclusionProof {
            root: self.root(),
            value: bigint_to_be_bytes_array::<32>(value).unwrap(),
            leaf_lower_range_value: bigint_to_be_bytes_array::<32>(&low_element.value).unwrap(),
            leaf_higher_range_value: bigint_to_be_bytes_array::<32>(&higher_range_value).unwrap(),
            leaf_index: low_element.index.into(),
            next_index: low_element.next_index(),
            merkle_proof,
        })
    }

    /// Verifies a non-inclusion proof: the range bounds must strictly
    /// enclose the value, and the reconstructed range leaf must verify
    /// against the tree.
    // NOTE(review): the Merkle verification result is unwrapped — an invalid
    // Merkle proof panics instead of returning an error; confirm intended.
    pub fn verify_non_inclusion_proof(
        &self,
        proof: &NonInclusionProof,
    ) -> Result<(), IndexedReferenceMerkleTreeError> {
        let value_big_int = BigUint::from_bytes_be(&proof.value);
        let lower_end_value = BigUint::from_bytes_be(&proof.leaf_lower_range_value);
        if lower_end_value >= value_big_int {
            return Err(IndexedReferenceMerkleTreeError::NonInclusionProofFailedLowerBoundViolated);
        }
        let higher_end_value = BigUint::from_bytes_be(&proof.leaf_higher_range_value);
        if higher_end_value <= value_big_int {
            return Err(
                IndexedReferenceMerkleTreeError::NonInclusionProofFailedHigherBoundViolated,
            );
        }

        // Reconstruct the range leaf and check it against the tree.
        let array_element = IndexedElement::<usize> {
            value: lower_end_value,
            index: proof.leaf_index,
            next_index: proof.next_index,
        };
        let leaf_hash = array_element.hash::<H>(&higher_end_value)?;
        self.merkle_tree
            .verify(&leaf_hash, &proof.merkle_proof, proof.leaf_index)
            .unwrap();

        Ok(())
    }
}

// TODO: check why next_index is usize while index is I
/// We prove non-inclusion by:
/// 1. Showing that value is greater than leaf_lower_range_value and less than leaf_higher_range_value
/// 2. Showing that the leaf_hash H(leaf_lower_range_value, leaf_next_index, leaf_higher_value) is included in the root (Merkle tree)
#[derive(Debug)]
pub struct NonInclusionProof {
    /// Root the proof was generated against.
    pub root: [u8; 32],
    /// Value whose non-inclusion is being proven (32-byte big-endian).
    pub value: [u8; 32],
    /// Lower bound of the range leaf (32-byte big-endian).
    pub leaf_lower_range_value: [u8; 32],
    /// Higher bound of the range leaf (32-byte big-endian).
    pub leaf_higher_range_value: [u8; 32],
    /// Index of the range (low element) leaf.
    pub leaf_index: usize,
    /// `next_index` of the range (low element) leaf.
    pub next_index: usize,
    /// Merkle proof of the range leaf.
    pub merkle_proof: BoundedVec<[u8; 32]>,
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/src/changelog.rs
use light_concurrent_merkle_tree::event::RawIndexedElement;

/// A single entry of the indexed Merkle tree changelog, recording one
/// modification of an indexed element together with its Merkle proof.
///
/// NET_HEIGHT = HEIGHT - CANOPY_DEPTH
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct IndexedChangelogEntry<I, const NET_HEIGHT: usize>
where
    I: Clone,
{
    /// Element that was a subject to the change.
    pub element: RawIndexedElement<I>,
    /// Merkle proof of that operation (`NET_HEIGHT` nodes, i.e. one per
    /// non-canopy tree level).
    pub proof: [[u8; 32]; NET_HEIGHT],
    /// Index of a changelog entry in `ConcurrentMerkleTree` corresponding to
    /// the same operation.
    pub changelog_index: usize,
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/bounded-vec/Cargo.toml
[package]
name = "light-bounded-vec"
version = "1.1.0"
description = "Bounded and cyclic vector implementations"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"

[features]
# Enables conversions of this crate's errors into Solana's `ProgramError`
# (pulls in the optional `solana-program` dependency below).
solana = ["solana-program"]

[dependencies]
# `min_const_generics` enables bytemuck's const-generic array support.
bytemuck = { version = "1.17", features = ["min_const_generics"] }
# Used for `span_of!` field-offset computation in (de)serialization.
memoffset = "0.9"
solana-program = { workspace = true, optional = true }
thiserror = "1.0"

[dev-dependencies]
rand = "0.8"
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/bounded-vec
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/bounded-vec/src/lib.rs
use std::{ alloc::{self, handle_alloc_error, Layout}, fmt, mem, ops::{Index, IndexMut}, ptr::{self, NonNull}, slice::{self, Iter, IterMut, SliceIndex}, }; use memoffset::span_of; use thiserror::Error; #[derive(Debug, Error, PartialEq)] pub enum BoundedVecError { #[error("The vector is full, cannot push any new elements")] Full, #[error("Requested array of size {0}, but the vector has {1} elements")] ArraySize(usize, usize), #[error("The requested start index is out of bounds.")] IterFromOutOfBounds, } #[cfg(feature = "solana")] impl From<BoundedVecError> for u32 { fn from(e: BoundedVecError) -> u32 { match e { BoundedVecError::Full => 8001, BoundedVecError::ArraySize(_, _) => 8002, BoundedVecError::IterFromOutOfBounds => 8003, } } } #[cfg(feature = "solana")] impl From<BoundedVecError> for solana_program::program_error::ProgramError { fn from(e: BoundedVecError) -> Self { solana_program::program_error::ProgramError::Custom(e.into()) } } #[derive(Clone, Debug, Eq, PartialEq)] pub struct BoundedVecMetadata { capacity: usize, length: usize, } impl BoundedVecMetadata { pub fn new(capacity: usize) -> Self { Self { capacity, length: 0, } } pub fn new_with_length(capacity: usize, length: usize) -> Self { Self { capacity, length } } pub fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self { Self { capacity: usize::from_le_bytes(bytes[span_of!(Self, capacity)].try_into().unwrap()), length: usize::from_le_bytes(bytes[span_of!(Self, length)].try_into().unwrap()), } } pub fn to_le_bytes(&self) -> [u8; mem::size_of::<Self>()] { let mut bytes = [0u8; mem::size_of::<Self>()]; bytes[span_of!(Self, capacity)].copy_from_slice(&self.capacity.to_le_bytes()); bytes[span_of!(Self, length)].copy_from_slice(&self.length.to_le_bytes()); bytes } pub fn capacity(&self) -> usize { self.capacity } pub fn length(&self) -> usize { self.length } } /// `BoundedVec` is a custom vector implementation which forbids /// post-initialization reallocations. 
The size is not known during compile /// time (that makes it different from arrays), but can be defined only once /// (that makes it different from [`Vec`](std::vec::Vec)). pub struct BoundedVec<T> where T: Clone, { metadata: *mut BoundedVecMetadata, data: NonNull<T>, } impl<T> BoundedVec<T> where T: Clone, { #[inline] fn metadata_with_capacity(capacity: usize) -> *mut BoundedVecMetadata { let layout = Layout::new::<BoundedVecMetadata>(); let metadata = unsafe { alloc::alloc(layout) as *mut BoundedVecMetadata }; if metadata.is_null() { handle_alloc_error(layout); } unsafe { *metadata = BoundedVecMetadata { capacity, length: 0, }; } metadata } #[inline] fn metadata_from(src_metadata: &BoundedVecMetadata) -> *mut BoundedVecMetadata { let layout = Layout::new::<BoundedVecMetadata>(); let metadata = unsafe { alloc::alloc(layout) as *mut BoundedVecMetadata }; if metadata.is_null() { handle_alloc_error(layout); } unsafe { (*metadata).clone_from(src_metadata) }; metadata } #[inline] fn data_with_capacity(capacity: usize) -> NonNull<T> { let layout = Layout::array::<T>(capacity).unwrap(); let data_ptr = unsafe { alloc::alloc(layout) as *mut T }; if data_ptr.is_null() { handle_alloc_error(layout); } // PANICS: We ensured that the pointer is not NULL. NonNull::new(data_ptr).unwrap() } #[inline] pub fn with_capacity(capacity: usize) -> Self { let metadata = Self::metadata_with_capacity(capacity); let data = Self::data_with_capacity(capacity); Self { metadata, data } } /// Creates a `BoundedVec<T>` with the given `metadata`. /// /// # Safety /// /// This method is unsafe, as it does not guarantee the correctness of /// provided parameters (other than `capacity`). The full responisibility /// is on the caller. 
#[inline] pub unsafe fn with_metadata(metadata: &BoundedVecMetadata) -> Self { let capacity = metadata.capacity(); let metadata = Self::metadata_from(metadata); let data = Self::data_with_capacity(capacity); Self { metadata, data } } pub fn metadata(&self) -> &BoundedVecMetadata { unsafe { &*self.metadata } } pub fn from_array<const N: usize>(array: &[T; N]) -> Self { let mut vec = Self::with_capacity(N); for element in array { // SAFETY: We are sure that the array and the vector have equal // sizes, there is no chance for the error to occur. vec.push(element.clone()).unwrap(); } vec } pub fn from_slice(slice: &[T]) -> Self { let mut vec = Self::with_capacity(slice.len()); for element in slice { // SAFETY: We are sure that the array and the vector have equal // sizes, there is no chance for the error to occur. vec.push(element.clone()).unwrap(); } vec } /// Creates `BoundedVec<T>` directly from a pointer, a capacity, and a length. /// /// # Safety /// /// This is highly unsafe, due to the number of invariants that aren't /// checked: /// /// * `ptr` must have been allocated using the global allocator, such as via /// the [`alloc::alloc`] function. /// * `T` needs to have the same alignment as what `ptr` was allocated with. /// (`T` having a less strict alignment is not sufficient, the alignment really /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be /// allocated and deallocated with the same layout.) /// * The size of `T` times the `capacity` (ie. the allocated size in bytes) needs /// to be the same size as the pointer was allocated with. (Because similar to /// alignment, [`dealloc`] must be called with the same layout `size`.) /// * `length` needs to be less than or equal to `capacity`. /// * The first `length` values must be properly initialized values of type `T`. /// * `capacity` needs to be the capacity that the pointer was allocated with. /// * The allocated size in bytes must be no larger than `isize::MAX`. 
/// See the safety documentation of [`pointer::offset`]. #[inline] pub unsafe fn from_raw_parts(metadata: *mut BoundedVecMetadata, ptr: *mut T) -> Self { let data = NonNull::new(ptr).unwrap(); Self { metadata, data } } /// Returns the total number of elements the vector can hold without /// reallocating. /// /// # Examples /// /// ``` /// let mut vec: Vec<i32> = Vec::with_capacity(10); /// vec.push(42); /// assert!(vec.capacity() >= 10); /// ``` #[inline] pub fn capacity(&self) -> usize { unsafe { (*self.metadata).capacity } } #[inline] pub fn as_slice(&self) -> &[T] { unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len()) } } #[inline] pub fn as_mut_slice(&mut self) -> &mut [T] { unsafe { slice::from_raw_parts_mut(self.data.as_ptr(), self.len()) } } /// Appends an element to the back of a collection. /// /// # Panics /// /// Panics if the new capacity exceeds `isize::MAX` bytes. /// /// # Examples /// /// ``` /// let mut vec = vec![1, 2]; /// vec.push(3); /// assert_eq!(vec, [1, 2, 3]); /// ``` #[inline] pub fn push(&mut self, value: T) -> Result<(), BoundedVecError> { if self.len() == self.capacity() { return Err(BoundedVecError::Full); } unsafe { ptr::write(self.data.as_ptr().add(self.len()), value) }; self.inc_len(); Ok(()) } #[inline] pub fn len(&self) -> usize { unsafe { (*self.metadata).length } } #[inline] fn inc_len(&mut self) { unsafe { (*self.metadata).length += 1 }; } pub fn is_empty(&self) -> bool { self.len() == 0 } #[inline] pub fn get(&self, index: usize) -> Option<&T> { if index >= self.len() { return None; } let cell = unsafe { &*self.data.as_ptr().add(index) }; Some(cell) } #[inline] pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { if index >= self.len() { return None; } let cell = unsafe { &mut *self.data.as_ptr().add(index) }; Some(cell) } /// Returns a mutable pointer to `BoundedVec`'s buffer. 
#[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut T { self.data.as_ptr() } #[inline] pub fn iter(&self) -> Iter<'_, T> { self.as_slice().iter() } #[inline] pub fn iter_mut(&mut self) -> IterMut<'_, T> { self.as_mut_slice().iter_mut() } #[inline] pub fn last(&self) -> Option<&T> { if self.is_empty() { return None; } self.get(self.len() - 1) } #[inline] pub fn last_mut(&mut self) -> Option<&mut T> { if self.is_empty() { return None; } self.get_mut(self.len() - 1) } pub fn to_array<const N: usize>(&self) -> Result<[T; N], BoundedVecError> { if self.len() != N { return Err(BoundedVecError::ArraySize(N, self.len())); } Ok(std::array::from_fn(|i| self.get(i).unwrap().clone())) } pub fn to_vec(self) -> Vec<T> { self.as_slice().to_vec() } pub fn extend<U: IntoIterator<Item = T>>(&mut self, iter: U) -> Result<(), BoundedVecError> { for item in iter { self.push(item)?; } Ok(()) } } impl<T> Clone for BoundedVec<T> where T: Clone, { fn clone(&self) -> Self { // Create a new buffer with the same capacity as the original let layout = Layout::new::<BoundedVecMetadata>(); let metadata = unsafe { alloc::alloc(layout) as *mut BoundedVecMetadata }; if metadata.is_null() { handle_alloc_error(layout); } unsafe { *metadata = (*self.metadata).clone() }; let layout = Layout::array::<T>(self.capacity()).unwrap(); let data_ptr = unsafe { alloc::alloc(layout) as *mut T }; if data_ptr.is_null() { handle_alloc_error(layout); } let data = NonNull::new(data_ptr).unwrap(); // Copy elements from the original data slice to the new slice let new_vec = Self { metadata, data }; // Clone each element into the new vector for i in 0..self.len() { unsafe { ptr::write(data_ptr.add(i), (*self.get(i).unwrap()).clone()) }; } new_vec } } impl<T> fmt::Debug for BoundedVec<T> where T: Clone + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.as_slice()) } } impl<T> Drop for BoundedVec<T> where T: Clone, { fn drop(&mut self) { let layout = 
Layout::array::<T>(self.capacity()).unwrap(); unsafe { alloc::dealloc(self.data.as_ptr() as *mut u8, layout) }; let layout = Layout::new::<BoundedVecMetadata>(); unsafe { alloc::dealloc(self.metadata as *mut u8, layout) }; } } impl<T, I: SliceIndex<[T]>> Index<I> for BoundedVec<T> where T: Clone, I: SliceIndex<[T]>, { type Output = I::Output; #[inline] fn index(&self, index: I) -> &Self::Output { self.as_slice().index(index) } } impl<T, I> IndexMut<I> for BoundedVec<T> where T: Clone, I: SliceIndex<[T]>, { fn index_mut(&mut self, index: I) -> &mut Self::Output { self.as_mut_slice().index_mut(index) } } impl<T> IntoIterator for BoundedVec<T> where T: Clone, { type Item = T; type IntoIter = BoundedVecIntoIterator<T>; fn into_iter(self) -> Self::IntoIter { BoundedVecIntoIterator { vec: self, current: 0, } } } impl<T> PartialEq for BoundedVec<T> where T: Clone + PartialEq, { fn eq(&self, other: &Self) -> bool { self.iter().eq(other.iter()) } } impl<T> Eq for BoundedVec<T> where T: Clone + Eq {} pub struct BoundedVecIntoIterator<T> where T: Clone, { vec: BoundedVec<T>, current: usize, } impl<T> Iterator for BoundedVecIntoIterator<T> where T: Clone, { type Item = T; fn next(&mut self) -> Option<Self::Item> { let element = self.vec.get(self.current).map(|element| element.to_owned()); self.current += 1; element } } #[derive(Clone, Debug, Eq, PartialEq)] pub struct CyclicBoundedVecMetadata { capacity: usize, length: usize, first_index: usize, last_index: usize, } impl CyclicBoundedVecMetadata { pub fn new(capacity: usize) -> Self { Self { capacity, length: 0, first_index: 0, last_index: 0, } } pub fn new_with_indices( capacity: usize, length: usize, first_index: usize, last_index: usize, ) -> Self { Self { capacity, length, first_index, last_index, } } pub fn from_le_bytes(bytes: [u8; mem::size_of::<CyclicBoundedVecMetadata>()]) -> Self { Self { capacity: usize::from_le_bytes(bytes[span_of!(Self, capacity)].try_into().unwrap()), length: 
usize::from_le_bytes(bytes[span_of!(Self, length)].try_into().unwrap()), first_index: usize::from_le_bytes( bytes[span_of!(Self, first_index)].try_into().unwrap(), ), last_index: usize::from_le_bytes(bytes[span_of!(Self, last_index)].try_into().unwrap()), } } pub fn to_le_bytes(&self) -> [u8; mem::size_of::<Self>()] { let mut bytes = [0u8; mem::size_of::<Self>()]; bytes[span_of!(Self, capacity)].copy_from_slice(&self.capacity.to_le_bytes()); bytes[span_of!(Self, length)].copy_from_slice(&self.length.to_le_bytes()); bytes[span_of!(Self, first_index)].copy_from_slice(&self.first_index.to_le_bytes()); bytes[span_of!(Self, last_index)].copy_from_slice(&self.last_index.to_le_bytes()); bytes } pub fn capacity(&self) -> usize { self.capacity } pub fn length(&self) -> usize { self.length } } /// `CyclicBoundedVec` is a wrapper around [`Vec`](std::vec::Vec) which: /// /// * Forbids post-initialization reallocations. /// * Starts overwriting elements from the beginning once it reaches its /// capacity. 
pub struct CyclicBoundedVec<T> where T: Clone, { metadata: *mut CyclicBoundedVecMetadata, data: NonNull<T>, } impl<T> CyclicBoundedVec<T> where T: Clone, { #[inline] fn metadata_with_capacity(capacity: usize) -> *mut CyclicBoundedVecMetadata { let layout = Layout::new::<CyclicBoundedVecMetadata>(); let metadata = unsafe { alloc::alloc(layout) as *mut CyclicBoundedVecMetadata }; if metadata.is_null() { handle_alloc_error(layout); } unsafe { *metadata = CyclicBoundedVecMetadata { capacity, length: 0, first_index: 0, last_index: 0, }; } metadata } #[inline] fn metadata_from(src_metadata: &CyclicBoundedVecMetadata) -> *mut CyclicBoundedVecMetadata { let layout = Layout::new::<CyclicBoundedVecMetadata>(); let metadata = unsafe { alloc::alloc(layout) as *mut CyclicBoundedVecMetadata }; if metadata.is_null() { handle_alloc_error(layout); } unsafe { (*metadata).clone_from(src_metadata) }; metadata } #[inline] fn data_with_capacity(capacity: usize) -> NonNull<T> { let layout = Layout::array::<T>(capacity).unwrap(); let data_ptr = unsafe { alloc::alloc(layout) as *mut T }; if data_ptr.is_null() { handle_alloc_error(layout); } // PANICS: We ensured that the pointer is not NULL. NonNull::new(data_ptr).unwrap() } #[inline] pub fn with_capacity(capacity: usize) -> Self { let metadata = Self::metadata_with_capacity(capacity); let data = Self::data_with_capacity(capacity); Self { metadata, data } } /// Creates a `CyclicBoundedVec<T>` with the given `metadata`. /// /// # Safety /// /// This method is unsafe, as it does not guarantee the correctness of /// provided parameters (other than `capacity`). The full responisibility /// is on the caller. 
#[inline] pub unsafe fn with_metadata(metadata: &CyclicBoundedVecMetadata) -> Self { let capacity = metadata.capacity(); let metadata = Self::metadata_from(metadata); let data = Self::data_with_capacity(capacity); Self { metadata, data } } pub fn metadata(&self) -> &CyclicBoundedVecMetadata { unsafe { &*self.metadata } } /// Creates a `CyclicBoundedVec<T>` directly from a pointer, a capacity, and a length. /// /// # Safety /// /// This is highly unsafe, due to the number of invariants that aren't /// checked: /// /// * `ptr` must have been allocated using the global allocator, such as via /// the [`alloc::alloc`] function. /// * `T` needs to have the same alignment as what `ptr` was allocated with. /// (`T` having a less strict alignment is not sufficient, the alignment really /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be /// allocated and deallocated with the same layout.) /// * The size of `T` times the `capacity` (ie. the allocated size in bytes) needs /// to be the same size as the pointer was allocated with. (Because similar to /// alignment, [`dealloc`] must be called with the same layout `size`.) /// * `length` needs to be less than or equal to `capacity`. /// * The first `length` values must be properly initialized values of type `T`. /// * `capacity` needs to be the capacity that the pointer was allocated with. /// * The allocated size in bytes must be no larger than `isize::MAX`. /// See the safety documentation of [`pointer::offset`]. #[inline] pub unsafe fn from_raw_parts(metadata: *mut CyclicBoundedVecMetadata, ptr: *mut T) -> Self { let data = NonNull::new(ptr).unwrap(); Self { metadata, data } } /// Returns the total number of elements the vector can hold without /// reallocating. 
/// /// # Examples /// /// ``` /// let mut vec: Vec<i32> = Vec::with_capacity(10); /// vec.push(42); /// assert!(vec.capacity() >= 10); /// ``` #[inline] pub fn capacity(&self) -> usize { unsafe { (*self.metadata).capacity } } #[inline] pub fn as_slice(&self) -> &[T] { unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len()) } } /// Appends an element to the back of a collection. /// /// # Examples /// /// ``` /// let mut vec = vec![1, 2]; /// vec.push(3); /// assert_eq!(vec, [1, 2, 3]); /// ``` #[inline] pub fn push(&mut self, value: T) { if self.is_empty() { self.inc_len(); } else if self.len() < self.capacity() { self.inc_len(); self.inc_last_index(); } else { self.inc_last_index(); self.inc_first_index(); } // SAFETY: We made sure that `last_index` doesn't exceed the capacity. unsafe { std::ptr::write(self.data.as_ptr().add(self.last_index()), value); } } #[inline] pub fn len(&self) -> usize { unsafe { (*self.metadata).length } } #[inline] fn inc_len(&mut self) { unsafe { (*self.metadata).length += 1 } } pub fn is_empty(&self) -> bool { self.len() == 0 } #[inline] pub fn get(&self, index: usize) -> Option<&T> { if index >= self.len() { return None; } let cell = unsafe { &*self.data.as_ptr().add(index) }; Some(cell) } #[inline] pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { if index >= self.len() { return None; } let cell = unsafe { &mut *self.data.as_ptr().add(index) }; Some(cell) } /// Returns a mutable pointer to `BoundedVec`'s buffer. 
#[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut T { self.data.as_ptr() } #[inline] pub fn iter(&self) -> CyclicBoundedVecIterator<'_, T> { CyclicBoundedVecIterator { vec: self, current: self.first_index(), is_finished: false, } } #[inline] pub fn iter_from( &self, start: usize, ) -> Result<CyclicBoundedVecIterator<'_, T>, BoundedVecError> { if start >= self.len() { return Err(BoundedVecError::IterFromOutOfBounds); } Ok(CyclicBoundedVecIterator { vec: self, current: start, is_finished: false, }) } #[inline] pub fn first_index(&self) -> usize { unsafe { (*self.metadata).first_index } } #[inline] fn inc_first_index(&self) { unsafe { (*self.metadata).first_index = ((*self.metadata).first_index + 1) % self.capacity(); } } #[inline] pub fn first(&self) -> Option<&T> { self.get(self.first_index()) } #[inline] pub fn first_mut(&mut self) -> Option<&mut T> { self.get_mut(self.first_index()) } #[inline] pub fn last_index(&self) -> usize { unsafe { (*self.metadata).last_index } } #[inline] fn inc_last_index(&mut self) { unsafe { (*self.metadata).last_index = ((*self.metadata).last_index + 1) % self.capacity(); } } #[inline] pub fn last(&self) -> Option<&T> { self.get(self.last_index()) } #[inline] pub fn last_mut(&mut self) -> Option<&mut T> { self.get_mut(self.last_index()) } } impl<T> fmt::Debug for CyclicBoundedVec<T> where T: Clone + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.as_slice()) } } impl<T> Drop for CyclicBoundedVec<T> where T: Clone, { fn drop(&mut self) { let layout = Layout::array::<T>(self.capacity()).unwrap(); unsafe { alloc::dealloc(self.data.as_ptr() as *mut u8, layout) }; let layout = Layout::new::<CyclicBoundedVecMetadata>(); unsafe { alloc::dealloc(self.metadata as *mut u8, layout) }; } } impl<T> Index<usize> for CyclicBoundedVec<T> where T: Clone, { type Output = T; #[inline] fn index(&self, index: usize) -> &Self::Output { self.get(index).unwrap() } } impl<T> IndexMut<usize> for 
CyclicBoundedVec<T> where T: Clone, { #[inline] fn index_mut(&mut self, index: usize) -> &mut Self::Output { self.get_mut(index).unwrap() } } impl<T> PartialEq for CyclicBoundedVec<T> where T: Clone + PartialEq, { fn eq(&self, other: &Self) -> bool { self.iter().eq(other.iter()) } } impl<T> Eq for CyclicBoundedVec<T> where T: Clone + Eq {} pub struct CyclicBoundedVecIterator<'a, T> where T: Clone, { vec: &'a CyclicBoundedVec<T>, current: usize, is_finished: bool, } impl<'a, T> Iterator for CyclicBoundedVecIterator<'a, T> where T: Clone, { type Item = &'a T; fn next(&mut self) -> Option<Self::Item> { if self.vec.capacity() == 0 || self.is_finished { None } else { if self.current == self.vec.last_index() { self.is_finished = true; } let new_current = (self.current + 1) % self.vec.capacity(); let element = self.vec.get(self.current); self.current = new_current; element } } } #[cfg(test)] mod test { use std::array; use rand::{ distributions::{Distribution, Standard}, thread_rng, Rng, }; use super::*; use rand::distributions::uniform::{SampleRange, SampleUniform}; /// Generates a random value in the given range, excluding the values provided /// in `exclude`. fn gen_range_exclude<N, R, T>(rng: &mut N, range: R, exclude: &[T]) -> T where N: Rng, R: Clone + SampleRange<T>, T: PartialEq + SampleUniform, { loop { // This utility is supposed to be used only in unit tests. This `clone` // is harmless and necessary (can't pass a reference to range, it has // to be moved). 
let sample = rng.gen_range(range.clone()); if !exclude.contains(&sample) { return sample; } } } #[test] fn test_gen_range_exclude() { let mut rng = thread_rng(); for n_excluded in 1..100 { let excluded: Vec<u64> = (0..n_excluded).map(|_| rng.gen_range(0..100)).collect(); for _ in 0..10_000 { let sample = gen_range_exclude(&mut rng, 0..100, excluded.as_slice()); for excluded in excluded.iter() { assert_ne!(&sample, excluded); } } } } fn rand_bounded_vec<T>() -> BoundedVec<T> where T: Clone, Standard: Distribution<T>, { let mut rng = rand::thread_rng(); let capacity = rng.gen_range(1..1000); let length = rng.gen_range(0..capacity); let mut bounded_vec = BoundedVec::<T>::with_capacity(capacity); for _ in 0..length { let element = rng.gen(); bounded_vec.push(element).unwrap(); } bounded_vec } #[test] fn test_bounded_vec_metadata_serialization() { let mut rng = thread_rng(); for _ in 0..1000 { let capacity = rng.gen(); let metadata = BoundedVecMetadata::new(capacity); assert_eq!(metadata.capacity(), capacity); assert_eq!(metadata.length(), 0); let bytes = metadata.to_le_bytes(); let metadata_2 = BoundedVecMetadata::from_le_bytes(bytes); assert_eq!(metadata, metadata_2); } } #[test] fn test_bounded_vec_with_capacity() { for capacity in 0..1024 { let bounded_vec = BoundedVec::<u32>::with_capacity(capacity); assert_eq!(bounded_vec.capacity(), capacity); assert_eq!(bounded_vec.len(), 0); } } fn bounded_vec_from_array<const N: usize>() { let mut rng = thread_rng(); let arr: [u64; N] = array::from_fn(|_| rng.gen()); let vec = BoundedVec::from_array(&arr); assert_eq!(&arr, vec.as_slice()); } #[test] fn test_bounded_vec_from_array_256() { bounded_vec_from_array::<256>() } #[test] fn test_bounded_vec_from_array_512() { bounded_vec_from_array::<512>() } #[test] fn test_bounded_vec_from_array_1024() { bounded_vec_from_array::<1024>() } #[test] fn test_bounded_vec_from_slice() { let mut rng = thread_rng(); for capacity in 0..10_000 { let vec: Vec<u64> = (0..capacity).map(|_| 
rng.gen()).collect(); let bounded_vec = BoundedVec::from_slice(&vec); assert_eq!(vec.as_slice(), bounded_vec.as_slice()); } } #[test] fn test_bounded_vec_is_empty() { let mut rng = thread_rng(); let mut vec = BoundedVec::with_capacity(1000); assert!(vec.is_empty()); for _ in 0..1000 { let element: u64 = rng.gen(); vec.push(element).unwrap(); assert!(!vec.is_empty()); } } #[test] fn test_bounded_vec_get() { let mut vec = BoundedVec::with_capacity(1000); for i in 0..1000 { assert!(vec.get(i).is_none()); vec.push(i).unwrap(); } for i in 0..1000 { assert_eq!(vec.get(i), Some(&i)); } for i in 1000..10_000 { assert!(vec.get(i).is_none()); } } #[test] fn test_bounded_vec_get_mut() { let mut vec = BoundedVec::with_capacity(1000); for i in 0..1000 { assert!(vec.get_mut(i).is_none()); vec.push(i).unwrap(); } for i in 0..1000 { let element = vec.get_mut(i).unwrap(); assert_eq!(element, &i); *element = i * 2; } for i in 0..1000 { assert_eq!(vec.get_mut(i), Some(&mut (i * 2))); } for i in 1000..10_000 { assert!(vec.get_mut(i).is_none()); } } #[test] fn test_bounded_vec_iter_mut() { let mut vec = BoundedVec::with_capacity(1000); for i in 0..1000 { vec.push(i).unwrap(); } for (i, element) in vec.iter().enumerate() { assert_eq!(*element, i); } for element in vec.iter_mut() { *element = *element * 2; } for (i, element) in vec.iter().enumerate() { assert_eq!(*element, i * 2); } } #[test] fn test_bounded_vec_last() { let mut rng = thread_rng(); let mut vec = BoundedVec::with_capacity(1000); assert!(vec.last().is_none()); for _ in 0..1000 { let element: u64 = rng.gen(); vec.push(element).unwrap(); assert_eq!(vec.last(), Some(&element)); } } #[test] fn test_bounded_vec_last_mut() { let mut rng = thread_rng(); let mut vec = BoundedVec::with_capacity(1000); assert!(vec.last_mut().is_none()); for _ in 0..1000 { let element_old: u64 = rng.gen(); vec.push(element_old).unwrap(); let element_ref = vec.last_mut().unwrap(); assert_eq!(*element_ref, element_old); // Assign a new value. 
let element_new: u64 = rng.gen(); *element_ref = element_new; // Assert that it took the effect. let element_ref = vec.last_mut().unwrap(); assert_eq!(*element_ref, element_new); } } #[test] fn test_bounded_vec_to_array() { let vec = BoundedVec::from_array(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); let arr: [u32; 16] = vec.to_array().unwrap(); assert_eq!(arr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); assert!(matches!( vec.to_array::<15>(), Err(BoundedVecError::ArraySize(_, _)) )); assert!(matches!( vec.to_array::<17>(), Err(BoundedVecError::ArraySize(_, _)) )); } #[test] fn test_bounded_vec_to_vec() { let mut rng = thread_rng(); for capacity in (0..10_000).step_by(100) { let vec_1: Vec<u64> = (0..capacity).map(|_| rng.gen()).collect(); let bounded_vec = BoundedVec::from_slice(&vec_1); let vec_2 = bounded_vec.to_vec(); assert_eq!(vec_1.as_slice(), vec_2.as_slice()); } } #[test] fn test_bounded_vec_extend() { let mut rng = thread_rng(); for capacity in (1..10_000).step_by(100) { let length = rng.gen_range(0..capacity); let mut vec = BoundedVec::with_capacity(capacity); vec.extend(0..length).unwrap(); assert_eq!(vec.capacity(), capacity); assert_eq!(vec.len(), length); for (element_1, element_2) in vec.iter().zip(0..length) { assert_eq!(*element_1, element_2); } } } #[test] fn test_bounded_vec_clone() { for _ in 0..1000 { let bounded_vec = rand_bounded_vec::<u32>(); let cloned_bounded_vec = bounded_vec.clone(); assert_eq!(bounded_vec.capacity(), cloned_bounded_vec.capacity()); assert_eq!(bounded_vec.len(), cloned_bounded_vec.len()); assert_eq!(bounded_vec, cloned_bounded_vec); } } #[test] fn test_bounded_vec_index() { let mut vec = BoundedVec::with_capacity(1000); for i in 0..1000 { vec.push(i).unwrap(); } for i in 0..1000 { assert_eq!(vec[i], i); } for i in 0..1000 { vec[i] = i * 2; } for i in 0..1000 { assert_eq!(vec[i], i * 2); } } #[test] fn test_bounded_vec_into_iter() { let mut vec = BoundedVec::with_capacity(1000); for i in 0..1000 
{ vec.push(i).unwrap(); } for (i, element) in vec.into_iter().enumerate() { assert_eq!(element, i); } } #[test] fn test_cyclic_bounded_vec_metadata_serialization() { let mut rng = thread_rng(); for _ in 0..1000 { let capacity = rng.gen(); let metadata = CyclicBoundedVecMetadata::new(capacity); assert_eq!(metadata.capacity(), capacity); assert_eq!(metadata.length(), 0); let bytes = metadata.to_le_bytes(); let metadata_2 = CyclicBoundedVecMetadata::from_le_bytes(bytes); assert_eq!(metadata, metadata_2); } } #[test] fn test_cyclic_bounded_vec_with_capacity() { for capacity in 0..1024 { let cyclic_bounded_vec = CyclicBoundedVec::<u32>::with_capacity(capacity); assert_eq!(cyclic_bounded_vec.capacity(), capacity); assert_eq!(cyclic_bounded_vec.len(), 0); assert_eq!(cyclic_bounded_vec.first_index(), 0); assert_eq!(cyclic_bounded_vec.last_index(), 0); } } #[test] fn test_cyclic_bounded_vec_is_empty() { let mut rng = thread_rng(); let mut vec = CyclicBoundedVec::with_capacity(1000); assert!(vec.is_empty()); for _ in 0..1000 { let element: u64 = rng.gen(); vec.push(element); assert!(!vec.is_empty()); } } #[test] fn test_cyclic_bounded_vec_get() { let mut vec = CyclicBoundedVec::with_capacity(1000); for i in 0..1000 { vec.push(i); } for i in 0..1000 { assert_eq!(vec.get(i), Some(&i)); } for i in 1000..10_000 { assert!(vec.get(i).is_none()); } } #[test] fn test_cyclic_bounded_vec_get_mut() { let mut vec = CyclicBoundedVec::with_capacity(1000); for i in 0..2000 { vec.push(i); } for i in 0..1000 { let element = vec.get_mut(i).unwrap(); assert_eq!(*element, 1000 + i); *element = i * 2; } for i in 0..1000 { assert_eq!(vec.get_mut(i), Some(&mut (i * 2))); } for i in 1000..10_000 { assert!(vec.get_mut(i).is_none()); } } #[test] fn test_cyclic_bounded_vec_first() { let mut vec = CyclicBoundedVec::with_capacity(500); assert!(vec.first().is_none()); for i in 0..1000 { vec.push(i); assert_eq!(vec.first(), Some(&((i as u64).saturating_sub(499)))); } } #[test] fn 
test_cyclic_bounded_vec_last() { let mut rng = thread_rng(); let mut vec = CyclicBoundedVec::with_capacity(500); assert!(vec.last().is_none()); for _ in 0..1000 { let element: u64 = rng.gen(); vec.push(element); assert_eq!(vec.last(), Some(&element)); } } #[test] fn test_cyclic_bounded_vec_last_mut() { let mut rng = thread_rng(); let mut vec = CyclicBoundedVec::with_capacity(500); assert!(vec.last_mut().is_none()); for _ in 0..1000 { let element_old: u64 = rng.gen(); vec.push(element_old); let element_ref = vec.last_mut().unwrap(); assert_eq!(*element_ref, element_old); // Assign a new value. let element_new: u64 = rng.gen(); *element_ref = element_new; // Assert that it took the effect. let element_ref = vec.last_mut().unwrap(); assert_eq!(*element_ref, element_new); } } #[test] fn test_cyclic_bounded_vec_manual() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(8); // Fill up the cyclic vector. // // ``` // ^ $ // index [0, 1, 2, 3, 4, 5, 6, 7] // value [0, 1, 2, 3, 4, 5, 6, 7] // ``` // // * `^` - first element // * `$` - last element for i in 0..8 { cyclic_bounded_vec.push(i); } assert_eq!(cyclic_bounded_vec.first_index(), 0); assert_eq!(cyclic_bounded_vec.last_index(), 7); assert_eq!( cyclic_bounded_vec.iter().collect::<Vec<_>>().as_slice(), &[&0, &1, &2, &3, &4, &5, &6, &7] ); // Overwrite half of values. // // ``` // $ ^ // index [0, 1, 2, 3, 4, 5, 6, 7] // value [8, 9, 10, 11, 4, 5, 6, 7] // ``` // // * `^` - first element // * `$` - last element for i in 0..4 { cyclic_bounded_vec.push(i + 8); } assert_eq!(cyclic_bounded_vec.first_index(), 4); assert_eq!(cyclic_bounded_vec.last_index(), 3); assert_eq!( cyclic_bounded_vec.iter().collect::<Vec<_>>().as_slice(), &[&4, &5, &6, &7, &8, &9, &10, &11] ); // Overwrite even more. 
// // ``` // $ ^ // index [0, 1, 2, 3, 4, 5, 6, 7] // value [8, 9, 10, 11, 12, 13, 6, 7] // ``` // // * `^` - first element // * `$` - last element for i in 0..2 { cyclic_bounded_vec.push(i + 12); } assert_eq!(cyclic_bounded_vec.first_index(), 6); assert_eq!(cyclic_bounded_vec.last_index(), 5); assert_eq!( cyclic_bounded_vec.iter().collect::<Vec<_>>().as_slice(), &[&6, &7, &8, &9, &10, &11, &12, &13] ); // Overwrite all values from the first loop. // // ``` // ^ $ // index [0, 1, 2, 3, 4, 5, 6, 7] // value [8, 9, 10, 11, 12, 13, 14, 15] // ``` // // * `^` - first element // * `$` - last element for i in 0..2 { cyclic_bounded_vec.push(i + 14); } assert_eq!(cyclic_bounded_vec.first_index(), 0); assert_eq!(cyclic_bounded_vec.last_index(), 7); assert_eq!( cyclic_bounded_vec.iter().collect::<Vec<_>>().as_slice(), &[&8, &9, &10, &11, &12, &13, &14, &15] ); } /// Iteration on a vector with one element. /// /// ``` /// ^$ /// index [0] /// value [0] /// ``` /// /// * `^` - first element /// * `$` - last element /// * `#` - visited elements /// /// Length: 1 /// Capacity: 8 /// First index: 0 /// Last index: 0 /// /// Start iteration from: 0 /// /// Should iterate over one element. #[test] fn test_cyclic_bounded_vec_iter_one_element() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(8); cyclic_bounded_vec.push(0); assert_eq!(cyclic_bounded_vec.len(), 1); assert_eq!(cyclic_bounded_vec.capacity(), 8); assert_eq!(cyclic_bounded_vec.first_index(), 0); assert_eq!(cyclic_bounded_vec.last_index(), 0); let elements = cyclic_bounded_vec.iter().collect::<Vec<_>>(); assert_eq!(elements.len(), 1); assert_eq!(elements.as_slice(), &[&0]); let elements = cyclic_bounded_vec.iter_from(0).unwrap().collect::<Vec<_>>(); assert_eq!(elements.len(), 1); assert_eq!(elements.as_slice(), &[&0]); } /// Iteration without reset in a vector which is not full. 
/// /// ``` /// # # # # /// ^ $ /// index [0, 1, 2, 3, 4, 5] /// value [0, 1, 2, 3, 4, 5] /// ``` /// /// * `^` - first element /// * `$` - last element /// * `#` - visited elements /// /// Length: 6 /// Capacity: 8 /// First index: 0 /// Last index: 5 /// /// Start iteration from: 2 /// /// Should iterate over elements from 2 to 5, with 4 iterations. #[test] fn test_cyclic_bounded_vec_iter_from_without_reset_not_full_6_8_4() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(8); for i in 0..6 { cyclic_bounded_vec.push(i); } assert_eq!(cyclic_bounded_vec.len(), 6); assert_eq!(cyclic_bounded_vec.capacity(), 8); assert_eq!(cyclic_bounded_vec.first_index(), 0); assert_eq!(cyclic_bounded_vec.last_index(), 5); let elements = cyclic_bounded_vec.iter_from(2).unwrap().collect::<Vec<_>>(); assert_eq!(elements.len(), 4); assert_eq!(elements.as_slice(), &[&2, &3, &4, &5]); } /// Iteration without reset in a vector which is full. /// /// ``` /// # # # /// ^ $ /// index [0, 1, 2, 3, 4] /// value [0, 1, 2, 3, 4] /// ``` /// /// * `^` - first element /// * `$` - last element /// * `#` - visited elements /// /// Length: 5 /// Capacity: 5 /// First index: 0 /// Last index: 4 /// /// Start iteration from: 2 /// /// Should iterate over elements 2..4 - 3 iterations. #[test] fn test_cyclic_bounded_vec_iter_from_without_reset_not_full_5_5_4() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(5); for i in 0..5 { cyclic_bounded_vec.push(i); } assert_eq!(cyclic_bounded_vec.len(), 5); assert_eq!(cyclic_bounded_vec.capacity(), 5); assert_eq!(cyclic_bounded_vec.first_index(), 0); assert_eq!(cyclic_bounded_vec.last_index(), 4); let elements = cyclic_bounded_vec.iter_from(2).unwrap().collect::<Vec<_>>(); assert_eq!(elements.len(), 3); assert_eq!(elements.as_slice(), &[&2, &3, &4]); } /// Iteration without reset in a vector which is full. 
/// /// ``` /// # # # # # # /// ^ $ /// index [0, 1, 2, 3, 4, 5, 6, 7] /// value [0, 1, 2, 3, 4, 5, 6, 7] /// ``` /// /// * `^` - first element /// * `$` - last element /// * `#` - visited elements /// /// Length: 8 /// Capacity: 8 /// First index: 0 /// Last index: 7 /// /// Start iteration from: 2 /// /// Should iterate over elements 2..7 - 6 iterations. #[test] fn test_cyclic_bounded_vec_iter_from_without_reset_full_8_8_6() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(8); for i in 0..8 { cyclic_bounded_vec.push(i); } assert_eq!(cyclic_bounded_vec.len(), 8); assert_eq!(cyclic_bounded_vec.capacity(), 8); assert_eq!(cyclic_bounded_vec.first_index(), 0); assert_eq!(cyclic_bounded_vec.last_index(), 7); let elements = cyclic_bounded_vec.iter_from(2).unwrap().collect::<Vec<_>>(); assert_eq!(elements.len(), 6); assert_eq!(elements.as_slice(), &[&2, &3, &4, &5, &6, &7]); } /// Iteration with reset. /// /// Insert elements over capacity, so the vector resets and starts /// overwriting elements from the start - 12 elements into a vector with /// capacity 8. /// /// The resulting data structure looks like: /// /// ``` /// # # # # # # /// $ ^ /// index [0, 1, 2, 3, 4, 5, 6, 7] /// value [8, 9, 10, 11, 4, 5, 6, 7] /// ``` /// /// * `^` - first element /// * `$` - last element /// * `#` - visited elements /// /// Length: 8 /// Capacity: 8 /// First: 4 /// Last: 3 /// /// Start iteration from: 6 /// /// Should iterate over elements 6..7 and 8..11 - 6 iterations. 
#[test] fn test_cyclic_bounded_vec_iter_from_reset() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(8); for i in 0..12 { cyclic_bounded_vec.push(i); } assert_eq!(cyclic_bounded_vec.len(), 8); assert_eq!(cyclic_bounded_vec.capacity(), 8); assert_eq!(cyclic_bounded_vec.first_index(), 4); assert_eq!(cyclic_bounded_vec.last_index(), 3); let elements = cyclic_bounded_vec.iter_from(6).unwrap().collect::<Vec<_>>(); assert_eq!(elements.len(), 6); assert_eq!(elements.as_slice(), &[&6, &7, &8, &9, &10, &11]); } #[test] fn test_cyclic_bounded_vec_iter_from_out_of_bounds_not_full() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(8); for i in 0..4 { cyclic_bounded_vec.push(i); } // Try `start` values in bounds. for i in 0..4 { let elements = cyclic_bounded_vec.iter_from(i).unwrap().collect::<Vec<_>>(); assert_eq!(elements.len(), 4 - i); let expected = (i..4).collect::<Vec<_>>(); // Just to coerce it to have references... let expected = expected.iter().collect::<Vec<_>>(); assert_eq!(elements.as_slice(), expected.as_slice()); } // Try `start` values out of bounds. for i in 4..1000 { let elements = cyclic_bounded_vec.iter_from(i); assert!(matches!( elements, Err(BoundedVecError::IterFromOutOfBounds) )); } } #[test] fn test_cyclic_bounded_vec_iter_from_out_of_bounds_full() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(8); for i in 0..12 { cyclic_bounded_vec.push(i); } // Try different `start` values which are out of bounds. 
for start in 8..1000 { let elements = cyclic_bounded_vec.iter_from(start); assert!(matches!( elements, Err(BoundedVecError::IterFromOutOfBounds) )); } } #[test] fn test_cyclic_bounded_vec_iter_from_out_of_bounds_iter_from() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(8); for i in 0..8 { assert!(matches!( cyclic_bounded_vec.iter_from(i), Err(BoundedVecError::IterFromOutOfBounds) )); cyclic_bounded_vec.push(i); } } #[test] fn test_cyclic_bounded_vec_overwrite() { let mut cyclic_bounded_vec = CyclicBoundedVec::with_capacity(64); for i in 0..256 { cyclic_bounded_vec.push(i); } assert_eq!(cyclic_bounded_vec.len(), 64); assert_eq!(cyclic_bounded_vec.capacity(), 64); assert_eq!( cyclic_bounded_vec.iter().collect::<Vec<_>>().as_slice(), &[ &192, &193, &194, &195, &196, &197, &198, &199, &200, &201, &202, &203, &204, &205, &206, &207, &208, &209, &210, &211, &212, &213, &214, &215, &216, &217, &218, &219, &220, &221, &222, &223, &224, &225, &226, &227, &228, &229, &230, &231, &232, &233, &234, &235, &236, &237, &238, &239, &240, &241, &242, &243, &244, &245, &246, &247, &248, &249, &250, &251, &252, &253, &254, &255 ] ); } }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/Cargo.toml
[package] name = "light-concurrent-merkle-tree" version = "1.1.0" edition = "2021" description = "Concurrent Merkle tree implementation" repository = "https://github.com/Lightprotocol/light-protocol" license = "Apache-2.0" [features] heavy-tests = [] solana = [ "light-bounded-vec/solana", "light-hasher/solana", "solana-program" ] [dependencies] borsh = "0.10" bytemuck = "1.17" light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" } light-hasher = { path = "../hasher", version = "1.1.0" } light-utils = { version = "1.1.0", path = "../../utils" } memoffset = "0.9" solana-program = { workspace = true, optional = true } thiserror = "1.0" [dev-dependencies] ark-bn254 = "0.4" ark-ff = "0.4" light-merkle-tree-reference = { path = "../reference", version = "1.1.0" } light-hash-set = { workspace = true, features = ["solana"] } rand = "0.8" solana-program = { workspace = true } spl-account-compression = { version = "0.3.0", default-features = false} spl-concurrent-merkle-tree = { version = "0.2.0", default-features = false} tokio = { workspace = true } num-bigint = "0.4" num-traits = "0.2"
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/tests/tests.rs
use ark_bn254::Fr; use ark_ff::{BigInteger, PrimeField, UniformRand}; use light_bounded_vec::{BoundedVec, BoundedVecError, CyclicBoundedVec}; use light_concurrent_merkle_tree::{ changelog::{ChangelogEntry, ChangelogPath}, errors::ConcurrentMerkleTreeError, zero_copy::ConcurrentMerkleTreeZeroCopyMut, ConcurrentMerkleTree, }; use light_hash_set::HashSet; use light_hasher::{Hasher, Keccak, Poseidon, Sha256}; use light_utils::rand::gen_range_exclude; use num_bigint::BigUint; use num_traits::FromBytes; use rand::{rngs::ThreadRng, seq::SliceRandom, thread_rng, Rng}; use std::cmp; /// Tests whether append operations work as expected. fn append<H, const CANOPY: usize>() where H: Hasher, { const HEIGHT: usize = 4; const CHANGELOG: usize = 32; const ROOTS: usize = 256; let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); let leaf1 = H::hash(&[1u8; 32]).unwrap(); // The hash of our new leaf and its sibling (a zero value). // // H1 // / \ // L1 Z[0] let h1 = H::hashv(&[&leaf1, &H::zero_bytes()[0]]).unwrap(); // The hash of `h1` and its sibling (a subtree represented by `Z[1]`). // // H2 // /-/ \-\ // H1 Z[1] // / \ / \ // L1 Z[0] Z[0] Z[0] // // `Z[1]` represents the whole subtree on the right from `h2`. In the next // examples, we are just going to show empty subtrees instead of the whole // hierarchy. let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap(); // The hash of `h3` and its sibling (a subtree represented by `Z[2]`). // // H3 // / \ // H2 Z[2] // / \ // H1 Z[1] // / \ // L1 Z[0] let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap(); // The hash of `h4` and its sibling (a subtree represented by `Z[3]`), // which is the root. 
// // R // / \ // H3 Z[3] // / \ // H2 Z[2] // / \ // H1 Z[1] // / \ // L1 Z[0] let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(leaf1), Some(h1), Some(h2), Some(h3)]); let expected_filled_subtrees = BoundedVec::from_array(&[leaf1, h1, h2, h3]); merkle_tree.append(&leaf1).unwrap(); assert_eq!(merkle_tree.changelog_index(), 1); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 0) ); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 1); assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees); assert_eq!(merkle_tree.next_index(), 1); assert_eq!(merkle_tree.rightmost_leaf(), leaf1); // Appending the 2nd leaf should result in recomputing the root due to the // change of the `h1`, which now is a hash of the two non-zero leafs. So // when computing hashes from H2 up to the root, we are still going to use // zero bytes. // // The other subtrees still remain the same. 
// // R // / \ // H3 Z[3] // / \ // H2 Z[2] // / \ // H1 Z[1] // / \ // L1 L2 let leaf2 = H::hash(&[2u8; 32]).unwrap(); let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap(); let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap(); let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap(); let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(leaf2), Some(h1), Some(h2), Some(h3)]); let expected_filled_subtrees = BoundedVec::from_array(&[leaf1, h1, h2, h3]); merkle_tree.append(&leaf2).unwrap(); assert_eq!(merkle_tree.changelog_index(), 2); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 1), ); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 2); assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees); assert_eq!(merkle_tree.next_index(), 2); assert_eq!(merkle_tree.rightmost_leaf(), leaf2); // Appending the 3rd leaf alters the next subtree on the right. // Instead of using Z[1], we will end up with the hash of the new leaf and // Z[0]. // // The other subtrees still remain the same. 
// // R // / \ // H4 Z[3] // / \ // H3 Z[2] // / \ // H1 H2 // / \ / \ // L1 L2 L3 Z[0] let leaf3 = H::hash(&[3u8; 32]).unwrap(); let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap(); let h2 = H::hashv(&[&leaf3, &H::zero_bytes()[0]]).unwrap(); let h3 = H::hashv(&[&h1, &h2]).unwrap(); let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap(); let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(leaf3), Some(h2), Some(h3), Some(h4)]); let expected_filled_subtrees = BoundedVec::from_array(&[leaf3, h1, h3, h4]); merkle_tree.append(&leaf3).unwrap(); assert_eq!(merkle_tree.changelog_index(), 3); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 2), ); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 3); assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees); assert_eq!(merkle_tree.next_index(), 3); assert_eq!(merkle_tree.rightmost_leaf(), leaf3); // Appending the 4th leaf alters the next subtree on the right. // Instead of using Z[1], we will end up with the hash of the new leaf and // Z[0]. // // The other subtrees still remain the same. 
// // R // / \ // H4 Z[3] // / \ // H3 Z[2] // / \ // H1 H2 // / \ / \ // L1 L2 L3 L4 let leaf4 = H::hash(&[4u8; 32]).unwrap(); let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap(); let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap(); let h3 = H::hashv(&[&h1, &h2]).unwrap(); let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap(); let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(leaf4), Some(h2), Some(h3), Some(h4)]); let expected_filled_subtrees = BoundedVec::from_array(&[leaf3, h1, h3, h4]); merkle_tree.append(&leaf4).unwrap(); assert_eq!(merkle_tree.changelog_index(), 4); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 3), ); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 4); assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees); assert_eq!(merkle_tree.next_index(), 4); assert_eq!(merkle_tree.rightmost_leaf(), leaf4); } /// Checks whether `append_with_proof` returns correct Merkle proofs. fn append_with_proof< H, const HEIGHT: usize, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize, const N_APPENDS: usize, >() where H: Hasher, { let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY); let mut rng = thread_rng(); for i in 0..N_APPENDS { let leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let mut proof = BoundedVec::with_capacity(HEIGHT); merkle_tree.append_with_proof(&leaf, &mut proof).unwrap(); reference_tree.append(&leaf).unwrap(); let reference_proof = reference_tree.get_proof_of_leaf(i, true).unwrap(); assert_eq!(proof, reference_proof); } } /// Performs invalid updates on the given Merkle tree by trying to swap all /// parameters separately. 
Asserts the errors that the Merkle tree should /// return as a part of validation of these inputs. fn invalid_updates<H, const HEIGHT: usize, const CHANGELOG: usize>( rng: &mut ThreadRng, merkle_tree: &mut ConcurrentMerkleTree<H, HEIGHT>, changelog_index: usize, old_leaf: &[u8; 32], new_leaf: &[u8; 32], leaf_index: usize, proof: BoundedVec<[u8; 32]>, ) where H: Hasher, { // This test case works only for larger changelogs, where there is a chance // to encounter conflicting changelog entries. // // We assume that it's going to work for changelogs with capacity greater // than 1. But the smaller the changelog and the more non-conflicting // operations are done in between, the higher the chance of this check // failing. If you ever encounter issues with reproducing this error, try // tuning your changelog size or make sure that conflicting operations are // done frequently enough. if CHANGELOG > 1 { let invalid_changelog_index = 0; let mut proof_clone = proof.clone(); let res = merkle_tree.update( invalid_changelog_index, old_leaf, new_leaf, leaf_index, &mut proof_clone, ); assert!(matches!( res, Err(ConcurrentMerkleTreeError::CannotUpdateLeaf) )); } let invalid_old_leaf: [u8; 32] = Fr::rand(rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let mut proof_clone = proof.clone(); let res = merkle_tree.update( changelog_index, &invalid_old_leaf, &new_leaf, 0, &mut proof_clone, ); assert!(matches!( res, Err(ConcurrentMerkleTreeError::InvalidProof(_, _)) )); let invalid_index_in_range = gen_range_exclude(rng, 0..merkle_tree.next_index(), &[leaf_index]); let mut proof_clone = proof.clone(); let res = merkle_tree.update( changelog_index, old_leaf, new_leaf, invalid_index_in_range, &mut proof_clone, ); assert!(matches!( res, Err(ConcurrentMerkleTreeError::InvalidProof(_, _)) )); // Try pointing to the leaf indices outside the range only if the tree is // not full. Otherwise, it doesn't make sense and even `gen_range` will // fail. 
let next_index = merkle_tree.next_index(); let limit_leaves = 1 << HEIGHT; if next_index < limit_leaves { let invalid_index_outside_range = rng.gen_range(next_index..limit_leaves); let mut proof_clone = proof.clone(); let res = merkle_tree.update( changelog_index, old_leaf, new_leaf, invalid_index_outside_range, &mut proof_clone, ); assert!(matches!( res, Err(ConcurrentMerkleTreeError::CannotUpdateEmpty) )); } } /// Tests whether update operations work as expected. fn update<H, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize>() where H: Hasher, { const HEIGHT: usize = 4; let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY); let mut rng = thread_rng(); let leaf1 = H::hash(&[1u8; 32]).unwrap(); let leaf2 = H::hash(&[2u8; 32]).unwrap(); let leaf3 = H::hash(&[3u8; 32]).unwrap(); let leaf4 = H::hash(&[4u8; 32]).unwrap(); // Append 4 leaves. 
// // R // / \ // H4 Z[3] // / \ // H3 Z[2] // / \ // H1 H2 // / \ / \ // L1 L2 L3 L4 let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap(); let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap(); let h3 = H::hashv(&[&h1, &h2]).unwrap(); let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap(); let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(leaf4), Some(h2), Some(h3), Some(h4)]); let expected_filled_subtrees = BoundedVec::from_array(&[leaf3, h1, h3, h4]); merkle_tree.append(&leaf1).unwrap(); reference_tree.append(&leaf1).unwrap(); merkle_tree.append(&leaf2).unwrap(); reference_tree.append(&leaf2).unwrap(); merkle_tree.append(&leaf3).unwrap(); reference_tree.append(&leaf3).unwrap(); merkle_tree.append(&leaf4).unwrap(); reference_tree.append(&leaf4).unwrap(); let canopy_levels = [ &[h4, H::zero_bytes()[3]][..], &[ h3, H::zero_bytes()[2], H::zero_bytes()[2], H::zero_bytes()[2], ][..], ]; let mut expected_canopy = Vec::new(); for canopy_level in 0..CANOPY { println!("canopy_level: {canopy_level}"); expected_canopy.extend_from_slice(&canopy_levels[canopy_level]); } assert_eq!(merkle_tree.changelog_index(), 4 % CHANGELOG); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 3), ); assert_eq!(merkle_tree.root(), reference_tree.root()); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 4); assert_eq!(merkle_tree.filled_subtrees, expected_filled_subtrees); assert_eq!(merkle_tree.next_index(), 4); assert_eq!(merkle_tree.rightmost_leaf(), leaf4); assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap()); assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice()); // Replace `leaf1`. let new_leaf1 = [9u8; 32]; // Replacing L1 affects H1 and all parent hashes up to the root. 
// // R // / \ // *H4* Z[3] // / \ // *H3* Z[2] // / \ // *H1* H2 // / \ / \ // *L1* L2 L3 L4 // // Merkle proof for the replaced leaf L1 is: // [L2, H2, Z[2], Z[3]] let changelog_index = merkle_tree.changelog_index(); let proof_raw = &[leaf2, h2, H::zero_bytes()[2], H::zero_bytes()[3]]; let mut proof = BoundedVec::with_capacity(HEIGHT); for node in &proof_raw[..HEIGHT - CANOPY] { proof.push(*node).unwrap(); } invalid_updates::<H, HEIGHT, CHANGELOG>( &mut rng, &mut merkle_tree, changelog_index, &leaf1, &new_leaf1, 0, proof.clone(), ); merkle_tree .update(changelog_index, &leaf1, &new_leaf1, 0, &mut proof) .unwrap(); reference_tree.update(&new_leaf1, 0).unwrap(); let h1 = H::hashv(&[&new_leaf1, &leaf2]).unwrap(); let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap(); let h3 = H::hashv(&[&h1, &h2]).unwrap(); let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap(); let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(new_leaf1), Some(h1), Some(h3), Some(h4)]); let canopy_levels = [ &[h4, H::zero_bytes()[3]][..], &[ h3, H::zero_bytes()[2], H::zero_bytes()[2], H::zero_bytes()[2], ][..], ]; let mut expected_canopy = Vec::new(); for canopy_level in 0..CANOPY { expected_canopy.extend_from_slice(&canopy_levels[canopy_level]); } assert_eq!(merkle_tree.changelog_index(), 5 % CHANGELOG); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 0), ); assert_eq!(merkle_tree.root(), reference_tree.root()); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 5); assert_eq!(merkle_tree.next_index(), 4); assert_eq!(merkle_tree.rightmost_leaf(), leaf4); assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap()); assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice()); // Replace `leaf2`. let new_leaf2 = H::hash(&[8u8; 32]).unwrap(); // Replacing L2 affects H1 and all parent hashes up to the root. 
// // R // / \ // *H4* Z[3] // / \ // *H3* Z[2] // / \ // *H1* H2 // / \ / \ // L1 *L2* L3 L4 // // Merkle proof for the replaced leaf L2 is: // [L1, H2, Z[2], Z[3]] let changelog_index = merkle_tree.changelog_index(); let proof_raw = &[new_leaf1, h2, H::zero_bytes()[2], H::zero_bytes()[3]]; let mut proof = BoundedVec::with_capacity(HEIGHT); for node in &proof_raw[..HEIGHT - CANOPY] { proof.push(*node).unwrap(); } invalid_updates::<H, HEIGHT, CHANGELOG>( &mut rng, &mut merkle_tree, changelog_index, &leaf2, &new_leaf2, 1, proof.clone(), ); merkle_tree .update(changelog_index, &leaf2, &new_leaf2, 1, &mut proof) .unwrap(); reference_tree.update(&new_leaf2, 1).unwrap(); let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap(); let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap(); let h3 = H::hashv(&[&h1, &h2]).unwrap(); let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap(); let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(new_leaf2), Some(h1), Some(h3), Some(h4)]); let canopy_levels = [ &[h4, H::zero_bytes()[3]][..], &[ h3, H::zero_bytes()[2], H::zero_bytes()[2], H::zero_bytes()[2], ][..], ]; let mut expected_canopy = Vec::new(); for canopy_level in 0..CANOPY { expected_canopy.extend_from_slice(&canopy_levels[canopy_level]); } assert_eq!(merkle_tree.changelog_index(), 6 % CHANGELOG); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 1), ); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 6); assert_eq!(merkle_tree.next_index(), 4); assert_eq!(merkle_tree.rightmost_leaf(), leaf4); assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap()); assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice()); // Replace `leaf3`. let new_leaf3 = H::hash(&[7u8; 32]).unwrap(); // Replacing L3 affects H1 and all parent hashes up to the root. 
// // R // / \ // *H4* Z[3] // / \ // *H3* Z[2] // / \ // H1 *H2* // / \ / \ // L1 L2 *L3* L4 // // Merkle proof for the replaced leaf L3 is: // [L4, H1, Z[2], Z[3]] let changelog_index = merkle_tree.changelog_index(); let proof_raw = &[leaf4, h1, H::zero_bytes()[2], H::zero_bytes()[3]]; let mut proof = BoundedVec::with_capacity(HEIGHT); for node in &proof_raw[..HEIGHT - CANOPY] { proof.push(*node).unwrap(); } invalid_updates::<H, HEIGHT, CHANGELOG>( &mut rng, &mut merkle_tree, changelog_index, &leaf3, &new_leaf3, 2, proof.clone(), ); merkle_tree .update(changelog_index, &leaf3, &new_leaf3, 2, &mut proof) .unwrap(); reference_tree.update(&new_leaf3, 2).unwrap(); let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap(); let h2 = H::hashv(&[&new_leaf3, &leaf4]).unwrap(); let h3 = H::hashv(&[&h1, &h2]).unwrap(); let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap(); let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(new_leaf3), Some(h2), Some(h3), Some(h4)]); let canopy_levels = [ &[h4, H::zero_bytes()[3]][..], &[ h3, H::zero_bytes()[2], H::zero_bytes()[2], H::zero_bytes()[2], ][..], ]; let mut expected_canopy = Vec::new(); for canopy_level in 0..CANOPY { expected_canopy.extend_from_slice(&canopy_levels[canopy_level]); } assert_eq!(merkle_tree.changelog_index(), 7 % CHANGELOG); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 2) ); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 7); assert_eq!(merkle_tree.next_index(), 4); assert_eq!(merkle_tree.rightmost_leaf(), leaf4); assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap()); assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice()); // Replace `leaf4`. let new_leaf4 = H::hash(&[6u8; 32]).unwrap(); // Replacing L4 affects H1 and all parent hashes up to the root. 
// // R // / \ // *H4* Z[3] // / \ // *H3* Z[2] // / \ // H1 *H2* // / \ / \ // L1 L2 L3 *L4* // // Merkle proof for the replaced leaf L4 is: // [L3, H1, Z[2], Z[3]] let changelog_index = merkle_tree.changelog_index(); let proof_raw = &[new_leaf3, h1, H::zero_bytes()[2], H::zero_bytes()[3]]; let mut proof = BoundedVec::with_capacity(HEIGHT); for node in &proof_raw[..HEIGHT - CANOPY] { proof.push(*node).unwrap(); } invalid_updates::<H, HEIGHT, CHANGELOG>( &mut rng, &mut merkle_tree, changelog_index, &leaf4, &new_leaf4, 3, proof.clone(), ); merkle_tree .update(changelog_index, &leaf4, &new_leaf4, 3, &mut proof) .unwrap(); reference_tree.update(&new_leaf4, 3).unwrap(); let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap(); let h2 = H::hashv(&[&new_leaf3, &new_leaf4]).unwrap(); let h3 = H::hashv(&[&h1, &h2]).unwrap(); let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap(); let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap(); let expected_changelog_path = ChangelogPath([Some(new_leaf4), Some(h2), Some(h3), Some(h4)]); let canopy_levels = [ &[h4, H::zero_bytes()[3]][..], &[ h3, H::zero_bytes()[2], H::zero_bytes()[2], H::zero_bytes()[2], ][..], ]; let mut expected_canopy = Vec::new(); for canopy_level in 0..CANOPY { expected_canopy.extend_from_slice(&canopy_levels[canopy_level]); } assert_eq!(merkle_tree.changelog_index(), 8 % CHANGELOG); assert_eq!( merkle_tree.changelog[merkle_tree.changelog_index()], ChangelogEntry::new(expected_changelog_path, 3) ); assert_eq!(merkle_tree.root(), expected_root); assert_eq!(merkle_tree.roots.last_index(), 8); assert_eq!(merkle_tree.next_index(), 4); assert_eq!(merkle_tree.rightmost_leaf(), new_leaf4); assert_eq!(merkle_tree.canopy, reference_tree.get_canopy().unwrap()); assert_eq!(merkle_tree.canopy.as_slice(), expected_canopy.as_slice()); } /// Tests whether appending leaves over the limit results in an explicit error. 
fn overfill_tree<H>() where H: Hasher, { const HEIGHT: usize = 2; const CHANGELOG: usize = 32; const ROOTS: usize = 32; const CANOPY: usize = 0; let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); for _ in 0..4 { merkle_tree.append(&[4; 32]).unwrap(); } assert!(matches!( merkle_tree.append(&[4; 32]), Err(ConcurrentMerkleTreeError::TreeFull) )); } /// Tests whether performing enough updates to overfill the changelog and root /// buffer results in graceful reset of the counters. fn overfill_changelog_and_roots<H>() where H: Hasher, { const HEIGHT: usize = 2; const CHANGELOG: usize = 6; const ROOTS: usize = 8; const CANOPY: usize = 0; // Our implementation of concurrent Merkle tree. let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); // Reference implementation of Merkle tree which Solana Labs uses for // testing (and therefore, we as well). We use it mostly to get the Merkle // proofs. let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY); let mut rng = thread_rng(); // Fill up the tree, producing 4 roots and changelog entries. for _ in 0..(1 << HEIGHT) { let leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); merkle_tree.append(&leaf).unwrap(); reference_tree.append(&leaf).unwrap(); } assert_eq!(merkle_tree.changelog.last_index(), 4); assert_eq!(merkle_tree.roots.last_index(), 4); // Update 2 leaves to fill up the changelog. Its counter should reach the // modulus and get reset. 
for i in 0..2 { let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let changelog_index = merkle_tree.changelog_index(); let old_leaf = reference_tree.get_leaf(i); let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap(); merkle_tree .update(changelog_index, &old_leaf, &new_leaf, i, &mut proof) .unwrap(); reference_tree.update(&new_leaf, i).unwrap(); } assert_eq!(merkle_tree.changelog.last_index(), 0); assert_eq!(merkle_tree.roots.last_index(), 6); // Update another 2 leaves to fill up the root. Its counter should reach // the modulus and get reset. The previously reset counter should get // incremented. for i in 0..2 { let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let changelog_index = merkle_tree.changelog_index(); let old_leaf = reference_tree.get_leaf(i); let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap(); merkle_tree .update(changelog_index, &old_leaf, &new_leaf, i, &mut proof) .unwrap(); reference_tree.update(&new_leaf, i).unwrap(); } assert_eq!(merkle_tree.changelog.last_index(), 2); assert_eq!(merkle_tree.roots.last_index(), 0); // The latter updates should keep incrementing the counters. for i in 0..3 { let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let changelog_index = merkle_tree.changelog_index(); let old_leaf = reference_tree.get_leaf(i); let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap(); merkle_tree .update(changelog_index, &old_leaf, &new_leaf, i, &mut proof) .unwrap(); reference_tree.update(&new_leaf, i).unwrap(); } assert_eq!(merkle_tree.changelog.last_index(), 5); assert_eq!(merkle_tree.roots.last_index(), 3); } /// Checks whether `append_batch` is compatible with equivalent multiple /// appends. 
fn compat_batch<H, const HEIGHT: usize, const CANOPY: usize>()
where
    H: Hasher,
{
    const CHANGELOG: usize = 64;
    const ROOTS: usize = 256;

    let mut rng = thread_rng();

    // A batch can be at most as large as the tree capacity or the changelog.
    let batch_limit = cmp::min(1 << HEIGHT, CHANGELOG);

    for batch_size in 1..batch_limit {
        // Tree to which we are going to append the whole batch at once.
        let mut concurrent_mt_1 =
            ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
        concurrent_mt_1.init().unwrap();

        // Tree to which are going to append single leaves.
        let mut concurrent_mt_2 =
            ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
        concurrent_mt_2.init().unwrap();

        // Reference tree for checking the correctness of proofs.
        let mut reference_mt = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY);

        let leaves: Vec<[u8; 32]> = (0..batch_size)
            .map(|_| {
                Fr::rand(&mut rng)
                    .into_bigint()
                    .to_bytes_be()
                    .try_into()
                    .unwrap()
            })
            .collect();
        let leaves: Vec<&[u8; 32]> = leaves.iter().collect();

        // Append leaves to all Merkle tree implementations.

        // Batch append.
        concurrent_mt_1.append_batch(leaves.as_slice()).unwrap();
        // Singular appends.
        for leaf in leaves.iter() {
            concurrent_mt_2.append(leaf).unwrap();
        }
        // Singular appends to reference MT.
        for leaf in leaves.iter() {
            reference_mt.append(leaf).unwrap();
        }

        // Check whether roots are the same.
        // Skip roots which are an output of singular, non-terminal
        // appends - we don't compute them in batch appends and instead,
        // emit a "zero root" (just to appease the clients assuming that
        // root index is equal to sequence number).
        assert_eq!(
            concurrent_mt_1
                .roots
                .iter()
                .step_by(batch_size)
                .collect::<Vec<_>>()
                .as_slice(),
            concurrent_mt_2
                .roots
                .iter()
                .step_by(batch_size)
                .collect::<Vec<_>>()
                .as_slice()
        );

        assert_eq!(concurrent_mt_1.root(), reference_mt.root());
        assert_eq!(concurrent_mt_2.root(), reference_mt.root());
    }
}

/// Checks that a batch larger than the changelog capacity is rejected with
/// `BatchGreaterThanChangelog`.
fn batch_greater_than_changelog<H, const HEIGHT: usize, const CANOPY: usize>()
where
    H: Hasher,
{
    const CHANGELOG: usize = 64;
    const ROOTS: usize = 256;

    let mut rng = thread_rng();

    let mut concurrent_mt =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    concurrent_mt.init().unwrap();

    // Every batch size above the changelog capacity must fail.
    for batch_size in (CHANGELOG + 1)..(1 << HEIGHT) {
        let leaves: Vec<[u8; 32]> = (0..batch_size)
            .map(|_| {
                Fr::rand(&mut rng)
                    .into_bigint()
                    .to_bytes_be()
                    .try_into()
                    .unwrap()
            })
            .collect();
        let leaves: Vec<&[u8; 32]> = leaves.iter().collect();

        assert!(matches!(
            concurrent_mt.append_batch(leaves.as_slice()),
            Err(ConcurrentMerkleTreeError::BatchGreaterThanChangelog(_, _)),
        ));
    }
}

/// Checks that trees with a canopy produce the same roots as trees without
/// one, and that canopy-trimmed proofs can be completed back to full proofs
/// via `update_proof_from_canopy`.
fn compat_canopy<H, const HEIGHT: usize>()
where
    H: Hasher,
{
    const CHANGELOG: usize = 64;
    const ROOTS: usize = 256;

    let mut rng = thread_rng();

    for canopy_depth in 1..(HEIGHT + 1) {
        let batch_limit = cmp::min(1 << HEIGHT, CHANGELOG);
        for batch_size in 1..batch_limit {
            let mut concurrent_mt_with_canopy =
                ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, canopy_depth)
                    .unwrap();
            concurrent_mt_with_canopy.init().unwrap();

            let mut concurrent_mt_without_canopy =
                ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, 0).unwrap();
            concurrent_mt_without_canopy.init().unwrap();

            let mut reference_mt_with_canopy =
                light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, canopy_depth);
            let mut reference_mt_without_canopy =
                light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, 0);

            for batch_i in 0..((1 << HEIGHT) / batch_size) {
                let leaves: Vec<[u8; 32]> = (0..batch_size)
                    .map(|_| {
                        Fr::rand(&mut rng)
                            .into_bigint()
                            .to_bytes_be()
                            .try_into()
                            .unwrap()
                    })
                    .collect();
                let leaves: Vec<&[u8; 32]> = leaves.iter().collect();

                concurrent_mt_with_canopy
                    .append_batch(leaves.as_slice())
                    .unwrap();
                concurrent_mt_without_canopy
                    .append_batch(leaves.as_slice())
                    .unwrap();
                for leaf in leaves {
                    reference_mt_with_canopy.append(leaf).unwrap();
                    reference_mt_without_canopy.append(leaf).unwrap();
                }

                for leaf_i in 0..batch_size {
                    let leaf_index = (batch_i * batch_size) + leaf_i;

                    let mut proof_with_canopy = reference_mt_with_canopy
                        .get_proof_of_leaf(leaf_index, false)
                        .unwrap();
                    let proof_without_canopy = reference_mt_without_canopy
                        .get_proof_of_leaf(leaf_index, true)
                        .unwrap();

                    // The trimmed proof must be a prefix of the full proof.
                    assert_eq!(
                        proof_with_canopy[..],
                        proof_without_canopy[..HEIGHT - canopy_depth]
                    );

                    // Completing the proof from the canopy must reproduce the
                    // full proof.
                    concurrent_mt_with_canopy
                        .update_proof_from_canopy(leaf_index, &mut proof_with_canopy)
                        .unwrap();

                    assert_eq!(proof_with_canopy, proof_without_canopy)
                }
            }
        }
    }
}

#[test]
fn test_append_keccak_canopy_0() {
    append::<Keccak, 0>()
}

#[test]
fn test_append_poseidon_canopy_0() {
    append::<Poseidon, 0>()
}

#[test]
fn test_append_sha256_canopy_0() {
    append::<Sha256, 0>()
}

#[test]
fn test_append_with_proof_keccak_4_16_16_0_16() {
    append_with_proof::<Keccak, 4, 16, 16, 0, 16>()
}

#[test]
fn test_append_with_proof_poseidon_4_16_16_0_16() {
    append_with_proof::<Poseidon, 4, 16, 16, 0, 16>()
}

#[test]
fn test_append_with_proof_sha256_4_16_16_0_16() {
    append_with_proof::<Sha256, 4, 16, 16, 0, 16>()
}

#[test]
fn test_append_with_proof_keccak_26_1400_2800_0_200() {
    append_with_proof::<Keccak, 26, 1400, 2800, 0, 200>()
}

#[test]
fn test_append_with_proof_poseidon_26_1400_2800_0_200() {
    append_with_proof::<Poseidon, 26, 1400, 2800, 0, 200>()
}

#[test]
fn test_append_with_proof_sha256_26_1400_2800_0_200() {
    append_with_proof::<Sha256, 26, 1400, 2800, 0, 200>()
}

#[test]
fn test_append_with_proof_keccak_26_1400_2800_10_200() {
    append_with_proof::<Keccak, 26, 1400, 2800, 10, 200>()
}

#[test]
fn test_append_with_proof_poseidon_26_1400_2800_10_200() {
    append_with_proof::<Poseidon, 26, 1400, 2800, 10,
200>()
}

#[test]
fn test_append_with_proof_sha256_26_1400_2800_10_200() {
    append_with_proof::<Sha256, 26, 1400, 2800, 10, 200>()
}

#[test]
fn test_update_keccak_height_4_changelog_1_roots_256_canopy_0() {
    update::<Keccak, 1, 256, 0>()
}

#[test]
fn test_update_keccak_height_4_changelog_1_roots_256_canopy_1() {
    update::<Keccak, 1, 256, 1>()
}

#[test]
fn test_update_keccak_height_4_changelog_1_roots_256_canopy_2() {
    update::<Keccak, 1, 256, 2>()
}

#[test]
fn test_update_keccak_height_4_changelog_32_roots_256_canopy_0() {
    update::<Keccak, 32, 256, 0>()
}

#[test]
fn test_update_keccak_height_4_changelog_32_roots_256_canopy_1() {
    update::<Keccak, 32, 256, 1>()
}

#[test]
fn test_update_keccak_height_4_changelog_32_roots_256_canopy_2() {
    update::<Keccak, 32, 256, 2>()
}

#[test]
fn test_update_poseidon_height_4_changelog_1_roots_256_canopy_0() {
    update::<Poseidon, 1, 256, 0>()
}

#[test]
fn test_update_poseidon_height_4_changelog_1_roots_256_canopy_1() {
    update::<Poseidon, 1, 256, 1>()
}

#[test]
fn test_update_poseidon_height_4_changelog_1_roots_256_canopy_2() {
    update::<Poseidon, 1, 256, 2>()
}

#[test]
fn test_update_poseidon_height_4_changelog_32_roots_256_canopy_0() {
    update::<Poseidon, 32, 256, 0>()
}

#[test]
fn test_update_poseidon_height_4_changelog_32_roots_256_canopy_1() {
    update::<Poseidon, 32, 256, 1>()
}

#[test]
fn test_update_poseidon_height_4_changelog_32_roots_256_canopy_2() {
    update::<Poseidon, 32, 256, 2>()
}

#[test]
fn test_update_sha256_height_4_changelog_32_roots_256_canopy_0() {
    update::<Sha256, 32, 256, 0>()
}

// Fixed: the two tests below previously instantiated `update` with canopy 0
// (a copy-paste of the canopy_0 test) despite their names claiming canopy 1
// and 2. They now match the Keccak/Poseidon variants above.
#[test]
fn test_update_sha256_height_4_changelog_32_roots_256_canopy_1() {
    update::<Sha256, 32, 256, 1>()
}

#[test]
fn test_update_sha256_height_4_changelog_32_roots_256_canopy_2() {
    update::<Sha256, 32, 256, 2>()
}

#[test]
fn test_overfill_tree_keccak() {
    overfill_tree::<Keccak>()
}

#[test]
fn test_overfill_tree_poseidon() {
    overfill_tree::<Poseidon>()
}

#[test]
fn test_overfill_tree_sha256() {
    overfill_tree::<Sha256>()
}

#[test]
fn
test_overfill_changelog_keccak() {
    overfill_changelog_and_roots::<Keccak>()
}

#[test]
fn test_compat_batch_keccak_8_canopy_0() {
    const HEIGHT: usize = 8;
    const CANOPY: usize = 0;
    compat_batch::<Keccak, HEIGHT, CANOPY>()
}

#[test]
fn test_compat_batch_poseidon_3_canopy_0() {
    const HEIGHT: usize = 3;
    const CANOPY: usize = 0;
    compat_batch::<Poseidon, HEIGHT, CANOPY>()
}

#[test]
fn test_compat_batch_poseidon_6_canopy_0() {
    const HEIGHT: usize = 6;
    const CANOPY: usize = 0;
    compat_batch::<Poseidon, HEIGHT, CANOPY>()
}

#[test]
fn test_compat_batch_sha256_8_canopy_0() {
    const HEIGHT: usize = 8;
    const CANOPY: usize = 0;
    compat_batch::<Sha256, HEIGHT, CANOPY>()
}

#[cfg(feature = "heavy-tests")]
#[test]
fn test_compat_batch_keccak_16() {
    const HEIGHT: usize = 16;
    const CANOPY: usize = 0;
    compat_batch::<Keccak, HEIGHT, CANOPY>()
}

#[cfg(feature = "heavy-tests")]
#[test]
fn test_compat_batch_poseidon_16() {
    const HEIGHT: usize = 16;
    const CANOPY: usize = 0;
    compat_batch::<Poseidon, HEIGHT, CANOPY>()
}

#[cfg(feature = "heavy-tests")]
#[test]
fn test_compat_batch_sha256_16() {
    const HEIGHT: usize = 16;
    const CANOPY: usize = 0;
    compat_batch::<Sha256, HEIGHT, CANOPY>()
}

#[test]
fn test_batch_greater_than_changelog_keccak_8_canopy_0() {
    const HEIGHT: usize = 8;
    const CANOPY: usize = 0;
    batch_greater_than_changelog::<Keccak, HEIGHT, CANOPY>()
}

#[test]
fn test_batch_greater_than_changelog_poseidon_8_canopy_0() {
    const HEIGHT: usize = 8;
    const CANOPY: usize = 0;
    batch_greater_than_changelog::<Poseidon, HEIGHT, CANOPY>()
}

#[test]
fn test_batch_greater_than_changelog_sha256_8_canopy_0() {
    const HEIGHT: usize = 8;
    const CANOPY: usize = 0;
    batch_greater_than_changelog::<Sha256, HEIGHT, CANOPY>()
}

#[test]
fn test_batch_greater_than_changelog_keccak_8_canopy_4() {
    const HEIGHT: usize = 8;
    const CANOPY: usize = 4;
    batch_greater_than_changelog::<Keccak, HEIGHT, CANOPY>()
}

#[test]
fn test_batch_greater_than_changelog_poseidon_6_canopy_3() {
    const HEIGHT: usize = 6;
    const CANOPY: usize = 3;
    batch_greater_than_changelog::<Poseidon, HEIGHT, CANOPY>()
}

#[test]
fn test_batch_greater_than_changelog_sha256_8_canopy_4() {
    const HEIGHT: usize = 8;
    const CANOPY: usize = 4;
    batch_greater_than_changelog::<Sha256, HEIGHT, CANOPY>()
}

#[test]
fn test_compat_canopy_keccak_8() {
    const HEIGHT: usize = 8;
    compat_canopy::<Keccak, HEIGHT>()
}

#[test]
fn test_compat_canopy_poseidon_6() {
    const HEIGHT: usize = 6;
    compat_canopy::<Poseidon, HEIGHT>()
}

#[cfg(feature = "heavy-tests")]
#[test]
fn test_compat_canopy_poseidon_26() {
    const HEIGHT: usize = 26;
    compat_canopy::<Poseidon, HEIGHT>()
}

#[test]
fn test_compat_canopy_sha256_8() {
    const HEIGHT: usize = 8;
    compat_canopy::<Sha256, HEIGHT>()
}

/// Compares the internal fields of concurrent Merkle tree implementations, to
/// ensure their consistency.
fn compare_trees<H, const HEIGHT: usize, const MAX_ROOTS: usize>(
    concurrent_mt: &ConcurrentMerkleTree<H, HEIGHT>,
    spl_concurrent_mt: &spl_concurrent_merkle_tree::concurrent_merkle_tree::ConcurrentMerkleTree<
        HEIGHT,
        MAX_ROOTS,
    >,
) where
    H: Hasher,
{
    // Changelog entries (per-level paths and the leaf index) must match.
    for i in 0..concurrent_mt.changelog.len() {
        let changelog_entry = concurrent_mt.changelog[i].clone();
        let spl_changelog_entry = spl_concurrent_mt.change_logs[i];
        for j in 0..HEIGHT {
            let changelog_node = changelog_entry.path[j].unwrap();
            let spl_changelog_node = spl_changelog_entry.path[j];
            assert_eq!(changelog_node, spl_changelog_node);
        }
        assert_eq!(changelog_entry.index, spl_changelog_entry.index as u64);
    }
    assert_eq!(
        concurrent_mt.changelog.last_index(),
        spl_concurrent_mt.active_index as usize
    );
    assert_eq!(concurrent_mt.root(), spl_concurrent_mt.get_root());
    // Root history must match; the SPL tree stores historical roots inside
    // its changelog entries.
    for i in 0..concurrent_mt.roots.len() {
        assert_eq!(
            concurrent_mt.roots[i],
            spl_concurrent_mt.change_logs[i].root
        );
    }
    assert_eq!(
        concurrent_mt.roots.last_index(),
        spl_concurrent_mt.active_index as usize
    );
    assert_eq!(
        concurrent_mt.next_index(),
        spl_concurrent_mt.rightmost_proof.index as usize
    );
    assert_eq!(
        concurrent_mt.rightmost_leaf(),
spl_concurrent_mt.rightmost_proof.leaf ); } /// Checks whether our `append` and `update` implementations are compatible /// with `append` and `set_leaf` from `spl-concurrent-merkle-tree` crate. #[tokio::test(flavor = "multi_thread")] async fn test_spl_compat() { const HEIGHT: usize = 4; const CHANGELOG: usize = 64; const ROOTS: usize = 256; const CANOPY: usize = 0; let mut rng = thread_rng(); // Our implementation of concurrent Merkle tree. let mut concurrent_mt = ConcurrentMerkleTree::<Keccak, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); concurrent_mt.init().unwrap(); // Solana Labs implementation of concurrent Merkle tree. let mut spl_concurrent_mt = spl_concurrent_merkle_tree::concurrent_merkle_tree::ConcurrentMerkleTree::<HEIGHT, ROOTS>::new(); spl_concurrent_mt.initialize().unwrap(); // Reference implemenetation of Merkle tree which Solana Labs uses for // testing (and therefore, we as well). We use it mostly to get the Merkle // proofs. let mut reference_tree = light_merkle_tree_reference::MerkleTree::<Keccak>::new(HEIGHT, CANOPY); for i in 0..(1 << HEIGHT) { let leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); concurrent_mt.append(&leaf).unwrap(); spl_concurrent_mt.append(leaf).unwrap(); reference_tree.append(&leaf).unwrap(); compare_trees(&concurrent_mt, &spl_concurrent_mt); // For every appended leaf with index greater than 0, update the leaf 0. // This is done in indexed Merkle trees[0] and it's a great test case // for rightmost proof updates. 
// // [0] https://docs.aztec.network/concepts/advanced/data_structures/indexed_merkle_tree if i > 0 { let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let root = concurrent_mt.root(); let changelog_index = concurrent_mt.changelog_index(); let old_leaf = reference_tree.get_leaf(0); let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap(); concurrent_mt .update(changelog_index, &old_leaf, &new_leaf, 0, &mut proof) .unwrap(); spl_concurrent_mt .set_leaf(root, old_leaf, new_leaf, proof.as_slice(), 0 as u32) .unwrap(); reference_tree.update(&new_leaf, 0).unwrap(); compare_trees(&concurrent_mt, &spl_concurrent_mt); } } for i in 0..(1 << HEIGHT) { let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let root = concurrent_mt.root(); let changelog_index = concurrent_mt.changelog_index(); let old_leaf = reference_tree.get_leaf(i); let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap(); concurrent_mt .update(changelog_index, &old_leaf, &new_leaf, i, &mut proof) .unwrap(); spl_concurrent_mt .set_leaf(root, old_leaf, new_leaf, proof.as_slice(), i as u32) .unwrap(); reference_tree.update(&new_leaf, i).unwrap(); compare_trees(&concurrent_mt, &spl_concurrent_mt); } } fn from_bytes< H, const HEIGHT: usize, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize, >() where H: Hasher, { let mut bytes = vec![ 0u8; ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(HEIGHT, CHANGELOG, ROOTS, CANOPY) ]; let mut rng = thread_rng(); let mut reference_tree_1 = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY); // Vector of changelog indices after each operation. let mut leaf_indices = CyclicBoundedVec::with_capacity(CHANGELOG); // Vector of roots after each operation. let mut roots = CyclicBoundedVec::with_capacity(CHANGELOG); // Vector of merkle paths we get from the reference tree after each operation. 
    let mut merkle_paths = CyclicBoundedVec::with_capacity(CHANGELOG);

    // Changelog is always initialized with a changelog path consisting of zero
    // bytes. For consistency, we need to assert the 1st zero byte as the first
    // expected leaf in the changelog.
    let merkle_path = reference_tree_1.get_path_of_leaf(0, true).unwrap();
    leaf_indices.push(0);
    merkle_paths.push(merkle_path);

    // Initialize the tree inside the byte buffer; the zero-copy handle is
    // dropped at the end of this scope so the buffer can be reloaded later.
    {
        let mut merkle_tree = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_init(
            bytes.as_mut_slice(),
            HEIGHT,
            CANOPY,
            CHANGELOG,
            ROOTS,
        )
        .unwrap();
        merkle_tree.init().unwrap();
        roots.push(merkle_tree.root());
    }

    let mut reference_tree_2 =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    reference_tree_2.init().unwrap();

    // Try to make the tree full. After each append, update a random leaf.
    // Reload the tree from bytes after each action.
    for _ in 0..(1 << HEIGHT) {
        // Reload the tree.
        let mut merkle_tree = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_mut(
            bytes.as_mut_slice(),
        )
        .unwrap();

        // Append leaf.
        let leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let leaf_index = merkle_tree.next_index();
        merkle_tree.append(&leaf).unwrap();
        reference_tree_1.append(&leaf).unwrap();
        reference_tree_2.append(&leaf).unwrap();

        leaf_indices.push(leaf_index);
        roots.push(merkle_tree.root());
        let merkle_path = reference_tree_1.get_path_of_leaf(leaf_index, true).unwrap();
        merkle_paths.push(merkle_path);

        // The zero-copy view and the in-memory reference tree must agree on
        // every internal field.
        assert_eq!(
            merkle_tree.filled_subtrees.iter().collect::<Vec<_>>(),
            reference_tree_2.filled_subtrees.iter().collect::<Vec<_>>()
        );
        assert_eq!(
            merkle_tree.changelog.iter().collect::<Vec<_>>(),
            reference_tree_2.changelog.iter().collect::<Vec<_>>()
        );
        assert_eq!(
            merkle_tree.roots.iter().collect::<Vec<_>>(),
            reference_tree_2.roots.iter().collect::<Vec<_>>()
        );
        assert_eq!(
            merkle_tree.canopy.iter().collect::<Vec<_>>(),
            reference_tree_2.canopy.iter().collect::<Vec<_>>()
        );
        assert_eq!(merkle_tree.root(), reference_tree_1.root());

        // Changelog entries must reproduce the recorded Merkle paths
        // (the initial zero entry is skipped).
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog.first_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert_eq!(changelog_entries.len(), merkle_paths.len() - 1);
        for ((leaf_index, merkle_path), changelog_entry) in leaf_indices
            .iter()
            .skip(1)
            .zip(merkle_paths.iter().skip(1))
            .zip(changelog_entries)
        {
            assert_eq!(changelog_entry.index, *leaf_index as u64);
            for i in 0..HEIGHT {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = merkle_path[i];
                assert_eq!(changelog_node, path_node);
            }
        }
        for (root_1, root_2) in merkle_tree.roots.iter().zip(roots.iter()) {
            assert_eq!(root_1, root_2);
        }

        // Update random leaf.
        let leaf_index = rng.gen_range(0..reference_tree_1.leaves().len());
        let old_leaf = reference_tree_1.get_leaf(leaf_index);
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let mut proof = reference_tree_1
            .get_proof_of_leaf(leaf_index, false)
            .unwrap();
        let changelog_index = merkle_tree.changelog_index();
        merkle_tree
            .update(
                changelog_index,
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        reference_tree_1.update(&new_leaf, leaf_index).unwrap();
        reference_tree_2
            .update(
                changelog_index,
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        assert_eq!(merkle_tree.root(), reference_tree_1.root());

        leaf_indices.push(leaf_index);
        roots.push(merkle_tree.root());
        let merkle_path = reference_tree_1.get_path_of_leaf(leaf_index, true).unwrap();
        merkle_paths.push(merkle_path);

        // Re-check changelog entries and roots after the update.
        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog.first_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert_eq!(changelog_entries.len(), merkle_paths.len() - 1);
        for ((leaf_index, merkle_path), changelog_entry) in leaf_indices
            .iter()
            .skip(1)
            .zip(merkle_paths.iter().skip(1))
            .zip(changelog_entries)
        {
            assert_eq!(changelog_entry.index, *leaf_index as u64);
            for i in 0..HEIGHT {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = merkle_path[i];
                assert_eq!(changelog_node, path_node);
            }
        }
        for (root_1, root_2) in merkle_tree.roots.iter().zip(roots.iter()) {
            assert_eq!(root_1, root_2);
        }
    }

    // Keep updating random leaves in loop.
    for _ in 0..1000 {
        // Reload the tree.
        let mut merkle_tree = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_mut(
            bytes.as_mut_slice(),
        )
        .unwrap();

        // Update random leaf.
        let leaf_index = rng.gen_range(0..reference_tree_1.leaves().len());
        let old_leaf = reference_tree_1.get_leaf(leaf_index);
        let new_leaf: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        let mut proof = reference_tree_1
            .get_proof_of_leaf(leaf_index, false)
            .unwrap();
        let changelog_index = merkle_tree.changelog_index();
        merkle_tree
            .update(
                changelog_index,
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        reference_tree_1.update(&new_leaf, leaf_index).unwrap();
        reference_tree_2
            .update(
                changelog_index,
                &old_leaf,
                &new_leaf,
                leaf_index,
                &mut proof,
            )
            .unwrap();
        assert_eq!(merkle_tree.root(), reference_tree_1.root());

        leaf_indices.push(leaf_index);
        roots.push(merkle_tree.root());
        let merkle_path = reference_tree_1.get_path_of_leaf(leaf_index, true).unwrap();
        merkle_paths.push(merkle_path);

        let changelog_entries = merkle_tree
            .changelog_entries(merkle_tree.changelog.first_index())
            .unwrap()
            .collect::<Vec<_>>();
        assert_eq!(changelog_entries.len(), merkle_paths.len() - 1);
        for ((leaf_index, merkle_path), changelog_entry) in leaf_indices
            .iter()
            .skip(1)
            .zip(merkle_paths.iter().skip(1))
            .zip(changelog_entries)
        {
            assert_eq!(changelog_entry.index, *leaf_index as u64);
            for i in 0..HEIGHT {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = merkle_path[i];
                assert_eq!(changelog_node, path_node);
            }
        }
        for (root_1, root_2) in merkle_tree.roots.iter().zip(roots.iter()) {
            assert_eq!(root_1, root_2);
        }
    }
}

#[test]
fn test_from_bytes_keccak_8_256_256() {
    const HEIGHT: usize = 8;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    from_bytes::<Keccak, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}

#[test]
fn test_from_bytes_poseidon_8_256_256() {
    const HEIGHT: usize = 8;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    from_bytes::<Poseidon, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}

#[test]
fn test_from_bytes_sha256_8_256_256_0() {
    const HEIGHT: usize = 8;
    const CHANGELOG:
    usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    from_bytes::<Sha256, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}

/// Tests the buffer size checks. Buffer size checks should fail any time that
/// a provided byte slice is smaller than the expected size indicated by the
/// tree metadata (height, changelog size, roots size etc.).
///
/// In case of `from_bytes_zero_copy_init`, the metadata are provided with an
/// intention of initializing them. The provided parameters influence the
/// size checks.
///
/// In case of `from_bytes_zero_copy_mut`, the metadata are read from the
/// buffer. Therefore, we end up with two phases of checks:
///
/// 1. Check of the non-dynamic fields, including the metadata structs.
///    Based on size of all non-dynamic fields of `ConcurrentMerkleTree`.
/// 2. If the check was successful, metadata are being read from the buffer.
/// 3. After reading the metadata, we check the buffer size again, now to the
///    full extent, before actually using it.
fn buffer_error<
    H,
    const HEIGHT: usize,
    const CHANGELOG: usize,
    const ROOTS: usize,
    const CANOPY: usize,
>()
where
    H: Hasher,
{
    let valid_size =
        ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(HEIGHT, CHANGELOG, ROOTS, CANOPY);

    // Check that `from_bytes_zero_copy_init` checks the bounds.
    for invalid_size in 1..valid_size {
        let mut bytes = vec![0u8; invalid_size];

        let res = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_init(
            &mut bytes,
            HEIGHT,
            CANOPY,
            CHANGELOG,
            ROOTS,
        );
        assert!(matches!(
            res,
            Err(ConcurrentMerkleTreeError::BufferSize(_, _))
        ));
    }

    // Initialize the tree correctly.
    let mut bytes = vec![0u8; valid_size];
    ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_init(
        &mut bytes,
        HEIGHT,
        CANOPY,
        CHANGELOG,
        ROOTS,
    )
    .unwrap();

    // Check that `from_bytes_zero_copy` mut checks the bounds based on the
    // metadata in already existing Merkle tree.
    for invalid_size in 1..valid_size {
        let bytes = &mut bytes[..invalid_size];

        let res = ConcurrentMerkleTreeZeroCopyMut::<H, HEIGHT>::from_bytes_zero_copy_mut(bytes);
        assert!(matches!(
            res,
            Err(ConcurrentMerkleTreeError::BufferSize(_, _))
        ));
    }
}

#[test]
fn test_buffer_error_keccak_8_256_256() {
    const HEIGHT: usize = 8;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    buffer_error::<Keccak, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}

#[test]
fn test_buffer_error_poseidon_8_256_256() {
    const HEIGHT: usize = 8;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    buffer_error::<Poseidon, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}

#[test]
fn test_buffer_error_sha256_8_256_256_0() {
    const HEIGHT: usize = 8;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;
    buffer_error::<Sha256, HEIGHT, CHANGELOG, ROOTS, CANOPY>()
}

/// A tree with height 0 must be rejected at construction time.
fn height_zero<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 0;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;

    let res = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY);
    assert!(matches!(res, Err(ConcurrentMerkleTreeError::HeightZero)));
}

#[test]
fn test_height_zero_keccak() {
    height_zero::<Keccak>()
}

#[test]
fn test_height_zero_poseidon() {
    height_zero::<Poseidon>()
}

#[test]
fn test_height_zero_sha256() {
    height_zero::<Sha256>()
}

/// A tree with changelog capacity 0 must be rejected at construction time.
fn changelog_zero<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 26;
    const CHANGELOG: usize = 0;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;

    let res = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY);
    assert!(matches!(res, Err(ConcurrentMerkleTreeError::ChangelogZero)));
}

#[test]
fn test_changelog_zero_keccak() {
    changelog_zero::<Keccak>()
}

#[test]
fn test_changelog_zero_poseidon() {
    changelog_zero::<Poseidon>()
}

#[test]
fn test_changelog_zero_sha256() {
    changelog_zero::<Sha256>()
}

/// A tree with root buffer capacity 0 must be rejected at construction time.
fn roots_zero<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 26;
    const
    CHANGELOG: usize = 256;
    const ROOTS: usize = 0;
    const CANOPY: usize = 0;

    let res = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY);
    assert!(matches!(res, Err(ConcurrentMerkleTreeError::RootsZero)));
}

#[test]
fn test_roots_zero_keccak() {
    roots_zero::<Keccak>()
}

#[test]
fn test_roots_zero_poseidon() {
    roots_zero::<Poseidon>()
}

#[test]
fn test_roots_zero_sha256() {
    roots_zero::<Sha256>()
}

/// Attempts an update with a proof of the given (wrong) length and expects
/// `InvalidProofLength`.
fn update_with_invalid_proof<H, const HEIGHT: usize>(
    merkle_tree: &mut ConcurrentMerkleTree<H, HEIGHT>,
    proof_len: usize,
) where
    H: Hasher,
{
    // It doesn't matter what values we use. The proof length check
    // should happen before checking its correctness.
    let mut proof = BoundedVec::from_slice(vec![[5u8; 32]; proof_len].as_slice());
    let res = merkle_tree.update(
        merkle_tree.changelog_index(),
        &H::zero_bytes()[0],
        &[4u8; 32],
        0,
        &mut proof,
    );
    assert!(matches!(
        res,
        Err(ConcurrentMerkleTreeError::InvalidProofLength(_, _))
    ))
}

/// Checks that proofs both shorter and longer than `height - canopy` are
/// rejected.
fn invalid_proof_len<H, const HEIGHT: usize, const CANOPY: usize>()
where
    H: Hasher,
{
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;

    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();

    // Proof sizes lower than `height - canopy`.
    for proof_len in 0..(HEIGHT - CANOPY) {
        update_with_invalid_proof(&mut merkle_tree, proof_len);
    }
    // Proof sizes greater than `height - canopy`.
    for proof_len in (HEIGHT - CANOPY + 1)..256 {
        update_with_invalid_proof(&mut merkle_tree, proof_len);
    }
}

#[test]
fn test_invalid_proof_len_keccak_height_26_canopy_0() {
    invalid_proof_len::<Keccak, 26, 0>()
}

#[test]
fn test_invalid_proof_len_keccak_height_26_canopy_10() {
    invalid_proof_len::<Keccak, 26, 10>()
}

#[test]
fn test_invalid_proof_len_poseidon_height_26_canopy_0() {
    invalid_proof_len::<Poseidon, 26, 0>()
}

#[test]
fn test_invalid_proof_len_poseidon_height_26_canopy_10() {
    invalid_proof_len::<Poseidon, 26, 10>()
}

#[test]
fn test_invalid_proof_len_sha256_height_26_canopy_0() {
    invalid_proof_len::<Sha256, 26, 0>()
}

#[test]
fn test_invalid_proof_len_sha256_height_26_canopy_10() {
    invalid_proof_len::<Sha256, 26, 10>()
}

/// Checks that an update with a correctly sized but cryptographically wrong
/// proof fails with `InvalidProof`.
fn invalid_proof<H, const HEIGHT: usize, const CANOPY: usize>()
where
    H: Hasher,
{
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;

    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();

    let old_leaf = [5u8; 32];
    merkle_tree.append(&old_leaf).unwrap();

    let mut rng = thread_rng();
    // Random nodes - with overwhelming probability, not a valid proof.
    let mut invalid_proof = BoundedVec::with_capacity(HEIGHT);
    for _ in 0..(HEIGHT - CANOPY) {
        let node: [u8; 32] = Fr::rand(&mut rng)
            .into_bigint()
            .to_bytes_be()
            .try_into()
            .unwrap();
        invalid_proof.push(node).unwrap();
    }

    let res = merkle_tree.update(
        merkle_tree.changelog_index(),
        &old_leaf,
        &[6u8; 32],
        0,
        &mut invalid_proof,
    );
    assert!(matches!(
        res,
        Err(ConcurrentMerkleTreeError::InvalidProof(_, _))
    ));
}

#[test]
fn test_invalid_proof_keccak_height_26_canopy_0() {
    invalid_proof::<Keccak, 26, 0>()
}

#[test]
fn test_invalid_proof_keccak_height_26_canopy_10() {
    invalid_proof::<Keccak, 26, 10>()
}

#[test]
fn test_invalid_proof_poseidon_height_26_canopy_0() {
    invalid_proof::<Poseidon, 26, 0>()
}

#[test]
fn test_invalid_proof_poseidon_height_26_canopy_10() {
    invalid_proof::<Poseidon, 26, 10>()
}

#[test]
fn test_invalid_proof_sha256_height_26_canopy_0() {
    invalid_proof::<Sha256, 26, 0>()
}

#[test]
fn test_invalid_proof_sha256_height_26_canopy_10() {
    invalid_proof::<Sha256, 26, 10>()
}

/// Checks that updating a leaf which was never appended fails with
/// `CannotUpdateEmpty`.
fn update_empty<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 26;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;

    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();

    // Try updating all empty leaves in the empty tree.
    // NOTE(review): this iterates over all 2^26 leaf indices; every call
    // fails fast, but the loop may still be slow.
    let mut proof = BoundedVec::from_slice(&H::zero_bytes()[..HEIGHT]);
    for leaf_index in 0..(1 << HEIGHT) {
        let old_leaf = H::zero_bytes()[0];
        let new_leaf = [5u8; 32];
        let res = merkle_tree.update(
            merkle_tree.changelog_index(),
            &old_leaf,
            &new_leaf,
            leaf_index,
            &mut proof,
        );
        assert!(matches!(
            res,
            Err(ConcurrentMerkleTreeError::CannotUpdateEmpty)
        ));
    }
}

#[test]
fn test_update_empty_keccak() {
    update_empty::<Keccak>()
}

#[test]
fn test_update_empty_poseidon() {
    update_empty::<Poseidon>()
}

#[test]
fn test_update_empty_sha256() {
    update_empty::<Sha256>()
}

/// Checks that appending an empty batch fails with `EmptyLeaves`.
fn append_empty_batch<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 26;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;

    let mut merkle_tree =
        ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();

    let res = merkle_tree.append_batch(&[]);
    assert!(matches!(res, Err(ConcurrentMerkleTreeError::EmptyLeaves)));
}

#[test]
fn test_append_empty_batch_keccak() {
    append_empty_batch::<Keccak>()
}

#[test]
fn test_append_empty_batch_poseidon() {
    append_empty_batch::<Poseidon>()
}

#[test]
fn test_append_empty_batch_sha256() {
    append_empty_batch::<Sha256>()
}

/// Reproducible only with Poseidon. Keccak and SHA256 don't return errors, as
/// they don't operate on a prime field.
#[test]
fn hasher_error() {
    const HEIGHT: usize = 26;
    const CHANGELOG: usize = 256;
    const ROOTS: usize = 256;
    const CANOPY: usize = 0;

    let mut merkle_tree =
        ConcurrentMerkleTree::<Poseidon, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap();
    merkle_tree.init().unwrap();

    // Append a leaf which exceeds the modulus of the Poseidon prime field.
    let res = merkle_tree.append(&[255_u8; 32]);
    assert!(matches!(res, Err(ConcurrentMerkleTreeError::Hasher(_))));
}

/// Simulates the on-chain append / nullify flow: leaves are appended to both
/// an on-chain-style tree and an off-chain "crank" reference tree, queued in
/// a hash set, then a random subset is nullified (set to zero) on both trees
/// and the resulting roots and canopies are compared.
#[test]
pub fn test_100_nullify_mt() {
    for iterations in 1..100 {
        println!("iteration: {:?}", iterations);
        let mut crank_merkle_tree =
            light_merkle_tree_reference::MerkleTree::<light_hasher::Poseidon>::new(26, 10);
        let mut onchain_merkle_tree =
            ConcurrentMerkleTree::<Poseidon, 26>::new(26, 10, 10, 10).unwrap();
        onchain_merkle_tree.init().unwrap();
        assert_eq!(onchain_merkle_tree.root(), crank_merkle_tree.root());

        let mut queue = HashSet::new(6857, 2400).unwrap();
        let mut queue_indices = Vec::new();
        for i in 1..1 + iterations {
            let mut leaf = [0; 32];
            leaf[31] = i as u8;
            // onchain this is equivalent to append state (compressed pda program)
            onchain_merkle_tree.append(&leaf).unwrap();
            crank_merkle_tree.append(&leaf).unwrap();
            // onchain the equivalent is nullify state (compressed pda program)
            let leaf_bn = BigUint::from_be_bytes(&leaf);
            queue.insert(&leaf_bn, 1).unwrap();
            let (_, index) = queue.find_element(&leaf_bn, None).unwrap().unwrap();
            queue_indices.push(index);
        }
        assert_eq!(onchain_merkle_tree.root(), crank_merkle_tree.root());
        assert_eq!(
            onchain_merkle_tree.canopy,
            crank_merkle_tree.get_canopy().unwrap()
        );
        let mut rng = rand::thread_rng();
        // Pick random queue indices to nullify.
        let queue_indices = queue_indices
            .choose_multiple(&mut rng, cmp::min(9, iterations))
            .cloned()
            .collect::<Vec<_>>();
        let change_log_index = onchain_merkle_tree.changelog_index();
        let mut nullified_leaf_indices = Vec::with_capacity(queue_indices.len());
        // Nullify the leaves we picked.
for queue_index in queue_indices { let leaf_cell = queue.get_unmarked_bucket(queue_index).unwrap().unwrap(); let leaf_index = crank_merkle_tree .get_leaf_index(&leaf_cell.value_bytes()) .unwrap() .clone(); let mut proof = crank_merkle_tree .get_proof_of_leaf(leaf_index, false) .unwrap(); onchain_merkle_tree .update( change_log_index, &leaf_cell.value_bytes(), &[0u8; 32], leaf_index, &mut proof, ) .unwrap(); nullified_leaf_indices.push(leaf_index); } for leaf_index in nullified_leaf_indices { crank_merkle_tree.update(&[0; 32], leaf_index).unwrap(); } assert_eq!(onchain_merkle_tree.root(), crank_merkle_tree.root()); assert_eq!( onchain_merkle_tree.canopy, crank_merkle_tree.get_canopy().unwrap() ); } } const LEAVES_WITH_NULLIFICATIONS: [([u8; 32], Option<usize>); 25] = [ ( [ 9, 207, 75, 159, 247, 170, 46, 154, 178, 197, 60, 83, 191, 240, 137, 41, 36, 54, 242, 50, 43, 48, 56, 220, 154, 217, 138, 19, 152, 123, 86, 8, ], None, ), ( [ 40, 10, 138, 159, 12, 188, 226, 84, 188, 92, 250, 11, 94, 240, 77, 158, 69, 219, 175, 48, 248, 181, 216, 200, 54, 38, 12, 224, 155, 40, 23, 32, ], None, ), ( [ 11, 36, 94, 177, 195, 5, 4, 35, 75, 253, 31, 235, 68, 201, 79, 197, 199, 23, 214, 86, 196, 2, 41, 249, 246, 138, 184, 248, 245, 66, 184, 244, ], None, ), ( [ 29, 3, 221, 195, 235, 46, 139, 171, 137, 7, 36, 118, 178, 198, 52, 20, 10, 131, 164, 5, 116, 187, 118, 186, 34, 193, 46, 6, 5, 144, 82, 4, ], None, ), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(0), ), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(1), ), ( [ 6, 146, 149, 76, 49, 159, 84, 164, 203, 159, 181, 165, 21, 204, 111, 149, 87, 255, 46, 82, 162, 181, 99, 178, 247, 27, 166, 174, 212, 39, 163, 106, ], None, ), ( [ 19, 135, 28, 172, 63, 129, 175, 101, 201, 97, 135, 147, 18, 78, 152, 243, 15, 154, 120, 153, 92, 46, 245, 82, 67, 32, 224, 141, 89, 149, 162, 228, ], None, ), ( [ 4, 93, 251, 40, 246, 136, 
132, 20, 175, 98, 3, 186, 159, 251, 128, 159, 219, 172, 67, 20, 69, 19, 66, 193, 232, 30, 121, 19, 193, 177, 143, 6, ], None, ), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(3), ), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(4), ), ( [ 34, 229, 118, 4, 68, 219, 118, 228, 117, 70, 150, 93, 208, 215, 51, 243, 123, 48, 39, 228, 206, 194, 200, 232, 35, 133, 166, 222, 118, 217, 122, 228, ], None, ), ( [ 24, 61, 159, 11, 70, 12, 177, 252, 244, 238, 130, 73, 202, 69, 102, 83, 33, 103, 82, 66, 83, 191, 149, 187, 141, 111, 253, 110, 49, 5, 47, 151, ], None, ), ( [ 29, 239, 118, 17, 75, 98, 148, 167, 142, 190, 223, 175, 98, 255, 153, 111, 127, 169, 62, 234, 90, 89, 90, 70, 218, 161, 233, 150, 89, 173, 19, 1, ], None, ), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(6), ), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(5), ), ( [ 45, 31, 195, 30, 201, 235, 73, 88, 57, 130, 35, 53, 202, 191, 20, 156, 125, 123, 37, 49, 154, 194, 124, 157, 198, 236, 233, 25, 195, 174, 157, 31, ], None, ), ( [ 5, 59, 32, 123, 40, 100, 50, 132, 2, 194, 104, 95, 21, 23, 52, 56, 125, 198, 102, 210, 24, 44, 99, 255, 185, 255, 151, 249, 67, 167, 189, 85, ], None, ), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(9), ), ( [ 36, 131, 231, 53, 12, 14, 62, 144, 170, 248, 90, 226, 125, 178, 99, 87, 101, 226, 179, 43, 110, 130, 233, 194, 112, 209, 74, 219, 154, 48, 41, 148, ], None, ), ( [ 12, 110, 79, 229, 117, 215, 178, 45, 227, 65, 183, 14, 91, 45, 170, 232, 126, 71, 37, 211, 160, 77, 148, 223, 50, 144, 134, 232, 83, 159, 131, 62, ], None, ), ( [ 28, 57, 110, 171, 41, 144, 47, 162, 132, 221, 102, 100, 30, 69, 249, 176, 87, 134, 133, 207, 250, 166, 139, 16, 73, 39, 11, 139, 158, 182, 43, 68, ], None, 
), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(11), ), ( [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], Some(10), ), ( [ 25, 88, 170, 121, 91, 234, 185, 213, 24, 92, 209, 146, 109, 134, 118, 242, 74, 218, 69, 28, 87, 154, 207, 86, 218, 48, 182, 206, 8, 9, 35, 240, ], None, ), ]; /// Test correctness of subtree updates during updates. /// The test data is a sequence of leaves with some nullifications /// and the result of a randomized tests which has triggered subtree inconsistencies. /// 1. Test subtree consistency with test data /// 2. Test subtree consistency of updating the right most leaf #[test] fn test_subtree_updates() { const HEIGHT: usize = 26; let mut ref_mt = light_merkle_tree_reference::MerkleTree::<light_hasher::Keccak>::new(HEIGHT, 0); let mut con_mt = light_concurrent_merkle_tree::ConcurrentMerkleTree26::<light_hasher::Keccak>::new( HEIGHT, 1400, 2400, 0, ) .unwrap(); let mut spl_concurrent_mt = spl_concurrent_merkle_tree::concurrent_merkle_tree::ConcurrentMerkleTree::<HEIGHT, 256>::new(); spl_concurrent_mt.initialize().unwrap(); con_mt.init().unwrap(); assert_eq!(ref_mt.root(), con_mt.root()); for (_, leaf) in LEAVES_WITH_NULLIFICATIONS.iter().enumerate() { match leaf.1 { Some(index) => { let change_log_index = con_mt.changelog_index(); let mut proof = ref_mt.get_proof_of_leaf(index, false).unwrap(); let old_leaf = ref_mt.get_leaf(index); let current_root = con_mt.root(); spl_concurrent_mt .set_leaf( current_root, old_leaf, [0u8; 32], proof.to_array::<HEIGHT>().unwrap().as_slice(), index.try_into().unwrap(), ) .unwrap(); con_mt .update(change_log_index, &old_leaf, &[0u8; 32], index, &mut proof) .unwrap(); ref_mt.update(&[0u8; 32], index).unwrap(); } None => { con_mt.append(&leaf.0).unwrap(); ref_mt.append(&leaf.0).unwrap(); spl_concurrent_mt.append(leaf.0).unwrap(); } } assert_eq!(spl_concurrent_mt.get_root(), ref_mt.root()); 
assert_eq!(spl_concurrent_mt.get_root(), con_mt.root()); assert_eq!(ref_mt.root(), con_mt.root()); } let index = con_mt.next_index() - 1; // test rightmost leaf edge case let change_log_index = con_mt.changelog_index(); let mut proof = ref_mt.get_proof_of_leaf(index, false).unwrap(); let old_leaf = ref_mt.get_leaf(index); let current_root = con_mt.root(); spl_concurrent_mt .set_leaf( current_root, old_leaf, [0u8; 32], proof.to_array::<HEIGHT>().unwrap().as_slice(), index.try_into().unwrap(), ) .unwrap(); con_mt .update(change_log_index, &old_leaf, &[0u8; 32], index, &mut proof) .unwrap(); ref_mt.update(&[0u8; 32], index).unwrap(); assert_eq!(spl_concurrent_mt.get_root(), ref_mt.root()); assert_eq!(spl_concurrent_mt.get_root(), con_mt.root()); assert_eq!(ref_mt.root(), con_mt.root()); let leaf = [3u8; 32]; con_mt.append(&leaf).unwrap(); ref_mt.append(&leaf).unwrap(); spl_concurrent_mt.append(leaf).unwrap(); assert_eq!(spl_concurrent_mt.get_root(), ref_mt.root()); assert_eq!(spl_concurrent_mt.get_root(), con_mt.root()); assert_eq!(ref_mt.root(), con_mt.root()); } /// Tests an update of a leaf which was modified by another updates. fn update_already_modified_leaf< H, // Number of conflicting updates of the same leaf. const CONFLICTS: usize, // Number of appends of random leaves before submitting the conflicting // updates. const RANDOM_APPENDS_BEFORE_CONFLICTS: usize, // Number of appends of random leaves after every single conflicting // update. const RANDOM_APPENDS_AFTER_EACH_CONFLICT: usize, >() where H: Hasher, { const HEIGHT: usize = 26; const MAX_CHANGELOG: usize = 8; const MAX_ROOTS: usize = 8; const CANOPY: usize = 0; let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, MAX_CHANGELOG, MAX_ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY); let mut rng = thread_rng(); // Create tree with a single leaf. 
let first_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); merkle_tree.append(&first_leaf).unwrap(); reference_tree.append(&first_leaf).unwrap(); // Save a proof of the first append. let outdated_changelog_index = merkle_tree.changelog_index(); let mut outdated_proof = reference_tree.get_proof_of_leaf(0, false).unwrap().clone(); let mut old_leaf = first_leaf; for _ in 0..CONFLICTS { // Update leaf. Always use an up-to-date proof. let mut up_to_date_proof = reference_tree.get_proof_of_leaf(0, false).unwrap(); let new_leaf = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); merkle_tree .update( merkle_tree.changelog_index(), &old_leaf, &new_leaf, 0, &mut up_to_date_proof, ) .unwrap(); reference_tree.update(&new_leaf, 0).unwrap(); old_leaf = new_leaf; assert_eq!(merkle_tree.root(), reference_tree.root()); } // Update leaf. This time, try using an outdated proof. let new_leaf = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let res = merkle_tree.update( outdated_changelog_index, &first_leaf, &new_leaf, 0, &mut outdated_proof, ); assert!(matches!( res, Err(ConcurrentMerkleTreeError::CannotUpdateLeaf) )); } #[test] fn test_update_already_modified_leaf_keccak_1_0_0() { update_already_modified_leaf::<Keccak, 1, 0, 0>() } #[test] fn test_update_already_modified_leaf_poseidon_1_0_0() { update_already_modified_leaf::<Poseidon, 1, 0, 0>() } #[test] fn test_update_already_modified_leaf_sha256_1_0_0() { update_already_modified_leaf::<Sha256, 1, 0, 0>() } #[test] fn test_update_already_modified_leaf_keccak_1_1_1() { update_already_modified_leaf::<Keccak, 1, 1, 1>() } #[test] fn test_update_already_modified_leaf_poseidon_1_1_1() { update_already_modified_leaf::<Poseidon, 1, 1, 1>() } #[test] fn test_update_already_modified_leaf_sha256_1_1_1() { update_already_modified_leaf::<Sha256, 1, 1, 1>() } #[test] fn test_update_already_modified_leaf_keccak_1_2_2() { update_already_modified_leaf::<Keccak, 
1, 2, 2>() } #[test] fn test_update_already_modified_leaf_poseidon_1_2_2() { update_already_modified_leaf::<Poseidon, 1, 2, 2>() } #[test] fn test_update_already_modified_leaf_sha256_1_2_2() { update_already_modified_leaf::<Sha256, 1, 2, 2>() } #[test] fn test_update_already_modified_leaf_keccak_2_0_0() { update_already_modified_leaf::<Keccak, 2, 0, 0>() } #[test] fn test_update_already_modified_leaf_poseidon_2_0_0() { update_already_modified_leaf::<Poseidon, 2, 0, 0>() } #[test] fn test_update_already_modified_leaf_sha256_2_0_0() { update_already_modified_leaf::<Sha256, 2, 0, 0>() } #[test] fn test_update_already_modified_leaf_keccak_2_1_1() { update_already_modified_leaf::<Keccak, 2, 1, 1>() } #[test] fn test_update_already_modified_leaf_poseidon_2_1_1() { update_already_modified_leaf::<Poseidon, 2, 1, 1>() } #[test] fn test_update_already_modified_leaf_sha256_2_1_1() { update_already_modified_leaf::<Sha256, 2, 1, 1>() } #[test] fn test_update_already_modified_leaf_keccak_2_2_2() { update_already_modified_leaf::<Keccak, 2, 2, 2>() } #[test] fn test_update_already_modified_leaf_poseidon_2_2_2() { update_already_modified_leaf::<Poseidon, 2, 2, 2>() } #[test] fn test_update_already_modified_leaf_sha256_2_2_2() { update_already_modified_leaf::<Sha256, 2, 2, 2>() } #[test] fn test_update_already_modified_leaf_keccak_4_0_0() { update_already_modified_leaf::<Keccak, 4, 0, 0>() } #[test] fn test_update_already_modified_leaf_poseidon_4_0_0() { update_already_modified_leaf::<Poseidon, 4, 0, 0>() } #[test] fn test_update_already_modified_leaf_sha256_4_0_0() { update_already_modified_leaf::<Sha256, 4, 0, 0>() } #[test] fn test_update_already_modified_leaf_keccak_4_1_1() { update_already_modified_leaf::<Keccak, 4, 1, 1>() } #[test] fn test_update_already_modified_leaf_poseidon_4_1_1() { update_already_modified_leaf::<Poseidon, 4, 1, 1>() } #[test] fn test_update_already_modified_leaf_sha256_4_1_1() { update_already_modified_leaf::<Sha256, 4, 1, 1>() } #[test] fn 
test_update_already_modified_leaf_keccak_4_4_4() { update_already_modified_leaf::<Keccak, 4, 4, 4>() } #[test] fn test_update_already_modified_leaf_poseidon_4_4_4() { update_already_modified_leaf::<Poseidon, 4, 4, 4>() } #[test] fn test_update_already_modified_leaf_sha256_4_4_4() { update_already_modified_leaf::<Sha256, 4, 4, 4>() } /// Checks whether the [`changelog_entries`](ConcurrentMerkleTree::changelog_entries) /// method returns an iterator with expected entries. /// /// We expect the `changelog_entries` method to return an iterator with entries /// newer than the requested index. /// /// # Examples /// /// (In the tree) `current_index`: 1 /// (Requested) `changelog_index`: 1 /// Expected iterator: `[]` (empty) /// /// (In the tree) `current_index`: 3 /// (Requested) `changelog_index`: 1 /// Expected iterator: `[2, 3]` (1 is skipped) /// /// Changelog capacity: 12 /// (In the tree) `current_index`: 9 /// (Requested) `changelog_index`: 3 (lowed than `current_index`, because the /// changelog is full and started overwriting values from the head) /// Expected iterator: `[10, 11, 12, 13, 14, 15]` (9 is skipped) fn changelog_entries<H>() where H: Hasher, { const HEIGHT: usize = 26; const CHANGELOG: usize = 12; const ROOTS: usize = 16; const CANOPY: usize = 0; let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); merkle_tree .append(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]) .unwrap(); let changelog_entries = merkle_tree .changelog_entries(1) .unwrap() .collect::<Vec<_>>(); assert!(changelog_entries.is_empty()); // Try getting changelog entries out of bounds. 
for start in merkle_tree.changelog.len()..1000 { let changelog_entries = merkle_tree.changelog_entries(start); assert!(matches!( changelog_entries, Err(ConcurrentMerkleTreeError::BoundedVec( BoundedVecError::IterFromOutOfBounds )) )); } merkle_tree .append(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, ]) .unwrap(); merkle_tree .append(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, ]) .unwrap(); let changelog_leaves = merkle_tree .changelog_entries(1) .unwrap() .map(|changelog_entry| changelog_entry.path[0]) .collect::<Vec<_>>(); assert_eq!( changelog_leaves.as_slice(), &[ Some([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 ]), Some([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 ]) ] ); // Try getting changelog entries out of bounds. for start in merkle_tree.changelog.len()..1000 { let changelog_entries = merkle_tree.changelog_entries(start); assert!(matches!( changelog_entries, Err(ConcurrentMerkleTreeError::BoundedVec( BoundedVecError::IterFromOutOfBounds )) )); } for i in 4_u8..16_u8 { merkle_tree .append(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, i, ]) .unwrap(); } let changelog_leaves = merkle_tree .changelog_entries(9) .unwrap() .map(|changelog_entry| changelog_entry.path[0]) .collect::<Vec<_>>(); assert_eq!( changelog_leaves.as_slice(), &[ Some([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 ]), Some([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 ]), Some([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 ]), Some([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 ]), Some([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14 ]), Some([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15 ]) ] ); // Try getting changelog entries out of bounds. for start in merkle_tree.changelog.len()..1000 { let changelog_entries = merkle_tree.changelog_entries(start); assert!(matches!( changelog_entries, Err(ConcurrentMerkleTreeError::BoundedVec( BoundedVecError::IterFromOutOfBounds )) )); } } #[test] fn changelog_entries_keccak() { changelog_entries::<Keccak>() } #[test] fn changelog_entries_poseidon() { changelog_entries::<Poseidon>() } #[test] fn changelog_entries_sha256() { changelog_entries::<Sha256>() } /// Checks whether the [`changelog_entries`](ConcurrentMerkleTree::changelog_entries) /// method returns an iterator with expected entries. /// /// It tests random insertions and updates and checks the consistency of leaves /// (`path[0]`) in changelogs. fn changelog_entries_random< H, const HEIGHT: usize, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize, >() where H: Hasher, { let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY); let mut rng = thread_rng(); let changelog_entries = merkle_tree .changelog_entries(0) .unwrap() .collect::<Vec<_>>(); assert!(changelog_entries.is_empty()); // Requesting changelog entries starting from the current `changelog_index()` // should always return an empty iterator. let changelog_entries = merkle_tree .changelog_entries(merkle_tree.changelog_index()) .unwrap() .collect::<Vec<_>>(); assert!(changelog_entries.is_empty()); // Vector of changelog indices after each operation. let mut leaf_indices = CyclicBoundedVec::with_capacity(CHANGELOG); // Vector of roots after each operation. 
let mut roots = CyclicBoundedVec::with_capacity(CHANGELOG); // Vector of merkle paths we get from the reference tree after each operation. let mut merkle_paths = CyclicBoundedVec::with_capacity(CHANGELOG); // Changelog is always initialized with a changelog path consisting of zero // bytes. For consistency, we need to assert the 1st zero byte as the first // expected leaf in the changelog. let merkle_path = reference_tree.get_path_of_leaf(0, true).unwrap(); leaf_indices.push(0); merkle_paths.push(merkle_path); roots.push(merkle_tree.root()); for _ in 0..1000 { // Append random leaf. let leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let leaf_index = merkle_tree.next_index(); merkle_tree.append(&leaf).unwrap(); reference_tree.append(&leaf).unwrap(); leaf_indices.push(leaf_index); roots.push(merkle_tree.root()); let merkle_path = reference_tree.get_path_of_leaf(leaf_index, true).unwrap(); merkle_paths.push(merkle_path); let changelog_entries = merkle_tree .changelog_entries(merkle_tree.changelog.first_index()) .unwrap() .collect::<Vec<_>>(); assert_eq!(changelog_entries.len(), merkle_paths.len() - 1); for ((leaf_index, merkle_path), changelog_entry) in leaf_indices .iter() .skip(1) .zip(merkle_paths.iter().skip(1)) .zip(changelog_entries) { assert_eq!(changelog_entry.index, *leaf_index as u64); for i in 0..HEIGHT { let changelog_node = changelog_entry.path[i].unwrap(); let path_node = merkle_path[i]; assert_eq!(changelog_node, path_node); } } // Requesting changelog entries starting from the current `changelog_index()` // should always return an empty iterator. let changelog_entries = merkle_tree .changelog_entries(merkle_tree.changelog_index()) .unwrap() .collect::<Vec<_>>(); assert!(changelog_entries.is_empty()); // Update random leaf. 
let leaf_index = rng.gen_range(0..reference_tree.leaves().len()); let old_leaf = reference_tree.get_leaf(leaf_index); let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let mut proof = reference_tree.get_proof_of_leaf(leaf_index, false).unwrap(); merkle_tree .update( merkle_tree.changelog_index(), &old_leaf, &new_leaf, leaf_index, &mut proof, ) .unwrap(); reference_tree.update(&new_leaf, leaf_index).unwrap(); leaf_indices.push(leaf_index); roots.push(merkle_tree.root()); let merkle_path = reference_tree.get_path_of_leaf(leaf_index, true).unwrap(); merkle_paths.push(merkle_path); let changelog_entries = merkle_tree .changelog_entries(merkle_tree.changelog.first_index()) .unwrap() .collect::<Vec<_>>(); assert_eq!(changelog_entries.len(), merkle_paths.len() - 1); for ((leaf_index, merkle_path), changelog_entry) in leaf_indices .iter() .skip(1) .zip(merkle_paths.iter().skip(1)) .zip(changelog_entries) { assert_eq!(changelog_entry.index, *leaf_index as u64); for i in 0..HEIGHT { let changelog_node = changelog_entry.path[i].unwrap(); let path_node = merkle_path[i]; assert_eq!(changelog_node, path_node); } } // Requesting changelog entries starting from the current `changelog_index()` // should always return an empty iterator. 
let changelog_entries = merkle_tree .changelog_entries(merkle_tree.changelog_index()) .unwrap() .collect::<Vec<_>>(); assert!(changelog_entries.is_empty()); } } #[test] fn test_changelog_entries_random_keccak_26_256_256_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 0; changelog_entries_random::<Keccak, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_changelog_entries_random_keccak_26_256_256_10() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 10; changelog_entries_random::<Keccak, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_changelog_entries_random_poseidon_26_256_256_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 0; changelog_entries_random::<Poseidon, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_changelog_entries_random_poseidon_26_256_256_10() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 10; changelog_entries_random::<Poseidon, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_changelog_entries_random_sha256_26_256_256_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 0; changelog_entries_random::<Sha256, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_changelog_entries_random_sha256_26_256_256_10() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 10; changelog_entries_random::<Sha256, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } /// When reading the tests above (`changelog_entries`, `changelog_entries_random`) /// you might be still wondering why is skipping the **current** changelog element /// necessary. /// /// The explanation is that not skipping the current element might produce leaf /// conflicts. 
Imagine that we insert a leaf and then we try to immediately update /// it. Starting the iteration /// /// This test reproduces that case and serves as a proof that skipping is the /// right action. fn changelog_iteration_without_skipping< H, const HEIGHT: usize, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize, >() where H: Hasher, { /// A broken re-implementation of `ConcurrentMerkleTree::update_proof_from_changelog` /// which reproduces the described issue. fn update_proof_from_changelog<H, const HEIGHT: usize>( merkle_tree: &ConcurrentMerkleTree<H, HEIGHT>, changelog_index: usize, leaf_index: usize, proof: &mut BoundedVec<[u8; 32]>, ) -> Result<(), ConcurrentMerkleTreeError> where H: Hasher, { for changelog_entry in merkle_tree.changelog.iter_from(changelog_index).unwrap() { changelog_entry.update_proof(leaf_index, proof)?; } Ok(()) } let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY); let mut rng = thread_rng(); let leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); merkle_tree.append(&leaf).unwrap(); reference_tree.append(&leaf).unwrap(); let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap(); let res = update_proof_from_changelog(&merkle_tree, merkle_tree.changelog_index(), 0, &mut proof); assert!(matches!( res, Err(ConcurrentMerkleTreeError::CannotUpdateLeaf) )); } #[test] fn test_changelog_interation_without_skipping_keccak_26_16_16_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 16; const ROOTS: usize = 16; const CANOPY: usize = 0; changelog_iteration_without_skipping::<Keccak, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_changelog_interation_without_skipping_poseidon_26_16_16_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 16; const ROOTS: usize = 16; const CANOPY: usize = 0; 
changelog_iteration_without_skipping::<Poseidon, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_changelog_interation_without_skipping_sha256_26_16_16_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 16; const ROOTS: usize = 16; const CANOPY: usize = 0; changelog_iteration_without_skipping::<Sha256, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } /// Tests an update with an old `changelog_index` and proof, which refers to the /// state before the changelog wrap-around (enough new operations to overwrite /// the whole changelog). Such an update should fail, fn update_changelog_wrap_around< H, const HEIGHT: usize, const CHANGELOG: usize, const ROOTS: usize, const CANOPY: usize, >() where H: Hasher, { let mut merkle_tree = ConcurrentMerkleTree::<H, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY).unwrap(); merkle_tree.init().unwrap(); let mut reference_tree = light_merkle_tree_reference::MerkleTree::<H>::new(HEIGHT, CANOPY); let mut rng = thread_rng(); // The leaf which we will want to update with an expired changelog. let leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let (changelog_index, _) = merkle_tree.append(&leaf).unwrap(); reference_tree.append(&leaf).unwrap(); let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap(); // Perform enough appends and updates to overfill the changelog for i in 0..CHANGELOG { if i % 2 == 0 { // Append random leaf. let leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); merkle_tree.append(&leaf).unwrap(); reference_tree.append(&leaf).unwrap(); } else { // Update random leaf. 
let leaf_index = rng.gen_range(1..reference_tree.leaves().len()); let old_leaf = reference_tree.get_leaf(leaf_index); let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let mut proof = reference_tree.get_proof_of_leaf(leaf_index, false).unwrap(); merkle_tree .update( merkle_tree.changelog_index(), &old_leaf, &new_leaf, leaf_index, &mut proof, ) .unwrap(); reference_tree.update(&new_leaf, leaf_index).unwrap(); } } // Try to update the original `leaf` with an outdated proof and changelog // index. Expect an error. let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() .try_into() .unwrap(); let res = merkle_tree.update(changelog_index, &leaf, &new_leaf, 0, &mut proof); assert!(matches!( res, Err(ConcurrentMerkleTreeError::InvalidProof(_, _)) )); // Try to update the original `leaf` with an up-to-date proof and changelog // index. Expect a success. let changelog_index = merkle_tree.changelog_index(); let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap(); merkle_tree .update(changelog_index, &leaf, &new_leaf, 0, &mut proof) .unwrap(); } #[test] fn test_update_changelog_wrap_around_keccak_26_256_512_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 0; update_changelog_wrap_around::<Keccak, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_update_changelog_wrap_around_poseidon_26_256_512_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 0; update_changelog_wrap_around::<Poseidon, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_update_changelog_wrap_around_sha256_26_256_512_0() { const HEIGHT: usize = 26; const CHANGELOG: usize = 256; const ROOTS: usize = 256; const CANOPY: usize = 0; update_changelog_wrap_around::<Sha256, HEIGHT, CHANGELOG, ROOTS, CANOPY>() } #[test] fn test_append_batch() { let mut tree = ConcurrentMerkleTree::<Sha256, 2>::new(2, 2, 2, 1).unwrap(); 
tree.init().unwrap(); let leaf_0 = [0; 32]; let leaf_1 = [1; 32]; tree.append_batch(&[&leaf_0, &leaf_1]).unwrap(); let change_log_0 = &tree .changelog .get(tree.changelog.first_index()) .unwrap() .path; let change_log_1 = &tree .changelog .get(tree.changelog.last_index()) .unwrap() .path; let path_0 = ChangelogPath([Some(leaf_0), None]); let path_1 = ChangelogPath([ Some(leaf_1), Some(Sha256::hashv(&[&leaf_0, &leaf_1]).unwrap()), ]); assert_eq!(change_log_1, &path_1); assert_eq!(change_log_0, &path_0); } /// Tests that updating proof with changelog entries with incomplete paths (coming /// from batched appends) works. #[test] fn test_append_batch_and_update() { let mut tree = ConcurrentMerkleTree::<Sha256, 3>::new(3, 10, 10, 0).unwrap(); tree.init().unwrap(); let mut reference_tree = light_merkle_tree_reference::MerkleTree::<Sha256>::new(3, 0); // Append two leaves. let leaf_0 = [0; 32]; let leaf_1 = [1; 32]; tree.append_batch(&[&leaf_0, &leaf_1]).unwrap(); reference_tree.append(&leaf_0).unwrap(); reference_tree.append(&leaf_1).unwrap(); let changelog_index = tree.changelog_index(); let mut proof_leaf_0 = reference_tree.get_proof_of_leaf(0, false).unwrap(); let mut proof_leaf_1 = reference_tree.get_proof_of_leaf(1, false).unwrap(); // Append another two leaves. let leaf_2 = [2; 32]; let leaf_3 = [3; 32]; tree.append_batch(&[&leaf_2, &leaf_3]).unwrap(); reference_tree.append(&leaf_2).unwrap(); reference_tree.append(&leaf_3).unwrap(); let changelog_entry_leaf_2 = &tree.changelog[3]; // Make sure that the non-terminal changelog entry has `None` nodes. assert_eq!( changelog_entry_leaf_2.path, ChangelogPath([Some([2; 32]), None, None]) ); let changelog_entry_leaf_3 = &tree.changelog[4]; // And that the terminal one has no `None` nodes. 
assert_eq!( changelog_entry_leaf_3.path, ChangelogPath([ Some([3; 32]), Some([ 39, 243, 47, 187, 250, 194, 251, 187, 206, 88, 177, 7, 82, 20, 75, 90, 116, 70, 212, 185, 30, 75, 169, 15, 253, 238, 48, 94, 145, 89, 128, 232 ]), Some([ 211, 95, 81, 105, 147, 137, 218, 126, 236, 124, 229, 235, 2, 100, 12, 109, 49, 140, 245, 26, 227, 158, 202, 137, 11, 188, 123, 132, 236, 181, 218, 104 ]) ]) ); // The tree (only the used fragment) looks like: // // _ H2 _ // / \ // H0 H1 // / \ / \ // L0 L1 L2 L3 // Update `leaf_0`. Expect a success. let new_leaf_0 = [10; 32]; tree.update(changelog_index, &leaf_0, &new_leaf_0, 0, &mut proof_leaf_0) .unwrap(); // Update `leaf_1`. Expect a success. let new_leaf_1 = [20; 32]; tree.update(changelog_index, &leaf_1, &new_leaf_1, 1, &mut proof_leaf_1) .unwrap(); } /// Makes sure canopy works by: /// /// 1. Appending 3 leaves. /// 2. Updating the first leaf. /// 3. Updating the second leaf. fn update_with_canopy<H>() where H: Hasher, { let mut tree = ConcurrentMerkleTree::<H, 2>::new(2, 2, 2, 1).unwrap(); tree.init().unwrap(); let leaf_0 = [0; 32]; let leaf_1 = [1; 32]; let leaf_2 = [2; 32]; tree.append(&leaf_0).unwrap(); tree.append(&leaf_1).unwrap(); tree.append(&leaf_2).unwrap(); let old_canopy = tree.canopy.as_slice()[0].clone(); let new_leaf_0 = [1; 32]; let mut leaf_0_proof = BoundedVec::with_capacity(2); leaf_0_proof.push(leaf_1).unwrap(); tree.update( tree.changelog_index(), &leaf_0, &new_leaf_0, 0, &mut leaf_0_proof, ) .unwrap(); let new_canopy = tree.canopy.as_slice()[0].clone(); assert_ne!(old_canopy, new_canopy); let new_leaf_2 = [3; 32]; let mut leaf_2_proof = BoundedVec::with_capacity(2); leaf_2_proof.push([0; 32]).unwrap(); tree.update( tree.changelog_index(), &leaf_2, &new_leaf_2, 2, &mut leaf_2_proof, ) .unwrap(); } #[test] fn test_update_with_canopy_keccak() { update_with_canopy::<Keccak>() } #[test] fn test_update_with_canopy_poseidon() { update_with_canopy::<Poseidon>() } #[test] fn test_update_with_canopy_sha256() { 
update_with_canopy::<Sha256>() }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/copy.rs
use std::ops::Deref;

use light_bounded_vec::{BoundedVecMetadata, CyclicBoundedVecMetadata};
use light_hasher::Hasher;
use light_utils::offset::copy::{read_bounded_vec_at, read_cyclic_bounded_vec_at, read_value_at};
use memoffset::{offset_of, span_of};

use crate::{errors::ConcurrentMerkleTreeError, ConcurrentMerkleTree};

/// Owned deserialization of a [`ConcurrentMerkleTree`] from an account byte
/// buffer. Unlike the zero-copy wrappers, every dynamic field is *copied* out
/// of `bytes` into freshly allocated memory, so the result does not borrow the
/// buffer.
#[derive(Debug)]
pub struct ConcurrentMerkleTreeCopy<H, const HEIGHT: usize>(ConcurrentMerkleTree<H, HEIGHT>)
where
    H: Hasher;

impl<H, const HEIGHT: usize> ConcurrentMerkleTreeCopy<H, HEIGHT>
where
    H: Hasher,
{
    /// Deserializes a [`ConcurrentMerkleTree`] by value from `bytes`.
    ///
    /// Returns the tree together with the final read offset (the number of
    /// bytes consumed).
    ///
    /// # Errors
    ///
    /// Returns [`ConcurrentMerkleTreeError::BufferSize`] when `bytes` is too
    /// short for either the fixed-size header or the dynamic fields implied
    /// by the deserialized metadata.
    pub fn struct_from_bytes_copy(
        bytes: &[u8],
    ) -> Result<(ConcurrentMerkleTree<H, HEIGHT>, usize), ConcurrentMerkleTreeError> {
        // First bounds check: only guarantees the fixed-size header is
        // readable; the dynamic part is re-checked below once the metadata
        // (and thus the capacities) is known.
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::non_dyn_fields_size();
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

        // `span_of!` relies on `ConcurrentMerkleTree` being `#[repr(C)]`: the
        // account buffer mirrors the struct's in-memory field layout.
        let height = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
                .try_into()
                .unwrap(),
        );
        let canopy_depth = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
                .try_into()
                .unwrap(),
        );

        // `offset` is threaded through every `read_*_at` call; the read order
        // below must match the field order of the `#[repr(C)]` struct.
        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);

        let next_index = unsafe { read_value_at(bytes, &mut offset) };
        let sequence_number = unsafe { read_value_at(bytes, &mut offset) };
        let rightmost_leaf = unsafe { read_value_at(bytes, &mut offset) };
        let filled_subtrees_metadata: BoundedVecMetadata =
            unsafe { read_value_at(bytes, &mut offset) };
        let changelog_metadata: CyclicBoundedVecMetadata =
            unsafe { read_value_at(bytes, &mut offset) };
        let roots_metadata: CyclicBoundedVecMetadata = unsafe { read_value_at(bytes, &mut offset) };
        let canopy_metadata: BoundedVecMetadata = unsafe { read_value_at(bytes, &mut offset) };

        // Second bounds check: now that the capacities are known, make sure
        // the buffer also holds all dynamic fields.
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            changelog_metadata.capacity(),
            roots_metadata.capacity(),
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

        let filled_subtrees =
            unsafe { read_bounded_vec_at(bytes, &mut offset, &filled_subtrees_metadata) };
        let changelog =
            unsafe { read_cyclic_bounded_vec_at(bytes, &mut offset, &changelog_metadata) };
        let roots = unsafe { read_cyclic_bounded_vec_at(bytes, &mut offset, &roots_metadata) };
        let canopy = unsafe { read_bounded_vec_at(bytes, &mut offset, &canopy_metadata) };

        // `new` validates the size constraints and allocates the pointer-backed
        // scalar fields; the freshly allocated (empty) dynamic fields are then
        // replaced with the copies read above.
        let mut merkle_tree = ConcurrentMerkleTree::new(
            height,
            changelog_metadata.capacity(),
            roots_metadata.capacity(),
            canopy_depth,
        )?;
        // SAFETY: Tree is initialized.
        unsafe {
            *merkle_tree.next_index = next_index;
            *merkle_tree.sequence_number = sequence_number;
            *merkle_tree.rightmost_leaf = rightmost_leaf;
        }
        merkle_tree.filled_subtrees = filled_subtrees;
        merkle_tree.changelog = changelog;
        merkle_tree.roots = roots;
        merkle_tree.canopy = canopy;

        Ok((merkle_tree, offset))
    }

    /// Deserializes the tree and wraps it, validating size constraints.
    pub fn from_bytes_copy(bytes: &[u8]) -> Result<Self, ConcurrentMerkleTreeError> {
        let (merkle_tree, _) = Self::struct_from_bytes_copy(bytes)?;
        merkle_tree.check_size_constraints()?;
        Ok(Self(merkle_tree))
    }
}

impl<H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeCopy<H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

#[cfg(test)]
mod test {
    use crate::zero_copy::ConcurrentMerkleTreeZeroCopyMut;

    use super::*;
    use ark_bn254::Fr;
    use ark_ff::{BigInteger, PrimeField, UniformRand};
    use light_hasher::Poseidon;
    use rand::{thread_rng, Rng};

    /// Builds a tree in memory and a second tree on top of a byte buffer,
    /// applies the same appends to both, then checks that the *copied*
    /// deserialization of the buffer matches the in-memory tree.
    fn from_bytes_copy<
        const HEIGHT: usize,
        const CHANGELOG: usize,
        const ROOTS: usize,
        const CANOPY_DEPTH: usize,
        const OPERATIONS: usize,
    >() {
        let mut mt_1 =
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY_DEPTH)
                .unwrap();
        mt_1.init().unwrap();

        // Create a buffer with random bytes - the `*_init` method should
        // initialize the buffer gracefully and the randomness shouldn't cause
        // undefined behavior.
        let mut bytes = vec![
            0u8;
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::size_in_account(
                HEIGHT,
                CHANGELOG,
                ROOTS,
                CANOPY_DEPTH
            )
        ];
        thread_rng().fill(bytes.as_mut_slice());

        // Initialize a Merkle tree on top of a byte slice.
        {
            let mut mt =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_init(
                    bytes.as_mut_slice(),
                    HEIGHT,
                    CANOPY_DEPTH,
                    CHANGELOG,
                    ROOTS,
                )
                .unwrap();
            mt.init().unwrap();

            // Ensure that it was properly initialized.
            assert_eq!(mt.height, HEIGHT);
            assert_eq!(mt.canopy_depth, CANOPY_DEPTH);
            assert_eq!(mt.next_index(), 0);
            assert_eq!(mt.sequence_number(), 0);
            assert_eq!(mt.rightmost_leaf(), Poseidon::zero_bytes()[0]);

            assert_eq!(mt.filled_subtrees.capacity(), HEIGHT);
            assert_eq!(mt.filled_subtrees.len(), HEIGHT);

            assert_eq!(mt.changelog.capacity(), CHANGELOG);
            assert_eq!(mt.changelog.len(), 1);

            assert_eq!(mt.roots.capacity(), ROOTS);
            assert_eq!(mt.roots.len(), 1);

            assert_eq!(
                mt.canopy.capacity(),
                ConcurrentMerkleTree::<Poseidon, HEIGHT>::canopy_size(CANOPY_DEPTH)
            );

            assert_eq!(mt.root(), Poseidon::zero_bytes()[HEIGHT]);
        }

        let mut rng = thread_rng();

        for _ in 0..OPERATIONS {
            // Reload the tree from bytes on each iteration.
            let mut mt_2 =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_mut(
                    &mut bytes,
                )
                .unwrap();

            let leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            mt_1.append(&leaf).unwrap();
            mt_2.append(&leaf).unwrap();

            assert_eq!(mt_1, *mt_2);
        }

        // Read a copy of that Merkle tree.
        let mt_2 = ConcurrentMerkleTreeCopy::<Poseidon, HEIGHT>::from_bytes_copy(&bytes).unwrap();

        assert_eq!(mt_1.height, mt_2.height);
        assert_eq!(mt_1.canopy_depth, mt_2.canopy_depth);
        assert_eq!(mt_1.next_index(), mt_2.next_index());
        assert_eq!(mt_1.sequence_number(), mt_2.sequence_number());
        assert_eq!(mt_1.rightmost_leaf(), mt_2.rightmost_leaf());
        assert_eq!(
            mt_1.filled_subtrees.as_slice(),
            mt_2.filled_subtrees.as_slice()
        );
    }

    #[test]
    fn test_from_bytes_copy_26_1400_2400_10_256_1024() {
        const HEIGHT: usize = 26;
        const CHANGELOG_SIZE: usize = 1400;
        const ROOTS: usize = 2400;
        const CANOPY_DEPTH: usize = 10;
        const OPERATIONS: usize = 1024;

        from_bytes_copy::<HEIGHT, CHANGELOG_SIZE, ROOTS, CANOPY_DEPTH, OPERATIONS>()
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/zero_copy.rs
use std::{
    marker::PhantomData,
    mem,
    ops::{Deref, DerefMut},
};

use light_bounded_vec::{
    BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecMetadata,
};
use light_hasher::Hasher;
use light_utils::offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at};
use memoffset::{offset_of, span_of};

use crate::{errors::ConcurrentMerkleTreeError, ConcurrentMerkleTree};

/// Read-only zero-copy view of a [`ConcurrentMerkleTree`] backed directly by
/// an account byte buffer. All dynamic fields point *into* `_bytes`; nothing
/// is copied and nothing must be freed (see the `Drop` impl below).
#[derive(Debug)]
pub struct ConcurrentMerkleTreeZeroCopy<'a, H, const HEIGHT: usize>
where
    H: Hasher,
{
    // `ManuallyDrop` prevents `ConcurrentMerkleTree::drop` from deallocating
    // pointers that actually live inside the caller's buffer.
    merkle_tree: mem::ManuallyDrop<ConcurrentMerkleTree<H, HEIGHT>>,
    // The purpose of this field is ensuring that the wrapper does not outlive
    // the buffer.
    _bytes: &'a [u8],
}

impl<'a, H, const HEIGHT: usize> ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
    /// Builds a [`ConcurrentMerkleTree`] whose fields are raw pointers into
    /// `bytes`, plus the final read offset.
    ///
    /// # Errors
    ///
    /// Returns [`ConcurrentMerkleTreeError::BufferSize`] when `bytes` is too
    /// short for the header or (after reading the metadata) for the dynamic
    /// fields.
    pub fn struct_from_bytes_zero_copy(
        bytes: &'a [u8],
    ) -> Result<(ConcurrentMerkleTree<H, HEIGHT>, usize), ConcurrentMerkleTreeError> {
        // First check only covers the fixed-size header; the dynamic part is
        // re-checked once the capacities are known.
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::non_dyn_fields_size();
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

        // `span_of!`/`offset_of!` depend on `ConcurrentMerkleTree` being
        // `#[repr(C)]`; the buffer layout mirrors the struct layout.
        let height = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
                .try_into()
                .unwrap(),
        );
        let canopy_depth = usize::from_le_bytes(
            bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
                .try_into()
                .unwrap(),
        );

        // The read order below must match the `#[repr(C)]` field order;
        // `offset` is advanced by each `read_*_at` call.
        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);
        let next_index = unsafe { read_ptr_at(bytes, &mut offset) };
        let sequence_number = unsafe { read_ptr_at(bytes, &mut offset) };
        let rightmost_leaf = unsafe { read_ptr_at(bytes, &mut offset) };

        let filled_subtrees_metadata = unsafe { read_ptr_at(bytes, &mut offset) };
        let changelog_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let roots_metadata: *mut CyclicBoundedVecMetadata =
            unsafe { read_ptr_at(bytes, &mut offset) };
        let canopy_metadata = unsafe { read_ptr_at(bytes, &mut offset) };

        // Second check: the full account size implied by the metadata.
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            unsafe { (*changelog_metadata).capacity() },
            unsafe { (*roots_metadata).capacity() },
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

        // Reassemble the vectors from (metadata ptr, data ptr) pairs that
        // both point into `bytes`.
        let filled_subtrees = unsafe {
            BoundedVec::from_raw_parts(
                filled_subtrees_metadata,
                read_array_like_ptr_at(bytes, &mut offset, height),
            )
        };
        let changelog = unsafe {
            CyclicBoundedVec::from_raw_parts(
                changelog_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*changelog_metadata).capacity()),
            )
        };
        let roots = unsafe {
            CyclicBoundedVec::from_raw_parts(
                roots_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*roots_metadata).capacity()),
            )
        };
        let canopy = unsafe {
            BoundedVec::from_raw_parts(
                canopy_metadata,
                read_array_like_ptr_at(bytes, &mut offset, (*canopy_metadata).capacity()),
            )
        };

        let merkle_tree = ConcurrentMerkleTree {
            height,
            canopy_depth,
            next_index,
            sequence_number,
            rightmost_leaf,
            filled_subtrees,
            changelog,
            roots,
            canopy,
            _hasher: PhantomData,
        };
        merkle_tree.check_size_constraints()?;

        Ok((merkle_tree, offset))
    }

    /// Wraps the zero-copy tree; the buffer's lifetime is captured so the
    /// wrapper cannot outlive it.
    pub fn from_bytes_zero_copy(bytes: &'a [u8]) -> Result<Self, ConcurrentMerkleTreeError> {
        let (merkle_tree, _) = Self::struct_from_bytes_zero_copy(bytes)?;
        // NOTE(review): `struct_from_bytes_zero_copy` already ran this check;
        // the second call is redundant but harmless.
        merkle_tree.check_size_constraints()?;

        Ok(Self {
            merkle_tree: mem::ManuallyDrop::new(merkle_tree),
            _bytes: bytes,
        })
    }
}

impl<'a, H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    fn deref(&self) -> &Self::Target {
        &self.merkle_tree
    }
}

impl<'a, H, const HEIGHT: usize> Drop for ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>
where
    H: Hasher,
{
    fn drop(&mut self) {
        // SAFETY: Don't do anything here! Why?
        //
        // * Primitive fields of `ConcurrentMerkleTree` implement `Copy`,
        //   therefore `drop()` has no effect on them - Rust drops them when
        //   they go out of scope.
        // * Don't drop the dynamic fields (`filled_subtrees`, `roots` etc.). In
        //   `ConcurrentMerkleTreeZeroCopy`, they are backed by buffers provided
        //   by the caller. These buffers are going to be eventually deallocated.
        //   Performing an another `drop()` here would result double `free()`
        //   which would result in aborting the program (either with `SIGABRT`
        //   or `SIGSEGV`).
    }
}

/// Mutable zero-copy view; derefs mutably into the underlying tree.
#[derive(Debug)]
pub struct ConcurrentMerkleTreeZeroCopyMut<'a, H, const HEIGHT: usize>(
    ConcurrentMerkleTreeZeroCopy<'a, H, HEIGHT>,
)
where
    H: Hasher;

impl<'a, H, const HEIGHT: usize> ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
    /// Mounts a mutable zero-copy tree on an already-initialized buffer.
    pub fn from_bytes_zero_copy_mut(
        bytes: &'a mut [u8],
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Ok(Self(ConcurrentMerkleTreeZeroCopy::from_bytes_zero_copy(
            bytes,
        )?))
    }

    /// Writes the fixed-size header (scalars + vector metadata) into `bytes`,
    /// returning the offset at which the dynamic data region begins. The write
    /// order must mirror the `#[repr(C)]` field order of the struct.
    pub fn fill_non_dyn_fields_in_buffer(
        bytes: &mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
    ) -> Result<usize, ConcurrentMerkleTreeError> {
        let expected_size = ConcurrentMerkleTree::<H, HEIGHT>::size_in_account(
            height,
            changelog_capacity,
            roots_capacity,
            canopy_depth,
        );
        if bytes.len() < expected_size {
            return Err(ConcurrentMerkleTreeError::BufferSize(
                expected_size,
                bytes.len(),
            ));
        }

        bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, height)]
            .copy_from_slice(&height.to_le_bytes());
        bytes[span_of!(ConcurrentMerkleTree<H, HEIGHT>, canopy_depth)]
            .copy_from_slice(&canopy_depth.to_le_bytes());

        let mut offset = offset_of!(ConcurrentMerkleTree<H, HEIGHT>, next_index);
        // next_index
        write_at::<usize>(bytes, &0_usize.to_le_bytes(), &mut offset);
        // sequence_number
        write_at::<usize>(bytes, &0_usize.to_le_bytes(), &mut offset);
        // rightmost_leaf
        write_at::<[u8; 32]>(bytes, &H::zero_bytes()[0], &mut offset);

        // filled_subtrees (metadata)
        let filled_subtrees_metadata = BoundedVecMetadata::new(height);
        write_at::<BoundedVecMetadata>(bytes, &filled_subtrees_metadata.to_le_bytes(), &mut offset);
        // changelog (metadata)
        let changelog_metadata = CyclicBoundedVecMetadata::new(changelog_capacity);
        write_at::<CyclicBoundedVecMetadata>(bytes, &changelog_metadata.to_le_bytes(), &mut offset);
        // roots (metadata)
        let roots_metadata = CyclicBoundedVecMetadata::new(roots_capacity);
        write_at::<CyclicBoundedVecMetadata>(bytes, &roots_metadata.to_le_bytes(), &mut offset);
        // canopy (metadata)
        let canopy_size = ConcurrentMerkleTree::<H, HEIGHT>::canopy_size(canopy_depth);
        let canopy_metadata = BoundedVecMetadata::new(canopy_size);
        write_at::<BoundedVecMetadata>(bytes, &canopy_metadata.to_le_bytes(), &mut offset);

        Ok(offset)
    }

    /// Initializes the header in a (possibly garbage-filled) buffer, then
    /// mounts a mutable zero-copy tree on it. The caller still has to run
    /// `init()` on the result to populate roots/changelog/subtrees.
    pub fn from_bytes_zero_copy_init(
        bytes: &'a mut [u8],
        height: usize,
        canopy_depth: usize,
        changelog_capacity: usize,
        roots_capacity: usize,
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Self::fill_non_dyn_fields_in_buffer(
            bytes,
            height,
            canopy_depth,
            changelog_capacity,
            roots_capacity,
        )?;
        Self::from_bytes_zero_copy_mut(bytes)
    }
}

impl<'a, H, const HEIGHT: usize> Deref for ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
    type Target = ConcurrentMerkleTree<H, HEIGHT>;

    fn deref(&self) -> &Self::Target {
        &self.0.merkle_tree
    }
}

impl<'a, H, const HEIGHT: usize> DerefMut for ConcurrentMerkleTreeZeroCopyMut<'a, H, HEIGHT>
where
    H: Hasher,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0.merkle_tree
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use ark_bn254::Fr;
    use ark_ff::{BigInteger, PrimeField, UniformRand};
    use light_hasher::Poseidon;
    use rand::{thread_rng, Rng};

    /// Initializes a tree on a random-filled buffer, then appends the same
    /// leaves to an in-memory tree and to the zero-copy tree (remounted each
    /// iteration), asserting they stay equal.
    fn load_from_bytes<
        const HEIGHT: usize,
        const CHANGELOG: usize,
        const ROOTS: usize,
        const CANOPY_DEPTH: usize,
        const OPERATIONS: usize,
    >() {
        let mut mt_1 =
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::new(HEIGHT, CHANGELOG, ROOTS, CANOPY_DEPTH)
                .unwrap();
        mt_1.init().unwrap();

        // Create a buffer with random bytes - the `*_init` method should
        // initialize the buffer gracefully and the randomness shouldn't cause
        // undefined behavior.
        let mut bytes = vec![
            0u8;
            ConcurrentMerkleTree::<Poseidon, HEIGHT>::size_in_account(
                HEIGHT,
                CHANGELOG,
                ROOTS,
                CANOPY_DEPTH
            )
        ];
        thread_rng().fill(bytes.as_mut_slice());

        // Initialize a Merkle tree on top of a byte slice.
        {
            let mut mt =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_init(
                    bytes.as_mut_slice(),
                    HEIGHT,
                    CANOPY_DEPTH,
                    CHANGELOG,
                    ROOTS,
                )
                .unwrap();
            mt.init().unwrap();

            // Ensure that it was properly initialized.
            assert_eq!(mt.height, HEIGHT);
            assert_eq!(mt.canopy_depth, CANOPY_DEPTH,);
            assert_eq!(mt.next_index(), 0);
            assert_eq!(mt.sequence_number(), 0);
            assert_eq!(mt.rightmost_leaf(), Poseidon::zero_bytes()[0]);

            assert_eq!(mt.filled_subtrees.capacity(), HEIGHT);
            assert_eq!(mt.filled_subtrees.len(), HEIGHT);

            assert_eq!(mt.changelog.capacity(), CHANGELOG);
            assert_eq!(mt.changelog.len(), 1);

            assert_eq!(mt.roots.capacity(), ROOTS);
            assert_eq!(mt.roots.len(), 1);

            assert_eq!(
                mt.canopy.capacity(),
                ConcurrentMerkleTree::<Poseidon, HEIGHT>::canopy_size(CANOPY_DEPTH)
            );

            assert_eq!(mt.root(), Poseidon::zero_bytes()[HEIGHT]);
        }

        let mut rng = thread_rng();

        for _ in 0..OPERATIONS {
            // Reload the tree from bytes on each iteration.
            let mut mt_2 =
                ConcurrentMerkleTreeZeroCopyMut::<Poseidon, HEIGHT>::from_bytes_zero_copy_mut(
                    &mut bytes,
                )
                .unwrap();

            let leaf: [u8; 32] = Fr::rand(&mut rng)
                .into_bigint()
                .to_bytes_be()
                .try_into()
                .unwrap();
            mt_1.append(&leaf).unwrap();
            mt_2.append(&leaf).unwrap();

            assert_eq!(mt_1, *mt_2);
        }
    }

    #[test]
    fn test_load_from_bytes_22_256_256_0_1024() {
        load_from_bytes::<22, 256, 256, 0, 1024>()
    }

    #[test]
    fn test_load_from_bytes_22_256_256_10_1024() {
        load_from_bytes::<22, 256, 256, 10, 1024>()
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/lib.rs
use std::{
    alloc::{self, handle_alloc_error, Layout},
    iter::Skip,
    marker::PhantomData,
    mem,
};

use changelog::ChangelogPath;
use light_bounded_vec::{
    BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecIterator,
    CyclicBoundedVecMetadata,
};
pub use light_hasher;
use light_hasher::Hasher;

pub mod changelog;
pub mod copy;
pub mod errors;
pub mod event;
pub mod hash;
pub mod zero_copy;

use crate::{
    changelog::ChangelogEntry,
    errors::ConcurrentMerkleTreeError,
    hash::{compute_parent_node, compute_root},
};

/// [Concurrent Merkle tree](https://drive.google.com/file/d/1BOpa5OFmara50fTvL0VIVYjtg-qzHCVc/view)
/// which allows for multiple requests of updating leaves, without making any
/// of the requests invalid, as long as they are not modifying the same leaf.
///
/// When any of the above happens, some of the concurrent requests are going to
/// be invalid, forcing the clients to re-generate the Merkle proof. But that's
/// still better than having such a failure after any update happening in the
/// middle of requesting the update.
///
/// Due to ability to make a decent number of concurrent update requests to be
/// valid, no lock is necessary.
///
/// NOTE: the `#[repr(C)]` field order is load-bearing — `copy.rs` and
/// `zero_copy.rs` deserialize accounts with `offset_of!`/`span_of!` against
/// this exact layout. Do not reorder fields.
#[repr(C)]
#[derive(Debug)]
// TODO(vadorovsky): The only reason why are we still keeping `HEIGHT` as a
// const generic here is that removing it would require keeping a `BoundecVec`
// inside `CyclicBoundedVec`. Casting byte slices to such nested vector is not
// a trivial task, but we might eventually do it at some point.
pub struct ConcurrentMerkleTree<H, const HEIGHT: usize>
where
    H: Hasher,
{
    pub height: usize,
    pub canopy_depth: usize,

    // Heap-allocated scalars (see `new`/`Drop`); stored behind pointers so the
    // zero-copy wrappers can point them straight into an account buffer.
    pub next_index: *mut usize,
    pub sequence_number: *mut usize,
    pub rightmost_leaf: *mut [u8; 32],

    /// Hashes of subtrees.
    pub filled_subtrees: BoundedVec<[u8; 32]>,
    /// History of Merkle proofs.
    pub changelog: CyclicBoundedVec<ChangelogEntry<HEIGHT>>,
    /// History of roots.
    pub roots: CyclicBoundedVec<[u8; 32]>,
    /// Cached upper nodes.
    pub canopy: BoundedVec<[u8; 32]>,

    pub _hasher: PhantomData<H>,
}

pub type ConcurrentMerkleTree26<H> = ConcurrentMerkleTree<H, 26>;

impl<H, const HEIGHT: usize> ConcurrentMerkleTree<H, HEIGHT>
where
    H: Hasher,
{
    /// Number of nodes to include in canopy, based on `canopy_depth`.
    #[inline(always)]
    pub fn canopy_size(canopy_depth: usize) -> usize {
        // Sum of node counts of the top `canopy_depth` levels below the root:
        // 2 + 4 + ... + 2^canopy_depth = 2^(canopy_depth + 1) - 2.
        (1 << (canopy_depth + 1)) - 2
    }

    /// Size of the struct **without** dynamically sized fields (`BoundedVec`,
    /// `CyclicBoundedVec`).
    pub fn non_dyn_fields_size() -> usize {
        // height
        mem::size_of::<usize>()
        // canopy_depth
        + mem::size_of::<usize>()
        // next_index
        + mem::size_of::<usize>()
        // sequence_number
        + mem::size_of::<usize>()
        // rightmost_leaf
        + mem::size_of::<[u8; 32]>()
        // filled_subtrees (metadata)
        + mem::size_of::<BoundedVecMetadata>()
        // changelog (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
        // roots (metadata)
        + mem::size_of::<CyclicBoundedVecMetadata>()
        // canopy (metadata)
        + mem::size_of::<BoundedVecMetadata>()
    }

    // TODO(vadorovsky): Make a macro for that.
    /// Total byte size of the tree as serialized in an account: the fixed
    /// header plus all dynamic data regions.
    pub fn size_in_account(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
    ) -> usize {
        // non-dynamic fields
        Self::non_dyn_fields_size()
        // filled_subtrees
        + mem::size_of::<[u8; 32]>() * height
        // changelog
        + mem::size_of::<ChangelogEntry<HEIGHT>>() * changelog_size
        // roots
        + mem::size_of::<[u8; 32]>() * roots_size
        // canopy
        + mem::size_of::<[u8; 32]>() * Self::canopy_size(canopy_depth)
    }

    /// Validates the configuration parameters before a tree is constructed.
    fn check_size_constraints_new(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        if height == 0 || HEIGHT == 0 {
            return Err(ConcurrentMerkleTreeError::HeightZero);
        }
        // The runtime height must match the const generic (changelog paths
        // are `[_; HEIGHT]` arrays).
        if height != HEIGHT {
            return Err(ConcurrentMerkleTreeError::InvalidHeight(HEIGHT));
        }
        if canopy_depth > height {
            return Err(ConcurrentMerkleTreeError::CanopyGeThanHeight);
        }
        // Changelog needs to be at least 1, because it's used for storing
        // Merkle paths in `append`/`append_batch`.
        if changelog_size == 0 {
            return Err(ConcurrentMerkleTreeError::ChangelogZero);
        }
        if roots_size == 0 {
            return Err(ConcurrentMerkleTreeError::RootsZero);
        }
        Ok(())
    }

    /// Re-validates the constraints of an existing tree (used after
    /// deserialization).
    fn check_size_constraints(&self) -> Result<(), ConcurrentMerkleTreeError> {
        Self::check_size_constraints_new(
            self.height,
            self.changelog.capacity(),
            self.roots.capacity(),
            self.canopy_depth,
        )
    }

    /// Allocates an empty tree with the given dimensions. Call [`init`]
    /// afterwards to populate roots/changelog/subtrees/canopy.
    ///
    /// The scalar fields are heap-allocated (and freed in `Drop`) so that the
    /// pointer-based layout matches the zero-copy representation.
    pub fn new(
        height: usize,
        changelog_size: usize,
        roots_size: usize,
        canopy_depth: usize,
    ) -> Result<Self, ConcurrentMerkleTreeError> {
        Self::check_size_constraints_new(height, changelog_size, roots_size, canopy_depth)?;

        let layout = Layout::new::<usize>();
        let next_index = unsafe { alloc::alloc(layout) as *mut usize };
        if next_index.is_null() {
            handle_alloc_error(layout);
        }
        unsafe { *next_index = 0 };

        let layout = Layout::new::<usize>();
        let sequence_number = unsafe { alloc::alloc(layout) as *mut usize };
        if sequence_number.is_null() {
            handle_alloc_error(layout);
        }
        unsafe { *sequence_number = 0 };

        let layout = Layout::new::<[u8; 32]>();
        let rightmost_leaf = unsafe { alloc::alloc(layout) as *mut [u8; 32] };
        if rightmost_leaf.is_null() {
            handle_alloc_error(layout);
        }
        unsafe { *rightmost_leaf = [0u8; 32] };

        Ok(Self {
            height,
            canopy_depth,
            next_index,
            sequence_number,
            rightmost_leaf,
            filled_subtrees: BoundedVec::with_capacity(height),
            changelog: CyclicBoundedVec::with_capacity(changelog_size),
            roots: CyclicBoundedVec::with_capacity(roots_size),
            canopy: BoundedVec::with_capacity(Self::canopy_size(canopy_depth)),
            _hasher: PhantomData,
        })
    }

    /// Initializes the Merkle tree.
    pub fn init(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
        self.check_size_constraints()?;

        // Initialize root.
        let root = H::zero_bytes()[self.height];
        self.roots.push(root);

        // Initialize changelog.
        let path = ChangelogPath::from_fn(|i| Some(H::zero_bytes()[i]));
        let changelog_entry = ChangelogEntry { path, index: 0 };
        self.changelog.push(changelog_entry);

        // Initialize filled subtrees.
        for i in 0..self.height {
            self.filled_subtrees.push(H::zero_bytes()[i]).unwrap();
        }

        // Initialize canopy: for each cached level, push the zero hash of the
        // corresponding tree level (level 0 of the canopy is just below the
        // root).
        for level_i in 0..self.canopy_depth {
            let level_nodes = 1 << (level_i + 1);
            for _ in 0..level_nodes {
                let node = H::zero_bytes()[self.height - level_i - 1];
                self.canopy.push(node)?;
            }
        }

        Ok(())
    }

    /// Returns the index of the current changelog entry.
    pub fn changelog_index(&self) -> usize {
        self.changelog.last_index()
    }

    /// Returns the index of the current root in the tree's root buffer.
    pub fn root_index(&self) -> usize {
        self.roots.last_index()
    }

    /// Returns the current root.
    pub fn root(&self) -> [u8; 32] {
        // PANICS: This should never happen - there is always a root in the
        // tree and `self.root_index()` should always point to an existing index.
        self.roots[self.root_index()]
    }

    /// Index of the most recently appended leaf (0 for an empty tree).
    pub fn current_index(&self) -> usize {
        let next_index = self.next_index();
        if next_index > 0 {
            next_index - 1
        } else {
            next_index
        }
    }

    /// Index at which the next leaf will be appended.
    pub fn next_index(&self) -> usize {
        unsafe { *self.next_index }
    }

    fn inc_next_index(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
        unsafe {
            *self.next_index = self
                .next_index()
                .checked_add(1)
                .ok_or(ConcurrentMerkleTreeError::IntegerOverflow)?;
        }
        Ok(())
    }

    /// Monotonic counter incremented on every append/update.
    pub fn sequence_number(&self) -> usize {
        unsafe { *self.sequence_number }
    }

    fn inc_sequence_number(&mut self) -> Result<(), ConcurrentMerkleTreeError> {
        unsafe {
            *self.sequence_number = self
                .sequence_number()
                .checked_add(1)
                .ok_or(ConcurrentMerkleTreeError::IntegerOverflow)?;
        }
        Ok(())
    }

    /// Value of the most recently appended leaf.
    pub fn rightmost_leaf(&self) -> [u8; 32] {
        unsafe { *self.rightmost_leaf }
    }

    fn set_rightmost_leaf(&mut self, leaf: &[u8; 32]) {
        unsafe { *self.rightmost_leaf = *leaf };
    }

    /// Completes a truncated proof with the sibling nodes cached in the
    /// canopy, so callers only need to supply `height - canopy_depth` nodes.
    pub fn update_proof_from_canopy(
        &self,
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        // Start from the heap-style node index of the leaf's ancestor at the
        // first canopy level; walk up until reaching the root (index 1).
        let mut node_index = ((1 << self.height) + leaf_index) >> (self.height - self.canopy_depth);
        while node_index > 1 {
            // `node_index - 2` maps to the canopy index.
            let canopy_index = node_index - 2;
            // The proof needs the *sibling*, so flip to the neighboring node.
            let canopy_index = if canopy_index % 2 == 0 {
                canopy_index + 1
            } else {
                canopy_index - 1
            };
            proof.push(self.canopy[canopy_index])?;
            node_index >>= 1;
        }
        Ok(())
    }

    /// Returns an iterator with changelog entries newer than the requested
    /// `changelog_index`.
    pub fn changelog_entries(
        &self,
        changelog_index: usize,
    ) -> Result<Skip<CyclicBoundedVecIterator<'_, ChangelogEntry<HEIGHT>>>, ConcurrentMerkleTreeError>
    {
        // `CyclicBoundedVec::iter_from` returns an iterator which includes also
        // the element indicated by the provided index.
        //
        // However, we want to iterate only on changelog events **newer** than
        // the provided one.
        //
        // Calling `iter_from(changelog_index + 1)` wouldn't work. If
        // `changelog_index` points to the newest changelog entry,
        // `changelog_index + 1` would point to the **oldest** changelog entry.
        // That would result in iterating over the whole changelog - from the
        // oldest to the newest element.
        Ok(self.changelog.iter_from(changelog_index)?.skip(1))
    }

    /// Updates the given Merkle proof.
    ///
    /// The update is performed by checking whether there are any new changelog
    /// entries and whether they contain changes which affect the current
    /// proof. To be precise, for each changelog entry, it's done in the
    /// following steps:
    ///
    /// * Check if the changelog entry was directly updating the `leaf_index`
    ///   we are trying to update.
    /// * If no (we check that condition first, since it's more likely),
    ///   it means that there is a change affecting the proof, but not the
    ///   leaf.
    ///   Check which element from our proof was affected by the change
    ///   (using the `critbit_index` method) and update it (copy the new
    ///   element from the changelog to our updated proof).
    /// * If yes, it means that the same leaf we want to update was already
    ///   updated. In such case, updating the proof is not possible.
    pub fn update_proof_from_changelog(
        &self,
        changelog_index: usize,
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        // Iterate over changelog entries starting from the requested
        // `changelog_index`.
        //
        // Since we are interested only in subsequent, new changelog entries,
        // skip the first result.
        for changelog_entry in self.changelog_entries(changelog_index)? {
            changelog_entry.update_proof(leaf_index, proof)?;
        }

        Ok(())
    }

    /// Checks whether the given Merkle `proof` for the given `node` (with index
    /// `i`) is valid. The proof is valid when computing parent node hashes using
    /// the whole path of the proof gives the same result as the given `root`.
    pub fn validate_proof(
        &self,
        leaf: &[u8; 32],
        leaf_index: usize,
        proof: &BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        let expected_root = self.root();
        let computed_root = compute_root::<H>(leaf, leaf_index, proof)?;
        if computed_root == expected_root {
            Ok(())
        } else {
            Err(ConcurrentMerkleTreeError::InvalidProof(
                expected_root,
                computed_root,
            ))
        }
    }

    /// Updates the leaf under `leaf_index` with the `new_leaf` value.
    ///
    /// 1. Computes the new path and root from `new_leaf` and Merkle proof
    ///    (`proof`).
    /// 2. Stores the new path as the latest changelog entry and increments the
    ///    latest changelog index.
    /// 3. Stores the latest root and increments the latest root index.
    /// 4. If new leaf is at the rightmost index, stores it as the new
    ///    rightmost leaf and stores the Merkle proof as the new rightmost
    ///    proof.
    ///
    /// # Validation
    ///
    /// This method doesn't validate the proof. Caller is responsible for
    /// doing that before.
    fn update_leaf_in_tree(
        &mut self,
        new_leaf: &[u8; 32],
        leaf_index: usize,
        proof: &BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        let mut changelog_entry = ChangelogEntry::default_with_index(leaf_index);
        let mut current_node = *new_leaf;
        // Hash up the path; record each intermediate node in the changelog
        // entry so concurrent proofs can be patched against it.
        for (level, sibling) in proof.iter().enumerate() {
            changelog_entry.path[level] = Some(current_node);
            current_node = compute_parent_node::<H>(&current_node, sibling, leaf_index, level)?;
        }

        self.inc_sequence_number()?;

        self.roots.push(current_node);

        // Check if the leaf is the last leaf in the tree.
        if self.next_index() < (1 << self.height) {
            // Keep `filled_subtrees` consistent with the new path so future
            // appends hash against up-to-date siblings.
            changelog_entry.update_proof(self.next_index(), &mut self.filled_subtrees)?;
            // Check if we updated the rightmost leaf.
            if leaf_index >= self.current_index() {
                self.set_rightmost_leaf(new_leaf);
            }
        }
        self.changelog.push(changelog_entry);

        if self.canopy_depth > 0 {
            self.update_canopy(self.changelog.last_index(), 1);
        }

        Ok((self.changelog.last_index(), self.sequence_number()))
    }

    /// Replaces the `old_leaf` under the `leaf_index` with a `new_leaf`, using
    /// the given `proof` and `changelog_index` (pointing to the changelog entry
    /// which was the newest at the time of preparing the proof).
    #[inline(never)]
    pub fn update(
        &mut self,
        changelog_index: usize,
        old_leaf: &[u8; 32],
        new_leaf: &[u8; 32],
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        // Callers supply only the non-canopy part of the proof; the cached
        // upper siblings are filled in from the canopy below.
        let expected_proof_len = self.height - self.canopy_depth;
        if proof.len() != expected_proof_len {
            return Err(ConcurrentMerkleTreeError::InvalidProofLength(
                expected_proof_len,
                proof.len(),
            ));
        }
        if leaf_index >= self.next_index() {
            return Err(ConcurrentMerkleTreeError::CannotUpdateEmpty);
        }

        if self.canopy_depth > 0 {
            self.update_proof_from_canopy(leaf_index, proof)?;
        }
        // Patch the proof against any changelog entries made since the proof
        // was generated (the concurrency part of "concurrent Merkle tree").
        if changelog_index != self.changelog_index() {
            self.update_proof_from_changelog(changelog_index, leaf_index, proof)?;
        }
        self.validate_proof(old_leaf, leaf_index, proof)?;
        self.update_leaf_in_tree(new_leaf, leaf_index, proof)
    }

    /// Appends a new leaf to the tree.
    pub fn append(&mut self, leaf: &[u8; 32]) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch(&[leaf])
    }

    /// Appends a new leaf to the tree. Saves Merkle proof to the provided
    /// `proof` reference.
    pub fn append_with_proof(
        &mut self,
        leaf: &[u8; 32],
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_with_proofs(&[leaf], &mut [proof])
    }

    /// Appends a batch of new leaves to the tree.
    pub fn append_batch(
        &mut self,
        leaves: &[&[u8; 32]],
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_common::<false>(leaves, None)
    }

    /// Appends a batch of new leaves to the tree. Saves Merkle proofs to the
    /// provided `proofs` slice.
    pub fn append_batch_with_proofs(
        &mut self,
        leaves: &[&[u8; 32]],
        proofs: &mut [&mut BoundedVec<[u8; 32]>],
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        self.append_batch_common::<true>(leaves, Some(proofs))
    }

    /// Appends a batch of new leaves to the tree.
    ///
    /// This method contains the common logic and is not intended for external
    /// use. Callers should choose between [`append_batch`](ConcurrentMerkleTree::append_batch)
    /// and [`append_batch_with_proofs`](ConcurrentMerkleTree::append_batch_with_proofs).
    ///
    /// Returns the changelog index and sequence number of the *first* leaf in
    /// the batch.
    fn append_batch_common<
        // The only purpose of this const generic is to force compiler to
        // produce separate functions, with and without proof.
        //
        // Unfortunately, using `Option` is not enough:
        //
        // https://godbolt.org/z/fEMMfMdPc
        // https://godbolt.org/z/T3dxnjMzz
        //
        // Using the const generic helps and ends up generating two separate
        // functions:
        //
        // https://godbolt.org/z/zGnM7Ycn1
        const WITH_PROOFS: bool,
    >(
        &mut self,
        leaves: &[&[u8; 32]],
        // Slice for saving Merkle proofs.
        //
        // Currently it's used only for indexed Merkle trees.
        mut proofs: Option<&mut [&mut BoundedVec<[u8; 32]>]>,
    ) -> Result<(usize, usize), ConcurrentMerkleTreeError> {
        if leaves.is_empty() {
            return Err(ConcurrentMerkleTreeError::EmptyLeaves);
        }
        if (self.next_index() + leaves.len() - 1) >= 1 << self.height {
            return Err(ConcurrentMerkleTreeError::TreeFull);
        }
        if leaves.len() > self.changelog.capacity() {
            return Err(ConcurrentMerkleTreeError::BatchGreaterThanChangelog(
                leaves.len(),
                self.changelog.capacity(),
            ));
        }

        let first_changelog_index = (self.changelog.last_index() + 1) % self.changelog.capacity();
        let first_sequence_number = self.sequence_number() + 1;

        for (leaf_i, leaf) in leaves.iter().enumerate() {
            let mut current_index = self.next_index();

            self.changelog
                .push(ChangelogEntry::<HEIGHT>::default_with_index(current_index));
            let changelog_index = self.changelog_index();

            let mut current_node = **leaf;

            self.changelog[changelog_index].path[0] = Some(**leaf);

            for i in 0..self.height {
                let is_left = current_index % 2 == 0;

                if is_left {
                    // If the current node is on the left side:
                    //
                    //     U
                    //    / \
                    //  CUR  SIB
                    //  / \
                    // N   N
                    //
                    // * The sibling (on the right) is a "zero node".
                    // * That "zero node" becomes a part of Merkle proof.
                    // * The upper (next current) node is `H(cur, Ø)`.
                    let empty_node = H::zero_bytes()[i];

                    if WITH_PROOFS {
                        // PANICS: `proofs` should be always `Some` at this point.
                        proofs.as_mut().unwrap()[leaf_i].push(empty_node)?;
                    }

                    self.filled_subtrees[i] = current_node;

                    // For all non-terminal leaves, stop computing parents as
                    // soon as we are on the left side.
                    // Computation of the parent nodes is going to happen in
                    // the next iterations.
                    if leaf_i < leaves.len() - 1 {
                        break;
                    }
                    current_node = H::hashv(&[&current_node, &empty_node])?;
                } else {
                    // If the current node is on the right side:
                    //
                    //      U
                    //     / \
                    //  SIB   CUR
                    //        / \
                    //       N   N
                    //
                    // * The sibling on the left is a "filled subtree".
                    // * That "filled subtree" becomes a part of Merkle proof.
                    // * The upper (next current) node is `H(sib, cur)`.
                    if WITH_PROOFS {
                        // PANICS: `proofs` should be always `Some` at this point.
                        proofs.as_mut().unwrap()[leaf_i].push(self.filled_subtrees[i])?;
                    }
                    current_node = H::hashv(&[&self.filled_subtrees[i], &current_node])?;
                }

                if i < self.height - 1 {
                    self.changelog[changelog_index].path[i + 1] = Some(current_node);
                }

                current_index /= 2;
            }

            if leaf_i == leaves.len() - 1 {
                self.roots.push(current_node);
            } else {
                // Photon returns only the sequence number and we use it in the
                // JS client and forester to derive the root index. Therefore,
                // we need to emit a "zero root" to not break that property.
                self.roots.push([0u8; 32]);
            }

            self.inc_next_index()?;
            self.inc_sequence_number()?;

            self.set_rightmost_leaf(leaf);
        }

        if self.canopy_depth > 0 {
            self.update_canopy(first_changelog_index, leaves.len());
        }

        Ok((first_changelog_index, first_sequence_number))
    }

    /// Refreshes the cached canopy nodes from the top `canopy_depth` path
    /// nodes of the given changelog entries.
    fn update_canopy(&mut self, first_changelog_index: usize, num_leaves: usize) {
        for i in 0..num_leaves {
            let changelog_index = (first_changelog_index + i) % self.changelog.capacity();
            // Walk the path top-down (`rev()`): only the highest
            // `canopy_depth` nodes of each path live in the canopy.
            for (i, path_node) in self.changelog[changelog_index]
                .path
                .iter()
                .rev()
                .take(self.canopy_depth)
                .enumerate()
            {
                if let Some(path_node) = path_node {
                    let level = self.height - i - 1;
                    let index = (1 << (self.height - level))
                        + (self.changelog[changelog_index].index >> level);
                    // `index - 2` maps to the canopy index.
                    self.canopy[(index - 2) as usize] = *path_node;
                }
            }
        }
    }
}

impl<H, const HEIGHT: usize> Drop for ConcurrentMerkleTree<H, HEIGHT>
where
    H: Hasher,
{
    // Frees the heap-allocated scalar fields created in `new`.
    //
    // NOTE(review): the zero-copy wrappers keep this type inside
    // `ManuallyDrop` so this destructor never runs on pointers that point
    // into a caller-owned buffer.
    fn drop(&mut self) {
        let layout = Layout::new::<usize>();
        unsafe { alloc::dealloc(self.next_index as *mut u8, layout) };

        let layout = Layout::new::<usize>();
        unsafe { alloc::dealloc(self.sequence_number as *mut u8, layout) };

        let layout = Layout::new::<[u8; 32]>();
        unsafe { alloc::dealloc(self.rightmost_leaf as *mut u8, layout) };
    }
}

impl<H, const HEIGHT: usize> PartialEq for ConcurrentMerkleTree<H, HEIGHT>
where
    H: Hasher,
{
    // Compares by dereferenced values and vector contents, not by pointer
    // identity, so an owned tree and a zero-copy tree can compare equal.
    fn eq(&self, other: &Self) -> bool {
        self.height.eq(&other.height)
            && self.canopy_depth.eq(&other.canopy_depth)
            && self.next_index().eq(&other.next_index())
            && self.sequence_number().eq(&other.sequence_number())
            && self.rightmost_leaf().eq(&other.rightmost_leaf())
            && self
                .filled_subtrees
                .as_slice()
                .eq(other.filled_subtrees.as_slice())
            && self.changelog.iter().eq(other.changelog.iter())
            && self.roots.iter().eq(other.roots.iter())
            && self.canopy.as_slice().eq(other.canopy.as_slice())
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/event.rs
use borsh::{BorshDeserialize, BorshSerialize};

/// A batch of Merkle tree events, emitted together in one transaction.
#[derive(BorshDeserialize, BorshSerialize, Debug)]
pub struct MerkleTreeEvents {
    pub events: Vec<MerkleTreeEvent>,
}

/// Event containing the Merkle path of the given
/// [`StateMerkleTree`](light_merkle_tree_program::state::StateMerkleTree)
/// change. Indexers can use this type of events to re-build a non-sparse
/// version of state Merkle tree.
#[derive(BorshDeserialize, BorshSerialize, Debug)]
#[repr(C)]
pub enum MerkleTreeEvent {
    V1(ChangelogEvent),
    V2(NullifierEvent),
    V3(IndexedMerkleTreeEvent),
}

/// Node of the Merkle path with an index representing the position in a
/// non-sparse Merkle tree.
#[derive(BorshDeserialize, BorshSerialize, Debug, Eq, PartialEq)]
pub struct PathNode {
    pub node: [u8; 32],
    pub index: u32,
}

/// Version 1 of the [`ChangelogEvent`](light_merkle_tree_program::state::ChangelogEvent).
#[derive(BorshDeserialize, BorshSerialize, Debug)]
pub struct ChangelogEvent {
    /// Public key of the tree.
    pub id: [u8; 32],
    /// Merkle paths.
    pub paths: Vec<Vec<PathNode>>,
    /// Number of successful operations on the on-chain tree.
    pub seq: u64,
    /// Changelog event index.
    pub index: u32,
}

/// Event emitted when leaves are nullified (overwritten with zero values).
#[derive(BorshDeserialize, BorshSerialize, Debug)]
pub struct NullifierEvent {
    /// Public key of the tree.
    pub id: [u8; 32],
    /// Indices of leaves that were nullified.
    /// Nullified means updated with [0u8;32].
    pub nullified_leaves_indices: Vec<u64>,
    /// Number of successful operations on the on-chain tree.
    /// seq corresponds to leaves[0].
    /// seq + 1 corresponds to leaves[1].
    pub seq: u64,
}

/// Raw (serialization-friendly) form of an indexed Merkle tree element:
/// its value, its position and the link to the next element.
#[derive(Debug, Default, Clone, Copy, BorshSerialize, BorshDeserialize, Eq, PartialEq)]
pub struct RawIndexedElement<I>
where
    I: Clone,
{
    pub value: [u8; 32],
    pub next_index: I,
    pub next_value: [u8; 32],
    pub index: I,
}

/// One low-/high-element pair affected by an indexed Merkle tree insertion.
#[derive(BorshDeserialize, BorshSerialize, Debug, Clone)]
pub struct IndexedMerkleTreeUpdate<I>
where
    I: Clone,
{
    pub new_low_element: RawIndexedElement<I>,
    /// Leaf hash in new_low_element.index.
    pub new_low_element_hash: [u8; 32],
    pub new_high_element: RawIndexedElement<I>,
    /// Leaf hash in new_high_element.index,
    /// is equivalent with next_index.
    pub new_high_element_hash: [u8; 32],
}

/// Event describing a batch of indexed Merkle tree insertions.
#[derive(BorshDeserialize, BorshSerialize, Debug)]
pub struct IndexedMerkleTreeEvent {
    /// Public key of the tree.
    pub id: [u8; 32],
    pub updates: Vec<IndexedMerkleTreeUpdate<usize>>,
    /// Number of successful operations on the on-chain tree.
    /// seq corresponds to leaves[0].
    /// seq + 1 corresponds to leaves[1].
    pub seq: u64,
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/errors.rs
use light_bounded_vec::BoundedVecError; use light_hasher::errors::HasherError; use thiserror::Error; #[derive(Debug, Error)] pub enum ConcurrentMerkleTreeError { #[error("Integer overflow")] IntegerOverflow, #[error("Invalid height, it has to be greater than 0")] HeightZero, #[error("Invalud height, expected {0}")] InvalidHeight(usize), #[error("Invalid changelog size, it has to be greater than 0. Changelog is used for storing Merkle paths during appends.")] ChangelogZero, #[error("Invalid number of roots, it has to be greater than 0")] RootsZero, #[error("Canopy depth has to be lower than height")] CanopyGeThanHeight, #[error("Merkle tree is full, cannot append more leaves.")] TreeFull, #[error("Number of leaves ({0}) exceeds the changelog capacity ({1}).")] BatchGreaterThanChangelog(usize, usize), #[error("Invalid proof length, expected {0}, got {1}.")] InvalidProofLength(usize, usize), #[error("Invalid Merkle proof, expected root: {0:?}, the provided proof produces root: {1:?}")] InvalidProof([u8; 32], [u8; 32]), #[error("Attempting to update the leaf which was updated by an another newest change.")] CannotUpdateLeaf, #[error("Cannot update the empty leaf")] CannotUpdateEmpty, #[error("The batch of leaves is empty")] EmptyLeaves, #[error("Invalid buffer size, expected {0}, got {1}")] BufferSize(usize, usize), #[error("Hasher error: {0}")] Hasher(#[from] HasherError), #[error("Bounded vector error: {0}")] BoundedVec(#[from] BoundedVecError), } // NOTE(vadorovsky): Unfortunately, we need to do it by hand. `num_derive::ToPrimitive` // doesn't support data-carrying enums. 
// Maps every error variant to a stable numeric code in the 10001..=10014
// range; wrapped `Hasher`/`BoundedVec` errors delegate to their own codes.
// These codes are part of the on-chain error contract — do not renumber.
#[cfg(feature = "solana")]
impl From<ConcurrentMerkleTreeError> for u32 {
    fn from(e: ConcurrentMerkleTreeError) -> u32 {
        match e {
            ConcurrentMerkleTreeError::IntegerOverflow => 10001,
            ConcurrentMerkleTreeError::HeightZero => 10002,
            ConcurrentMerkleTreeError::InvalidHeight(_) => 10003,
            ConcurrentMerkleTreeError::ChangelogZero => 10004,
            ConcurrentMerkleTreeError::RootsZero => 10005,
            ConcurrentMerkleTreeError::CanopyGeThanHeight => 10006,
            ConcurrentMerkleTreeError::TreeFull => 10007,
            ConcurrentMerkleTreeError::BatchGreaterThanChangelog(_, _) => 10008,
            ConcurrentMerkleTreeError::InvalidProofLength(_, _) => 10009,
            ConcurrentMerkleTreeError::InvalidProof(_, _) => 10010,
            ConcurrentMerkleTreeError::CannotUpdateLeaf => 10011,
            ConcurrentMerkleTreeError::CannotUpdateEmpty => 10012,
            ConcurrentMerkleTreeError::EmptyLeaves => 10013,
            ConcurrentMerkleTreeError::BufferSize(_, _) => 10014,
            ConcurrentMerkleTreeError::Hasher(e) => e.into(),
            ConcurrentMerkleTreeError::BoundedVec(e) => e.into(),
        }
    }
}

// Bridges the numeric codes above into Solana's custom program error type.
#[cfg(feature = "solana")]
impl From<ConcurrentMerkleTreeError> for solana_program::program_error::ProgramError {
    fn from(e: ConcurrentMerkleTreeError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/hash.rs
use light_bounded_vec::BoundedVec; use light_hasher::Hasher; use crate::errors::ConcurrentMerkleTreeError; /// Returns the hash of the parent node based on the provided `node` (with its /// `node_index`) and `sibling` (with its `sibling_index`). pub fn compute_parent_node<H>( node: &[u8; 32], sibling: &[u8; 32], node_index: usize, level: usize, ) -> Result<[u8; 32], ConcurrentMerkleTreeError> where H: Hasher, { let is_left = (node_index >> level) & 1 == 0; let hash = if is_left { H::hashv(&[node, sibling])? } else { H::hashv(&[sibling, node])? }; Ok(hash) } /// Computes the root for the given `leaf` (with index `i`) and `proof`. It /// doesn't perform the validation of the provided `proof`. pub fn compute_root<H>( leaf: &[u8; 32], leaf_index: usize, proof: &BoundedVec<[u8; 32]>, ) -> Result<[u8; 32], ConcurrentMerkleTreeError> where H: Hasher, { let mut node = *leaf; for (level, sibling) in proof.iter().enumerate() { node = compute_parent_node::<H>(&node, sibling, leaf_index, level)?; } Ok(node) }
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/concurrent/src/changelog.rs
use std::ops::{Deref, DerefMut};

use light_bounded_vec::BoundedVec;

use crate::errors::ConcurrentMerkleTreeError;

/// A (possibly partial) Merkle path recorded by one changelog entry.
/// `None` at a level means the entry does not define that node; later
/// entries are expected to fill it in.
#[derive(Clone, Debug, PartialEq, Eq)]
#[repr(transparent)]
pub struct ChangelogPath<const HEIGHT: usize>(pub [Option<[u8; 32]>; HEIGHT]);

impl<const HEIGHT: usize> ChangelogPath<HEIGHT> {
    /// Builds a path by invoking `cb` for each level `0..HEIGHT`.
    pub fn from_fn<F>(cb: F) -> Self
    where
        F: FnMut(usize) -> Option<[u8; 32]>,
    {
        Self(std::array::from_fn(cb))
    }

    /// Checks whether the path is equal to the provided [`BoundedVec`].
    ///
    /// [`ChangelogPath`] might contain `None` nodes at the end, which
    /// mean that it does not define them, but the following changelog
    /// paths are expected to overwrite them.
    ///
    /// Therefore, the comparison ends on the first encountered first
    /// `None`. If all `Some` nodes are equal to the corresponding ones
    /// in the provided vector, the result is `true`.
    pub fn eq_to(&self, other: BoundedVec<[u8; 32]>) -> bool {
        if other.len() != HEIGHT {
            return false;
        }

        for i in 0..HEIGHT {
            let changelog_node = self.0[i];
            let path_node = other[i];

            match changelog_node {
                Some(changelog_node) => {
                    if changelog_node != path_node {
                        return false;
                    }
                }
                // First undefined level — everything before it matched.
                None => break,
            }
        }

        true
    }
}

impl<const HEIGHT: usize> Default for ChangelogPath<HEIGHT> {
    fn default() -> Self {
        Self([None; HEIGHT])
    }
}

impl<const HEIGHT: usize> Deref for ChangelogPath<HEIGHT> {
    type Target = [Option<[u8; 32]>; HEIGHT];

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<const HEIGHT: usize> DerefMut for ChangelogPath<HEIGHT> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

/// One recorded tree change: the affected Merkle path plus the index of the
/// leaf that changed.
#[derive(Clone, Debug, PartialEq, Eq)]
#[repr(C)]
pub struct ChangelogEntry<const HEIGHT: usize> {
    /// Path of the changelog.
    pub path: ChangelogPath<HEIGHT>,
    /// Index of the affected leaf (stored as `u64` for a stable layout).
    pub index: u64,
}

pub type ChangelogEntry22 = ChangelogEntry<22>;
pub type ChangelogEntry26 = ChangelogEntry<26>;
pub type ChangelogEntry32 = ChangelogEntry<32>;
pub type ChangelogEntry40 = ChangelogEntry<40>;

impl<const HEIGHT: usize> ChangelogEntry<HEIGHT> {
    pub fn new(path: ChangelogPath<HEIGHT>, index: usize) -> Self {
        let index = index as u64;
        Self { path, index }
    }

    /// Creates an entry with an empty path for the given leaf index.
    pub fn default_with_index(index: usize) -> Self {
        Self {
            path: ChangelogPath::default(),
            index: index as u64,
        }
    }

    pub fn index(&self) -> usize {
        self.index as usize
    }

    /// Returns an intersection index in the changelog entry which affects the
    /// provided path.
    ///
    /// Determining it can be done by taking a XOR of the leaf index (which was
    /// directly updated in the changelog entry) and the leaf index we are
    /// trying to update.
    ///
    /// The number of bytes in the binary representations of the indexes is
    /// determined by the height of the tree. For example, for the tree with
    /// height 4, update attempt of leaf under index 2 and changelog affecting
    /// index 4, critbit would be:
    ///
    /// 2 ^ 4 = 0b_0010 ^ 0b_0100 = 0b_0110 = 6
    fn intersection_index(&self, leaf_index: usize) -> usize {
        // NOTE(review): `64 - HEIGHT` assumes a 64-bit `usize` — confirm on
        // 32-bit targets.
        let padding = 64 - HEIGHT;
        let common_path_len = ((leaf_index ^ self.index()) << padding).leading_zeros() as usize;

        (HEIGHT - 1) - common_path_len
    }

    /// Patches `proof` (for `leaf_index`) with the node of this entry at the
    /// level where the two paths diverge. Errors if this entry updated the
    /// same leaf — the caller then has to re-sync before retrying.
    pub fn update_proof(
        &self,
        leaf_index: usize,
        proof: &mut BoundedVec<[u8; 32]>,
    ) -> Result<(), ConcurrentMerkleTreeError> {
        if leaf_index != self.index() {
            let intersection_index = self.intersection_index(leaf_index);

            if let Some(node) = self.path[intersection_index] {
                proof[intersection_index] = node;
            }
        } else {
            // This case means that the leaf we are trying to update was
            // already updated. Therefore, the right thing to do is to notify
            // the caller to sync the local Merkle tree and update the leaf,
            // if necessary.
            return Err(ConcurrentMerkleTreeError::CannotUpdateLeaf);
        }

        Ok(())
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference/Cargo.toml
# Non-sparse reference Merkle tree implementation, used to mirror and verify
# the behavior of the on-chain concurrent Merkle tree in tests.
[package]
name = "light-merkle-tree-reference"
version = "1.1.0"
description = "Non-sparse reference Merkle tree implementation"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"

[dependencies]
# Sibling crates from this workspace (kept in version lockstep).
light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" }
light-hasher = { path = "../hasher", version = "1.1.0" }
thiserror = "1.0"
log = "0.4.20"
num-bigint = "0.4"

[dev-dependencies]
hex = "0.4"
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference/tests/tests.rs
// Tests for the reference (non-sparse) Merkle tree: appends and updates are
// checked against roots, paths and proofs recomputed by hand, for several
// hashers and canopy depths. `Z[i]` denotes the zero (empty) subtree hash at
// level `i`.
use light_bounded_vec::BoundedVec;
use light_hasher::{Hasher, Keccak, Poseidon, Sha256};
use light_merkle_tree_reference::MerkleTree;

// Appends four leaves to a height-4 tree, re-deriving the expected root,
// Merkle path and Merkle proof manually after each append.
fn append<H>(canopy_depth: usize)
where
    H: Hasher,
{
    const HEIGHT: usize = 4;

    let mut mt = MerkleTree::<H>::new(4, canopy_depth);

    let leaf_1 = [1_u8; 32];
    mt.append(&leaf_1).unwrap();

    // The hash of our new leaf and its sibling (a zero value).
    //
    //    H1
    //   /  \
    //  L1   Z[0]
    let h1 = H::hashv(&[&leaf_1, &H::zero_bytes()[0]]).unwrap();

    // The hash of `h1` and its sibling (a subtree represented by `Z[1]`).
    //
    //        H2
    //      /    \
    //    H1      Z[1]
    //   /  \    /    \
    //  L1  Z[0] Z[0] Z[0]
    //
    // `Z[1]` represents the whole subtree on the right from `h2`. In the next
    // examples, we are just going to show empty subtrees instead of the whole
    // hierarchy.
    let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();

    // The hash of `h2` and its sibling (a subtree represented by `Z[2]`).
    //
    //      H3
    //     /  \
    //    H2   Z[2]
    //   /  \
    //  H1   Z[1]
    //  / \
    // L1  Z[0]
    let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();

    // The hash of `h3` and its sibling (a subtree represented by `Z[3]`),
    // which is the root.
    //
    //       R
    //      / \
    //     H3  Z[3]
    //    /  \
    //   H2   Z[2]
    //  /  \
    // H1   Z[1]
    // / \
    // L1 Z[0]
    let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();

    assert_eq!(mt.root(), expected_root);

    // The Merkle path of L1 consists of nodes from L1 up to the root:
    // L1, H1, H2, H3.
    let expected_merkle_path = &[leaf_1, h1, h2, h3];
    let full_merkle_path = mt.get_path_of_leaf(0, true).unwrap();
    assert_eq!(full_merkle_path.as_slice(), expected_merkle_path);
    let partial_merkle_path = mt.get_path_of_leaf(0, false).unwrap();
    assert_eq!(
        partial_merkle_path.as_slice(),
        &expected_merkle_path[..HEIGHT - canopy_depth]
    );

    // The Merkle proof consists of siblings of L1 and all its parent
    // nodes. In this case, these are just zero bytes: Z[0], Z[1], Z[2],
    // Z[3].
    let expected_merkle_proof = &H::zero_bytes()[..HEIGHT];
    let full_merkle_proof = mt.get_proof_of_leaf(0, true).unwrap();
    assert_eq!(full_merkle_proof.as_slice(), expected_merkle_proof);
    let partial_merkle_proof = mt.get_proof_of_leaf(0, false).unwrap();
    assert_eq!(
        partial_merkle_proof.as_slice(),
        &expected_merkle_proof[..HEIGHT - canopy_depth]
    );

    // Appending the 2nd leaf should result in recomputing the root due to the
    // change of the `h1`, which now is a hash of the two non-zero leafs. So
    // when computing hashes from H2 up to the root, we are still going to use
    // zero bytes.
    //
    // The other subtrees still remain the same.
    //
    //       R
    //      / \
    //     H3  Z[3]
    //    /  \
    //   H2   Z[2]
    //  /  \
    // H1   Z[1]
    // / \
    // L1 L2
    let leaf_2 = H::hash(&[2u8; 32]).unwrap();
    mt.append(&leaf_2).unwrap();

    let h1 = H::hashv(&[&leaf_1, &leaf_2]).unwrap();
    let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();
    let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();

    assert_eq!(mt.root(), expected_root);

    // The Merkle path of L2 consists of nodes from L2 up to the root:
    // L2, H1, H2, H3.
    let expected_merkle_path = &[leaf_2, h1, h2, h3];
    let full_merkle_path = mt.get_path_of_leaf(1, true).unwrap();
    assert_eq!(full_merkle_path.as_slice(), expected_merkle_path);
    let partial_merkle_path = mt.get_path_of_leaf(1, false).unwrap();
    assert_eq!(
        partial_merkle_path.as_slice(),
        &expected_merkle_path[..HEIGHT - canopy_depth]
    );

    // The Merkle proof consists of siblings of L2 and all its parent
    // nodes. In this case, these are: L1, Z[1], Z[2], Z[3].
    let expected_merkle_proof = &[
        leaf_1,
        H::zero_bytes()[1],
        H::zero_bytes()[2],
        H::zero_bytes()[3],
    ];
    let full_merkle_proof = mt.get_proof_of_leaf(1, true).unwrap();
    assert_eq!(full_merkle_proof.as_slice(), expected_merkle_proof);
    let partial_merkle_proof = mt.get_proof_of_leaf(1, false).unwrap();
    assert_eq!(
        partial_merkle_proof.as_slice(),
        &expected_merkle_proof[..HEIGHT - canopy_depth]
    );

    // Appending the 3rd leaf alters the next subtree on the right.
    // Instead of using Z[1], we will end up with the hash of the new leaf and
    // Z[0].
    //
    // The other subtrees still remain the same.
    //
    //        R
    //       / \
    //      H4  Z[3]
    //     /  \
    //    H3   Z[2]
    //   /  \
    //  H1   H2
    //  / \  / \
    // L1 L2 L3 Z[0]
    let leaf_3 = H::hash(&[3u8; 32]).unwrap();
    mt.append(&leaf_3).unwrap();

    let h1 = H::hashv(&[&leaf_1, &leaf_2]).unwrap();
    let h2 = H::hashv(&[&leaf_3, &H::zero_bytes()[0]]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();

    assert_eq!(mt.root(), expected_root);

    // The Merkle path of L3 consists of nodes from L3 up to the root:
    // L3, H2, H3, H4.
    let expected_merkle_path = &[leaf_3, h2, h3, h4];
    let full_merkle_path = mt.get_path_of_leaf(2, true).unwrap();
    assert_eq!(full_merkle_path.as_slice(), expected_merkle_path);
    let partial_merkle_path = mt.get_path_of_leaf(2, false).unwrap();
    assert_eq!(
        partial_merkle_path.as_slice(),
        &expected_merkle_path[..HEIGHT - canopy_depth]
    );

    // The Merkle proof consists of siblings of L3 and all its parent
    // nodes. In this case, these are: Z[0], H1, Z[2], Z[3].
    let expected_merkle_proof = &[
        H::zero_bytes()[0],
        h1,
        H::zero_bytes()[2],
        H::zero_bytes()[3],
    ];
    let full_merkle_proof = mt.get_proof_of_leaf(2, true).unwrap();
    assert_eq!(full_merkle_proof.as_slice(), expected_merkle_proof);
    let partial_merkle_proof = mt.get_proof_of_leaf(2, false).unwrap();
    assert_eq!(
        partial_merkle_proof.as_slice(),
        &expected_merkle_proof[..HEIGHT - canopy_depth]
    );

    // Appending the 4th leaf fills the second subtree completely.
    //
    // The other subtrees still remain the same.
    //
    //        R
    //       / \
    //      H4  Z[3]
    //     /  \
    //    H3   Z[2]
    //   /  \
    //  H1   H2
    //  / \  / \
    // L1 L2 L3 L4
    let leaf_4 = H::hash(&[4u8; 32]).unwrap();
    mt.append(&leaf_4).unwrap();

    let h1 = H::hashv(&[&leaf_1, &leaf_2]).unwrap();
    let h2 = H::hashv(&[&leaf_3, &leaf_4]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();

    assert_eq!(mt.root(), expected_root);
}

#[test]
fn test_append_keccak_4_0() {
    append::<Keccak>(0)
}

#[test]
fn test_append_poseidon_4_0() {
    append::<Poseidon>(0)
}

#[test]
fn test_append_sha256_4_0() {
    append::<Sha256>(0)
}

#[test]
fn test_append_keccak_4_1() {
    append::<Keccak>(1)
}

#[test]
fn test_append_poseidon_4_1() {
    append::<Poseidon>(1)
}

#[test]
fn test_append_sha256_4_1() {
    append::<Sha256>(1)
}

#[test]
fn test_append_keccak_4_2() {
    append::<Keccak>(2)
}

#[test]
fn test_append_poseidon_4_2() {
    append::<Poseidon>(2)
}

#[test]
fn test_append_sha256_4_2() {
    append::<Sha256>(2)
}

// Appends four leaves, then updates each of them in turn, re-deriving the
// expected root, path and proof by hand after every operation.
fn update<H>()
where
    H: Hasher,
{
    const HEIGHT: usize = 4;
    const CANOPY: usize = 0;

    let mut merkle_tree = MerkleTree::<H>::new(HEIGHT, CANOPY);

    let leaf1 = H::hash(&[1u8; 32]).unwrap();

    // The hash of our new leaf and its sibling (a zero value).
    //
    //    H1
    //   /  \
    //  L1   Z[0]
    let h1 = H::hashv(&[&leaf1, &H::zero_bytes()[0]]).unwrap();

    // The hash of `h1` and its sibling (a subtree represented by `Z[1]`).
    //
    //        H2
    //      /    \
    //    H1      Z[1]
    //   /  \    /    \
    //  L1  Z[0] Z[0] Z[0]
    //
    // `Z[1]` represents the whole subtree on the right from `h2`. In the next
    // examples, we are just going to show empty subtrees instead of the whole
    // hierarchy.
    let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();

    // The hash of `h2` and its sibling (a subtree represented by `Z[2]`).
    //
    //      H3
    //     /  \
    //    H2   Z[2]
    //   /  \
    //  H1   Z[1]
    //  / \
    // L1  Z[0]
    let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();

    // The hash of `h3` and its sibling (a subtree represented by `Z[3]`),
    // which is the root.
    //
    //       R
    //      / \
    //     H3  Z[3]
    //    /  \
    //   H2   Z[2]
    //  /  \
    // H1   Z[1]
    // / \
    // L1 Z[0]
    let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();

    // The Merkle path is:
    // [L1, H1, H2, H3]
    let expected_path = BoundedVec::from_array(&[leaf1, h1, h2, h3]);
    let expected_proof = BoundedVec::from_array(&[
        H::zero_bytes()[0],
        H::zero_bytes()[1],
        H::zero_bytes()[2],
        H::zero_bytes()[3],
    ]);

    merkle_tree.append(&leaf1).unwrap();

    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(
        merkle_tree.get_path_of_leaf(0, false).unwrap(),
        expected_path
    );
    assert_eq!(
        merkle_tree.get_proof_of_leaf(0, false).unwrap(),
        expected_proof
    );

    // Appending the 2nd leaf should result in recomputing the root due to the
    // change of the `h1`, which now is a hash of the two non-zero leafs. So
    // when computing all hashes up to the root, we are still going to use
    // zero bytes from 1 to 8.
    //
    // The other subtrees still remain the same.
    //
    //       R
    //      / \
    //     H3  Z[3]
    //    /  \
    //   H2   Z[2]
    //  /  \
    // H1   Z[1]
    // / \
    // L1 L2
    let leaf2 = H::hash(&[2u8; 32]).unwrap();

    let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
    let h2 = H::hashv(&[&h1, &H::zero_bytes()[1]]).unwrap();
    let h3 = H::hashv(&[&h2, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h3, &H::zero_bytes()[3]]).unwrap();

    // The Merkle path is:
    // [L2, H1, H2, H3]
    let expected_path = BoundedVec::from_array(&[leaf2, h1, h2, h3]);
    let expected_proof = BoundedVec::from_array(&[
        leaf1,
        H::zero_bytes()[1],
        H::zero_bytes()[2],
        H::zero_bytes()[3],
    ]);

    merkle_tree.append(&leaf2).unwrap();

    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(
        merkle_tree.get_path_of_leaf(1, false).unwrap(),
        expected_path
    );
    assert_eq!(
        merkle_tree.get_proof_of_leaf(1, false).unwrap(),
        expected_proof
    );

    // Appending the 3rd leaf alters the next subtree on the right.
    // Instead of using Z[1], we will end up with the hash of the new leaf and
    // Z[0].
    //
    // The other subtrees still remain the same.
    //
    //        R
    //       / \
    //      H4  Z[3]
    //     /  \
    //    H3   Z[2]
    //   /  \
    //  H1   H2
    //  / \  / \
    // L1 L2 L3 Z[0]
    let leaf3 = H::hash(&[3u8; 32]).unwrap();

    let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
    let h2 = H::hashv(&[&leaf3, &H::zero_bytes()[0]]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();

    // The Merkle path is:
    // [L3, H2, H3, H4]
    let expected_path = BoundedVec::from_array(&[leaf3, h2, h3, h4]);
    let expected_proof = BoundedVec::from_array(&[
        H::zero_bytes()[0],
        h1,
        H::zero_bytes()[2],
        H::zero_bytes()[3],
    ]);

    merkle_tree.append(&leaf3).unwrap();

    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(
        merkle_tree.get_path_of_leaf(2, false).unwrap(),
        expected_path
    );
    assert_eq!(
        merkle_tree.get_proof_of_leaf(2, false).unwrap(),
        expected_proof
    );

    // Appending the 4th leaf fills the second subtree completely.
    //
    // The other subtrees still remain the same.
    //
    //        R
    //       / \
    //      H4  Z[3]
    //     /  \
    //    H3   Z[2]
    //   /  \
    //  H1   H2
    //  / \  / \
    // L1 L2 L3 L4
    let leaf4 = H::hash(&[4u8; 32]).unwrap();

    let h1 = H::hashv(&[&leaf1, &leaf2]).unwrap();
    let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();

    // The Merkle path is:
    // [L4, H2, H3, H4]
    let expected_path = BoundedVec::from_array(&[leaf4, h2, h3, h4]);
    let expected_proof =
        BoundedVec::from_array(&[leaf3, h1, H::zero_bytes()[2], H::zero_bytes()[3]]);

    merkle_tree.append(&leaf4).unwrap();

    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(
        merkle_tree.get_path_of_leaf(3, false).unwrap(),
        expected_path
    );
    assert_eq!(
        merkle_tree.get_proof_of_leaf(3, false).unwrap(),
        expected_proof
    );

    // Update `leaf1`.
    let new_leaf1 = [9u8; 32];

    // Updating L1 affects H1 and all parent hashes up to the root.
    //
    //         R
    //        / \
    //      *H4* Z[3]
    //      /  \
    //    *H3*  Z[2]
    //    /  \
    //  *H1*  H2
    //  /  \  / \
    // *L1* L2 L3 L4
    //
    // Merkle proof for the replaced leaf L1 is:
    // [L2, H2, Z[2], Z[3]]
    //
    // Our Merkle tree implementation should be smart enough to fill up the
    // proof with zero bytes, so we can skip them and just define the proof as:
    // [L2, H2]
    merkle_tree.update(&new_leaf1, 0).unwrap();

    let h1 = H::hashv(&[&new_leaf1, &leaf2]).unwrap();
    let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();

    // The Merkle path is:
    // [L1, H1, H3, H4]
    let expected_path = BoundedVec::from_array(&[new_leaf1, h1, h3, h4]);
    let expected_proof =
        BoundedVec::from_array(&[leaf2, h2, H::zero_bytes()[2], H::zero_bytes()[3]]);

    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(
        merkle_tree.get_path_of_leaf(0, false).unwrap(),
        expected_path
    );
    assert_eq!(
        merkle_tree.get_proof_of_leaf(0, false).unwrap(),
        expected_proof
    );

    // Update `leaf2`.
    let new_leaf2 = H::hash(&[8u8; 32]).unwrap();

    // Updating L2 affects H1 and all parent hashes up to the root.
    //
    // Merkle proof for the replaced leaf L2 is:
    // [L1, H2]
    merkle_tree.update(&new_leaf2, 1).unwrap();

    let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
    let h2 = H::hashv(&[&leaf3, &leaf4]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();

    // The Merkle path is:
    // [L2, H1, H3, H4]
    let expected_path = BoundedVec::from_array(&[new_leaf2, h1, h3, h4]);
    let expected_proof =
        BoundedVec::from_array(&[new_leaf1, h2, H::zero_bytes()[2], H::zero_bytes()[3]]);

    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(
        merkle_tree.get_path_of_leaf(1, false).unwrap(),
        expected_path
    );
    assert_eq!(
        merkle_tree.get_proof_of_leaf(1, false).unwrap(),
        expected_proof
    );

    // Update `leaf3`.
    let new_leaf3 = H::hash(&[7u8; 32]).unwrap();

    // Updating L3 affects H2 and all parent hashes up to the root.
    //
    // Merkle proof for the replaced leaf L3 is:
    // [L4, H1]
    merkle_tree.update(&new_leaf3, 2).unwrap();

    let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
    let h2 = H::hashv(&[&new_leaf3, &leaf4]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();

    // The Merkle path is:
    // [L3, H2, H3, H4]
    let expected_path = BoundedVec::from_array(&[new_leaf3, h2, h3, h4]);
    let expected_proof =
        BoundedVec::from_array(&[leaf4, h1, H::zero_bytes()[2], H::zero_bytes()[3]]);

    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(
        merkle_tree.get_path_of_leaf(2, false).unwrap(),
        expected_path
    );
    assert_eq!(
        merkle_tree.get_proof_of_leaf(2, false).unwrap(),
        expected_proof
    );

    // Update `leaf4`.
    let new_leaf4 = H::hash(&[6u8; 32]).unwrap();

    // Updating L4 affects H2 and all parent hashes up to the root.
    //
    // Merkle proof for the replaced leaf L4 is:
    // [L3, H1]
    merkle_tree.update(&new_leaf4, 3).unwrap();

    let h1 = H::hashv(&[&new_leaf1, &new_leaf2]).unwrap();
    let h2 = H::hashv(&[&new_leaf3, &new_leaf4]).unwrap();
    let h3 = H::hashv(&[&h1, &h2]).unwrap();
    let h4 = H::hashv(&[&h3, &H::zero_bytes()[2]]).unwrap();
    let expected_root = H::hashv(&[&h4, &H::zero_bytes()[3]]).unwrap();

    // The Merkle path is:
    // [L4, H2, H3, H4]
    let expected_path = BoundedVec::from_array(&[new_leaf4, h2, h3, h4]);
    let expected_proof =
        BoundedVec::from_array(&[new_leaf3, h1, H::zero_bytes()[2], H::zero_bytes()[3]]);

    assert_eq!(merkle_tree.root(), expected_root);
    assert_eq!(
        merkle_tree.get_path_of_leaf(3, false).unwrap(),
        expected_path
    );
    assert_eq!(
        merkle_tree.get_proof_of_leaf(3, false).unwrap(),
        expected_proof
    );
}

#[test]
fn test_update_keccak() {
    update::<Keccak>()
}

#[test]
fn test_update_poseidon() {
    update::<Poseidon>()
}

#[test]
fn test_update_sha256() {
    update::<Sha256>()
}

// The sequence number should increase by one on every successful append and
// update.
#[test]
fn test_sequence_number() {
    let mut merkle_tree = MerkleTree::<Poseidon>::new(4, 0);

    assert_eq!(merkle_tree.sequence_number, 0);

    let leaf1 = Poseidon::hash(&[1u8; 32]).unwrap();
    merkle_tree.append(&leaf1).unwrap();

    assert_eq!(merkle_tree.sequence_number, 1);

    let leaf2 = Poseidon::hash(&[2u8; 32]).unwrap();
    merkle_tree.update(&leaf2, 0).unwrap();

    assert_eq!(merkle_tree.sequence_number, 2);
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference/src/lib.rs
pub mod sparse_merkle_tree;

use std::marker::PhantomData;

use light_bounded_vec::{BoundedVec, BoundedVecError};
use light_hasher::{errors::HasherError, Hasher};
use thiserror::Error;

/// Errors returned by the reference Merkle tree implementation.
#[derive(Debug, Error)]
pub enum ReferenceMerkleTreeError {
    #[error("Leaf {0} does not exist")]
    LeafDoesNotExist(usize),
    #[error("Hasher error: {0}")]
    Hasher(#[from] HasherError),
    #[error("Invalid proof length provided: {0} required {1}")]
    InvalidProofLength(usize, usize),
}

/// Reference (non-optimized, fully materialized) Merkle tree.
///
/// Every level of the tree is stored as a `Vec` in `layers`
/// (`layers[0]` = leaves, `layers[height - 1]` = the two children of the
/// root), and every historical root is kept in `roots`. Intended as a
/// correctness oracle for tests, not for production use.
#[derive(Debug, Clone)]
pub struct MerkleTree<H>
where
    H: Hasher,
{
    /// Number of levels below the root; the tree holds up to `1 << height` leaves.
    pub height: usize,
    /// Maximum number of leaves (`1 << height`).
    pub capacity: usize,
    /// Number of top levels excluded from proofs/paths when `full == false`.
    pub canopy_depth: usize,
    /// `layers[level][i]` is node `i` at `level` (0 = leaves). Only populated
    /// nodes are stored; missing nodes are implicitly `H::zero_bytes()[level]`.
    pub layers: Vec<Vec<[u8; 32]>>,
    /// All roots ever produced, oldest first; `roots.last()` is current.
    pub roots: Vec<[u8; 32]>,
    /// Index at which the next appended leaf will be inserted.
    pub rightmost_index: usize,
    /// Incremented on every `append`/`update`.
    pub sequence_number: usize,
    _hasher: PhantomData<H>,
}

impl<H> MerkleTree<H>
where
    H: Hasher,
{
    /// Creates an empty tree of the given `height` and `canopy_depth`.
    /// The initial root is the zero-subtree hash for `height`.
    pub fn new(height: usize, canopy_depth: usize) -> Self {
        Self {
            height,
            capacity: 1 << height,
            canopy_depth,
            layers: vec![Vec::new(); height],
            roots: vec![H::zero_bytes()[height]],
            rightmost_index: 0,
            sequence_number: 0,
            _hasher: PhantomData,
        }
    }

    /// Number of nodes to include in canopy, based on `canopy_depth`
    /// (2 + 4 + ... + 2^canopy_depth = 2^(canopy_depth + 1) - 2).
    pub fn canopy_size(&self) -> usize {
        (1 << (self.canopy_depth + 1)) - 2
    }

    /// Recomputes all ancestors of the node at leaf index `i` and pushes the
    /// new root onto `roots`. Absent siblings are treated as zero-subtree
    /// hashes for their level.
    fn update_upper_layers(&mut self, mut i: usize) -> Result<(), HasherError> {
        for level in 1..self.height {
            // Parent index at `level`; its children sit at 2i and 2i + 1
            // in the level below.
            i /= 2;
            let left_index = i * 2;
            let right_index = i * 2 + 1;

            let left_child = self.layers[level - 1]
                .get(left_index)
                .cloned()
                .unwrap_or(H::zero_bytes()[level - 1]);
            let right_child = self.layers[level - 1]
                .get(right_index)
                .cloned()
                .unwrap_or(H::zero_bytes()[level - 1]);

            let node = H::hashv(&[&left_child[..], &right_child[..]])?;

            if self.layers[level].len() > i {
                // A node already exists and we are overwriting it.
                self.layers[level][i] = node;
            } else {
                // A node didn't exist before.
                self.layers[level].push(node);
            }
        }

        // The root is the hash of the (at most) two nodes in the topmost
        // stored layer; missing nodes default to zero hashes.
        let left_child = &self.layers[self.height - 1]
            .first()
            .cloned()
            .unwrap_or(H::zero_bytes()[self.height - 1]);
        let right_child = &self.layers[self.height - 1]
            .get(1)
            .cloned()
            .unwrap_or(H::zero_bytes()[self.height - 1]);
        let root = H::hashv(&[&left_child[..], &right_child[..]])?;
        self.roots.push(root);

        Ok(())
    }

    /// Appends `leaf` at `rightmost_index` and recomputes the path to the root.
    pub fn append(&mut self, leaf: &[u8; 32]) -> Result<(), HasherError> {
        self.layers[0].push(*leaf);
        let i = self.rightmost_index;
        self.rightmost_index += 1;
        self.update_upper_layers(i)?;
        self.sequence_number += 1;
        Ok(())
    }

    /// Appends multiple leaves one by one (one new root per leaf).
    pub fn append_batch(&mut self, leaves: &[&[u8; 32]]) -> Result<(), HasherError> {
        for leaf in leaves {
            self.append(leaf)?;
        }
        Ok(())
    }

    /// Replaces the existing leaf at `leaf_index` with `leaf` and recomputes
    /// the path to the root. Fails if the leaf was never appended.
    pub fn update(
        &mut self,
        leaf: &[u8; 32],
        leaf_index: usize,
    ) -> Result<(), ReferenceMerkleTreeError> {
        *self.layers[0]
            .get_mut(leaf_index)
            .ok_or(ReferenceMerkleTreeError::LeafDoesNotExist(leaf_index))? = *leaf;

        self.update_upper_layers(leaf_index)?;
        self.sequence_number += 1;

        Ok(())
    }

    /// Returns the current (most recent) root.
    pub fn root(&self) -> [u8; 32] {
        // PANICS: We always initialize the Merkle tree with a
        // root (from zero bytes), so the following should never
        // panic.
        self.roots.last().cloned().unwrap()
    }

    /// Returns the Merkle path (the node itself plus each ancestor) of the
    /// leaf at `index`. When `full == false` the top `canopy_depth` levels
    /// are omitted.
    pub fn get_path_of_leaf(
        &self,
        mut index: usize,
        full: bool,
    ) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        let mut path = BoundedVec::with_capacity(self.height);
        let limit = match full {
            true => self.height,
            false => self.height - self.canopy_depth,
        };

        for level in 0..limit {
            let node = self.layers[level]
                .get(index)
                .cloned()
                .unwrap_or(H::zero_bytes()[level]);
            path.push(node)?;

            index /= 2;
        }

        Ok(path)
    }

    /// Returns the Merkle proof (the sibling at each level) of the leaf at
    /// `index`. When `full == false` the top `canopy_depth` levels are
    /// omitted. Missing siblings are zero-subtree hashes.
    pub fn get_proof_of_leaf(
        &self,
        mut index: usize,
        full: bool,
    ) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        let mut proof = BoundedVec::with_capacity(self.height);
        let limit = match full {
            true => self.height,
            false => self.height - self.canopy_depth,
        };

        for level in 0..limit {
            let is_left = index % 2 == 0;
            let sibling_index = if is_left { index + 1 } else { index - 1 };
            let node = self.layers[level]
                .get(sibling_index)
                .cloned()
                .unwrap_or(H::zero_bytes()[level]);
            proof.push(node)?;

            index /= 2;
        }

        Ok(proof)
    }

    /// Returns the canopy: the nodes of the top `canopy_depth` levels
    /// (starting with the two children of the root), level by level.
    pub fn get_canopy(&self) -> Result<BoundedVec<[u8; 32]>, BoundedVecError> {
        if self.canopy_depth == 0 {
            return Ok(BoundedVec::with_capacity(0));
        }

        let mut canopy = BoundedVec::with_capacity(self.canopy_size());
        // The level right below the root has 2 nodes; each level down doubles.
        let mut num_nodes_in_level = 2;

        for i in 0..self.canopy_depth {
            let level = self.height - 1 - i;

            for j in 0..num_nodes_in_level {
                let node = self.layers[level]
                    .get(j)
                    .cloned()
                    .unwrap_or(H::zero_bytes()[level]);
                canopy.push(node)?;
            }

            num_nodes_in_level *= 2;
        }

        Ok(canopy)
    }

    /// Returns the leaf at `leaf_index`, or the zero leaf if never appended.
    pub fn get_leaf(&self, leaf_index: usize) -> [u8; 32] {
        self.layers[0]
            .get(leaf_index)
            .cloned()
            .unwrap_or(H::zero_bytes()[0])
    }

    /// Linear scan for the first leaf equal to `leaf`.
    pub fn get_leaf_index(&self, leaf: &[u8; 32]) -> Option<usize> {
        self.layers[0].iter().position(|node| node == leaf)
    }

    /// All appended leaves, in insertion order.
    pub fn leaves(&self) -> &[[u8; 32]] {
        self.layers[0].as_slice()
    }

    /// Verifies a full-height `proof` for `leaf` at `leaf_index` against the
    /// current root. Returns `Ok(false)` when the recomputed root differs.
    pub fn verify(
        &self,
        leaf: &[u8; 32],
        proof: &BoundedVec<[u8; 32]>,
        leaf_index: usize,
    ) -> Result<bool, ReferenceMerkleTreeError> {
        if leaf_index >= self.capacity {
            return Err(ReferenceMerkleTreeError::LeafDoesNotExist(leaf_index));
        }
        // Only full proofs (one sibling per level) are accepted here.
        if proof.len() != self.height {
            return Err(ReferenceMerkleTreeError::InvalidProofLength(
                proof.len(),
                self.height,
            ));
        }
        let mut computed_hash = *leaf;
        let mut current_index = leaf_index;

        for sibling_hash in proof.iter() {
            let is_left = current_index % 2 == 0;

            // Order the pair according to which side the running hash is on.
            let hashes = if is_left {
                [&computed_hash[..], &sibling_hash[..]]
            } else {
                [&sibling_hash[..], &computed_hash[..]]
            };

            computed_hash = H::hashv(&hashes)?;

            // Move to the parent index for the next iteration
            current_index /= 2;
        }

        // Compare the computed hash to the last known root
        Ok(computed_hash == self.root())
    }

    /// Returns the filled subtrees of the Merkle tree.
    /// Subtrees are the rightmost left node of each level.
    /// Subtrees can be used for efficient append operations.
    pub fn get_subtrees(&self) -> Vec<[u8; 32]> {
        // Start from zero-subtree hashes; only overwrite levels that have a
        // materialized left node.
        let mut subtrees = H::zero_bytes()[0..self.height].to_vec();

        // Only proceed if the topmost layer is populated, i.e. at least one
        // leaf has been appended (which fills every level).
        if self.layers.last().and_then(|layer| layer.first()).is_some() {
            for level in (0..self.height).rev() {
                if let Some(left_child) = self.layers.get(level).and_then(|layer| {
                    // The rightmost left (even-index) node: with an even node
                    // count it is the second-to-last node, with an odd count
                    // the last node is itself a left child.
                    if layer.len() % 2 == 0 {
                        layer.get(layer.len() - 2)
                    } else {
                        layer.last()
                    }
                }) {
                    subtrees[level] = *left_child;
                }
            }
        }
        subtrees
    }
}

#[cfg(test)]
mod tests {
    use light_hasher::{zero_bytes::poseidon::ZERO_BYTES, Poseidon};

    use super::*;

    // Expected subtrees (per level, bottom to top) after appending the leaf
    // `[0; 31] ++ [1]` twice into a height-4 Poseidon tree.
    const TREE_AFTER_1_UPDATE: [[u8; 32]; 4] = [
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1,
        ],
        [
            0, 122, 243, 70, 226, 211, 4, 39, 158, 121, 224, 169, 243, 2, 63, 119, 18, 148, 167,
            138, 203, 112, 231, 63, 144, 175, 226, 124, 173, 64, 30, 129,
        ],
        [
            4, 163, 62, 195, 162, 201, 237, 49, 131, 153, 66, 155, 106, 112, 192, 40, 76, 131,
            230, 239, 224, 130, 106, 36, 128, 57, 172, 107, 60, 247, 103, 194,
        ],
        [
            7, 118, 172, 114, 242, 52, 137, 62, 111, 106, 113, 139, 123, 161, 39, 255, 86, 13,
            105, 167, 223, 52, 15, 29, 137, 37, 106, 178, 49, 44, 226, 75,
        ],
    ];

    // Expected subtrees after additionally appending `[0; 31] ++ [2]` twice.
    const TREE_AFTER_2_UPDATES: [[u8; 32]; 4] = [
        [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 2,
        ],
        [
            0, 122, 243, 70, 226, 211, 4, 39, 158, 121, 224, 169, 243, 2, 63, 119, 18, 148, 167,
            138, 203, 112, 231, 63, 144, 175, 226, 124, 173, 64, 30, 129,
        ],
        [
            18, 102, 129, 25, 152, 42, 192, 218, 100, 215, 169, 202, 77, 24, 100, 133, 45, 152,
            17, 121, 103, 9, 187, 226, 182, 36, 35, 35, 126, 255, 244, 140,
        ],
        [
            11, 230, 92, 56, 65, 91, 231, 137, 40, 92, 11, 193, 90, 225, 123, 79, 82, 17, 212,
            147, 43, 41, 126, 223, 49, 2, 139, 211, 249, 138, 7, 12,
        ],
    ];

    #[test]
    fn test_subtrees() {
        let tree_depth = 4;
        let mut tree = MerkleTree::<Poseidon>::new(tree_depth, 0);

        // An empty tree must report zero-subtree hashes at every level.
        let subtrees = tree.get_subtrees();
        for (i, subtree) in subtrees.iter().enumerate() {
            assert_eq!(*subtree, ZERO_BYTES[i]);
        }

        let mut leaf_0: [u8; 32] = [0; 32];
        leaf_0[31] = 1;
        tree.append(&leaf_0).unwrap();
        tree.append(&leaf_0).unwrap();
        let subtrees = tree.get_subtrees();
        for (i, subtree) in subtrees.iter().enumerate() {
            assert_eq!(*subtree, TREE_AFTER_1_UPDATE[i]);
        }

        let mut leaf_1: [u8; 32] = [0; 32];
        leaf_1[31] = 2;
        tree.append(&leaf_1).unwrap();
        tree.append(&leaf_1).unwrap();
        let subtrees = tree.get_subtrees();
        for (i, subtree) in subtrees.iter().enumerate() {
            assert_eq!(*subtree, TREE_AFTER_2_UPDATES[i]);
        }
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/reference/src/sparse_merkle_tree.rs
use light_hasher::Hasher;
use num_bigint::BigUint;
use std::marker::PhantomData;

/// Append-only Merkle tree that stores only the filled subtrees (the
/// rightmost left node per level) instead of all nodes, so memory is
/// O(HEIGHT) regardless of how many leaves were appended.
#[derive(Clone, Debug)]
pub struct SparseMerkleTree<H: Hasher, const HEIGHT: usize> {
    // Filled subtree per level; level 0 is the leaf level.
    subtrees: [[u8; 32]; HEIGHT],
    // Index at which the next leaf will be appended.
    next_index: usize,
    // Root after the most recent append.
    root: [u8; 32],
    _hasher: PhantomData<H>,
}

impl<H, const HEIGHT: usize> SparseMerkleTree<H, HEIGHT>
where
    H: Hasher,
{
    /// Resumes a tree from previously captured `subtrees` and `next_index`.
    ///
    /// NOTE(review): `root` is initialized to all zeros here, not recomputed
    /// from `subtrees`; `root()` is only meaningful after the next `append`.
    /// Presumably callers append before reading the root — TODO confirm.
    pub fn new(subtrees: [[u8; 32]; HEIGHT], next_index: usize) -> Self {
        Self {
            subtrees,
            next_index,
            root: [0u8; 32],
            _hasher: PhantomData,
        }
    }

    /// Creates an empty tree whose subtrees and root are the hasher's
    /// precomputed zero-subtree hashes.
    pub fn new_empty() -> Self {
        Self {
            subtrees: H::zero_bytes()[0..HEIGHT].try_into().unwrap(),
            next_index: 0,
            root: H::zero_bytes()[HEIGHT],
            _hasher: PhantomData,
        }
    }

    /// Appends `leaf`, updating the root and filled subtrees, and returns the
    /// Merkle proof (one sibling per level) for the appended leaf.
    pub fn append(&mut self, leaf: [u8; 32]) -> [[u8; 32]; HEIGHT] {
        let mut current_index = self.next_index;
        let mut current_level_hash = leaf;
        let mut left;
        let mut right;
        let mut proof: [[u8; 32]; HEIGHT] = [[0u8; 32]; HEIGHT];

        for (i, (subtree, zero_byte)) in self
            .subtrees
            .iter_mut()
            .zip(H::zero_bytes().iter())
            .enumerate()
        {
            if current_index % 2 == 0 {
                // Left child: the right sibling does not exist yet, so it is
                // the zero-subtree hash; remember this node as the filled
                // subtree for the level.
                left = current_level_hash;
                right = *zero_byte;

                *subtree = current_level_hash;
                proof[i] = right;
            } else {
                // Right child: the left sibling is the previously filled
                // subtree for this level.
                left = *subtree;
                right = current_level_hash;
                proof[i] = left;
            }

            current_level_hash = H::hashv(&[&left, &right]).unwrap();
            current_index /= 2;
        }
        self.root = current_level_hash;
        self.next_index += 1;

        proof
    }

    /// Root after the most recent append (see the note on `new`).
    pub fn root(&self) -> [u8; 32] {
        self.root
    }

    /// Current filled subtrees, level 0 first.
    pub fn get_subtrees(&self) -> [[u8; 32]; HEIGHT] {
        self.subtrees
    }

    /// The compile-time tree height.
    pub fn get_height(&self) -> usize {
        HEIGHT
    }

    /// Index of the next leaf to be appended (== number of appended leaves).
    pub fn get_next_index(&self) -> usize {
        self.next_index
    }
}

/// Formats a 32-byte big-endian value as a `0x`-prefixed hex string
/// (no leading-zero padding).
pub fn arr_to_string(arr: [u8; 32]) -> String {
    format!("0x{}", BigUint::from_bytes_be(&arr).to_str_radix(16))
}

#[cfg(test)]
mod test {
    use crate::MerkleTree;

    use super::*;
    use light_hasher::Poseidon;

    #[test]
    fn test_sparse_merkle_tree() {
        let height = 4;
        let mut merkle_tree = SparseMerkleTree::<Poseidon, 4>::new_empty();
        // Cross-check against the fully materialized reference tree.
        let mut reference_merkle_tree = MerkleTree::<Poseidon>::new(height, 0);
        let leaf = [1u8; 32];
        merkle_tree.append(leaf);
        reference_merkle_tree.append(&leaf).unwrap();
        assert_eq!(merkle_tree.root(), reference_merkle_tree.root());
        let subtrees = merkle_tree.get_subtrees();
        let reference_subtrees = reference_merkle_tree.get_subtrees();
        assert_eq!(subtrees.to_vec(), reference_subtrees);
    }
}
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/README.md
<p align="center">
  <img src="https://github.com/ldiego08/light-protocol/raw/main/assets/logo.svg" width="90" />
</p>

<h1 align="center">@lightprotocol/stateless.js</h1>

<p align="center">
  <b>This is the JavaScript SDK for building Solana applications with ZK Compression for Node and web.</b>
</p>

<p align="center">
  <a href="https://badge.fury.io/js/@lightprotocol%2Fstateless.js">
    <img src="https://badge.fury.io/js/@lightprotocol%2Fstateless.js.svg" alt="package npm version" height="18" />
  </a>
  <img src="https://img.shields.io/npm/l/%40lightprotocol%2Fstateless.js" alt="package license" height="18">
  <img src="https://img.shields.io/npm/dw/%40lightprotocol%2Fstateless.js" alt="package weekly downloads" height="18" />
</p>

## Usage

### Installation

Install this package in your project by running the following terminal command:

```bin
npm install --save @lightprotocol/stateless.js
```

## Documentation and Examples

For more detailed documentation on usage, please check [the respective section at the ZK Compression documentation.](https://www.zkcompression.com/developers/typescript-client)

For example implementations, including web and Node, refer to the respective repositories:

- [Web application example implementation](https://github.com/Lightprotocol/example-web-client)
- [Node server example implementation](https://github.com/Lightprotocol/example-nodejs-client)

## Troubleshooting

Have a question or a problem? Feel free to ask in the [Light](https://discord.gg/CYvjBgzRFP) and [Helius](https://discord.gg/Uzzf6a7zKr) developer Discord servers.

Please include the following information:

- A detailed description or context of the issue or what you are trying to achieve.
- A code example that we can use to test and debug (if possible). Use [CodeSandbox](https://codesandbox.io/p/sandbox/vanilla-ts) or any other live environment provider.
- A description or context of any errors you are encountering with stacktraces if available.
## Contributing Light and ZK Compression are open source protocols and very much welcome contributions. If you have a contribution, do not hesitate to send a PR to the respective repository or discuss in the linked developer Discord servers. - 🐞 For bugs or feature requests, please open an [issue](https://github.com/lightprotocol/lightprotocol/issues/new). - 🔒 For security vulnerabilities, please follow the [security policy](https://github.com/Lightprotocol/light-protocol/blob/main/SECURITY.md). ## Additional Resources - [Light Protocol Repository](https://github.com/Lightprotocol/light-protocol) - [ZK Compression Official Documentation](https://www.zkcompression.com/) ## Disclaimer All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with Light Protocol Labs' ("Labs") best efforts. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment. Any content produced by Labs or developer resources that Labs provides, are for educational and inspiration purposes only. Labs does not encourage, induce or sanction the deployment, integration or use of any such applications (including the code comprising the Light blockchain protocol) in violation of applicable laws or regulations and hereby prohibits any such deployment, integration or use. This includes use of any such applications by the reader (a) in violation of export control or sanctions laws of the United States or any other applicable jurisdiction, (b) if the reader is located in or ordinarily resident in a country or territory subject to comprehensive sanctions administered by the U.S. Office of Foreign Assets Control (OFAC), or (c) if the reader is or is working on behalf of a Specially Designated National (SDN) or a person subject to similar blocking or denied party prohibitions. The reader should be aware that U.S. 
export control and sanctions laws prohibit U.S. persons (and other persons that are subject to such laws) from transacting with persons in certain countries and territories or that are on the SDN list. As a project based primarily on open-source software, it is possible that such sanctioned persons may nevertheless bypass prohibitions, obtain the code comprising the Light blockchain protocol (or other project code or applications) and deploy, integrate, or otherwise use it. Accordingly, there is a risk to individuals that other persons using the Light blockchain protocol may be sanctioned persons and that transactions with such persons would be a violation of U.S. export controls and sanctions law. This risk applies to individuals, organizations, and other ecosystem participants that deploy, integrate, or use the Light blockchain protocol code directly (e.g., as a node operator), and individuals that transact on the Light blockchain protocol implementation through clients, other kinds of nodes, third party interfaces, and/or wallet software.
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/rollup.config.js
import typescript from '@rollup/plugin-typescript'; import nodePolyfills from 'rollup-plugin-polyfill-node'; import dts from 'rollup-plugin-dts'; import resolve from '@rollup/plugin-node-resolve'; import commonjs from '@rollup/plugin-commonjs'; const rolls = (fmt, env) => ({ input: 'src/index.ts', output: { dir: `dist/${fmt}/${env}`, format: fmt, entryFileNames: `[name].${fmt === 'cjs' ? 'cjs' : 'js'}`, sourcemap: true, }, external: [ '@coral-xyz/anchor', '@solana/web3.js', '@noble/hashes', 'buffer', 'superstruct', 'tweetnacl', ], plugins: [ typescript({ target: fmt === 'es' ? 'ES2022' : 'ES2017', outDir: `dist/${fmt}/${env}`, rootDir: 'src', }), commonjs(), resolve({ browser: env === 'browser', preferBuiltins: env === 'node', }), env === 'browser' ? nodePolyfills() : undefined, ].filter(Boolean), onwarn(warning, warn) { if (warning.code !== 'CIRCULAR_DEPENDENCY') { warn(warning); } }, }); const typesConfig = { input: 'src/index.ts', output: [{ file: 'dist/types/index.d.ts', format: 'es' }], plugins: [dts()], }; export default [ rolls('cjs', 'browser'), rolls('es', 'browser'), rolls('cjs', 'node'), typesConfig, ];
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tsconfig.test.json
{ "compilerOptions": { "esModuleInterop": true, "rootDirs": ["src", "tests"] }, "extends": "./tsconfig.json", "include": ["./tests/**/*.ts", "vitest.config.ts"] }
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/package.json
{ "name": "@lightprotocol/stateless.js", "version": "0.16.0", "description": "JavaScript API for Light and ZK Compression", "sideEffects": false, "main": "dist/cjs/node/index.cjs", "type": "module", "exports": { ".": { "require": "./dist/cjs/node/index.cjs", "types": "./dist/types/index.d.ts", "default": "./dist/cjs/node/index.cjs" }, "./browser": { "import": "./dist/es/browser/index.js", "require": "./dist/cjs/browser/index.cjs", "types": "./dist/types/index.d.ts" } }, "types": "./dist/types/index.d.ts", "files": [ "dist" ], "keywords": [ "zk", "compression", "stateless", "solana" ], "maintainers": [ { "name": "Light Protocol Maintainers", "email": "friends@lightprotocol.com" } ], "license": "Apache-2.0", "dependencies": { "@coral-xyz/anchor": "0.29.0", "@solana/web3.js": "1.95.3", "@noble/hashes": "1.5.0", "buffer": "6.0.3", "superstruct": "2.0.2", "tweetnacl": "1.0.3" }, "devDependencies": { "@lightprotocol/hasher.rs": "workspace:*", "@esbuild-plugins/node-globals-polyfill": "^0.2.3", "@lightprotocol/programs": "workspace:*", "@playwright/test": "^1.47.1", "@rollup/plugin-babel": "^6.0.4", "@rollup/plugin-commonjs": "^26.0.1", "@rollup/plugin-json": "^6.1.0", "@rollup/plugin-node-resolve": "^15.2.3", "@rollup/plugin-replace": "^5.0.7", "@rollup/plugin-terser": "^0.4.4", "@rollup/plugin-typescript": "^11.1.6", "@types/bn.js": "^5.1.5", "@types/node": "^22.5.5", "@typescript-eslint/eslint-plugin": "^7.13.1", "@typescript-eslint/parser": "^7.13.1", "eslint": "^8.56.0", "eslint-plugin-n": "^17.10.2", "eslint-plugin-promise": "^7.1.0", "eslint-plugin-vitest": "^0.5.4", "http-server": "^14.1.1", "playwright": "^1.47.1", "prettier": "^3.3.3", "rimraf": "^6.0.1", "rollup": "^4.21.3", "rollup-plugin-dts": "^6.1.1", "rollup-plugin-polyfill-node": "^0.13.0", "ts-node": "^10.9.2", "tslib": "^2.7.0", "typescript": "^5.6.2", "vitest": "^2.1.1" }, "scripts": { "test": "pnpm test:unit:all && pnpm test:e2e:all", "test-all": "vitest run", "test:unit:all": "vitest run tests/unit", 
"test-validator": "./../../cli/test_bin/run test-validator --prover-run-mode rpc", "test:e2e:transfer": "pnpm test-validator && vitest run tests/e2e/transfer.test.ts --reporter=verbose", "test:e2e:compress": "pnpm test-validator && vitest run tests/e2e/compress.test.ts --reporter=verbose", "test:e2e:test-rpc": "pnpm test-validator && vitest run tests/e2e/test-rpc.test.ts", "test:e2e:rpc-interop": "pnpm test-validator && vitest run tests/e2e/rpc-interop.test.ts", "test:e2e:browser": "pnpm playwright test", "test:e2e:all": "pnpm test-validator && vitest run tests/e2e/test-rpc.test.ts && vitest run tests/e2e/compress.test.ts && vitest run tests/e2e/transfer.test.ts && vitest run tests/e2e/rpc-interop.test.ts", "test:index": "vitest run tests/e2e/program.test.ts", "test:e2e:serde": "vitest run tests/e2e/serde.test.ts", "test:verbose": "vitest run --reporter=verbose", "test:testnet": "vitest run tests/e2e/testnet.test.ts --reporter=verbose", "pull-idls": "../../scripts/push-stateless-js-idls.sh && ../../scripts/push-compressed-token-idl.sh", "build": "rimraf dist && pnpm pull-idls && pnpm build:bundle", "build:bundle": "rollup -c", "format": "prettier --write .", "lint": "eslint ." }, "nx": { "targets": { "build": { "inputs": [ "{workspaceRoot}/cli", "{workspaceRoot}/target/idl", "{workspaceRoot}/target/types" ] } } } }
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/.prettierrc
{ "semi": true, "trailingComma": "all", "singleQuote": true, "printWidth": 80, "useTabs": false, "tabWidth": 4, "bracketSpacing": true, "arrowParens": "avoid" }
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tsconfig.json
{ "$schema": "https://json.schemastore.org/tsconfig", "compilerOptions": { "importHelpers": true, "outDir": "./dist", "esModuleInterop": true, "allowSyntheticDefaultImports": true, "strict": true, "declaration": true, "target": "ESNext", "module": "ESNext", "moduleResolution": "Node", "lib": ["ESNext", "DOM"], "types": ["node"], "skipLibCheck": false }, "include": ["./src/**/*.ts", "playwright.config.ts", "rollup.config.js"] }
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/.eslintignore
node_modules lib dist
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/playwright.config.ts
import { PlaywrightTestConfig } from '@playwright/test'; const config: PlaywrightTestConfig = { webServer: { command: 'npx http-server -p 4004 -c-1', port: 4004, cwd: '.', timeout: 15 * 1000, }, testDir: './tests/e2e/browser', }; export default config;
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/vitest.config.ts
import { defineConfig } from 'vitest/config'; export default defineConfig({ test: { include: ['tests/**/*.test.ts'], exclude: process.env.EXCLUDE_E2E ? ['tests/e2e/**'] : [], testTimeout: 30000, }, });
0
solana_public_repos/Lightprotocol/light-protocol/js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/.eslintrc.json
{ "root": true, "ignorePatterns": ["node_modules", "lib"], "parser": "@typescript-eslint/parser", "parserOptions": { "project": ["./tsconfig.json", "./tsconfig.test.json"] }, "plugins": ["@typescript-eslint", "vitest"], "extends": [ "eslint:recommended", "plugin:@typescript-eslint/eslint-recommended", "plugin:@typescript-eslint/recommended" ], "rules": { "@typescript-eslint/ban-ts-comment": 0, "@typescript-eslint/no-explicit-any": 0, "@typescript-eslint/no-var-requires": 0, "@typescript-eslint/no-unused-vars": 0, "no-prototype-builtins": 0 } }
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/merkle-tree/merkle-tree.test.ts
import { IndexedArray } from '../../../src/test-helpers/merkle-tree/indexed-array';
import { beforeAll, describe, expect, it } from 'vitest';
import { IndexedElement } from '../../../src/test-helpers/merkle-tree/indexed-array';
import { HIGHEST_ADDRESS_PLUS_ONE } from '../../../src/constants';
import { bn } from '../../../src/state';
import { MerkleTree } from '../../../src/test-helpers/merkle-tree';

// Unit tests for the IndexedArray helper, cross-checked against reference
// roots of a height-26 Merkle tree built over the hashed elements.
describe('MerkleTree', () => {
    let WasmFactory: any;

    // Reference 32-byte roots: freshly initialized indexed tree, then after
    // appending 30, 42, and 12 (in that order).
    const refIndexedMerkleTreeInitedRoot = [
        33, 133, 56, 184, 142, 166, 110, 161, 4, 140, 169, 247, 115, 33, 15,
        181, 76, 89, 48, 126, 58, 86, 204, 81, 16, 121, 185, 77, 75, 152, 43,
        15,
    ];
    const refIndexedMerkleTreeRootWithOneAppend = [
        31, 159, 196, 171, 68, 16, 213, 28, 158, 200, 223, 91, 244, 193, 188,
        162, 50, 68, 54, 244, 116, 44, 153, 65, 209, 9, 47, 98, 126, 89, 131,
        158,
    ];
    const refIndexedMerkleTreeRootWithTwoAppends = [
        1, 185, 99, 233, 59, 202, 51, 222, 224, 31, 119, 180, 76, 104, 72, 27,
        152, 12, 236, 78, 81, 60, 87, 158, 237, 1, 176, 9, 155, 166, 108, 89,
    ];
    const refIndexedMerkleTreeRootWithThreeAppends = [
        41, 143, 181, 2, 66, 117, 37, 226, 134, 212, 45, 95, 114, 60, 189, 18,
        44, 155, 132, 148, 41, 54, 131, 106, 61, 120, 237, 168, 118, 198, 63,
        116,
    ];

    // Expected indexed-array state after appending value 30:
    // element 0 (value 0) links to index 2, element 2 (value 30) links back
    // to the sentinel at index 1.
    const refIndexedArrayElem0 = new IndexedElement(0, bn(0), 2);
    const refIndexedArrayElem1 = new IndexedElement(
        1,
        HIGHEST_ADDRESS_PLUS_ONE,
        0,
    );
    const refIndexedArrayElem2 = new IndexedElement(2, bn(30), 1);

    describe('IndexedArray', () => {
        beforeAll(async () => {
            // Lazy-load the WASM hasher once for the whole suite.
            WasmFactory = (await import('@lightprotocol/hasher.rs'))
                .WasmFactory;
        });

        it('should findLowElementIndex', () => {
            const indexedArray = new IndexedArray(
                [
                    refIndexedArrayElem0,
                    refIndexedArrayElem1,
                    refIndexedArrayElem2,
                ],
                2,
                1,
            );
            expect(indexedArray.findLowElementIndex(bn(29))).toEqual(0);
            // Looking up an existing value (30) must throw.
            expect(() => indexedArray.findLowElementIndex(bn(30))).toThrow();
            expect(indexedArray.findLowElementIndex(bn(31))).toEqual(2);
        });

        it('should findLowElement', () => {
            const indexedArray = new IndexedArray(
                [
                    refIndexedArrayElem0,
                    refIndexedArrayElem1,
                    refIndexedArrayElem2,
                ],
                2,
                1,
            );
            const [lowElement, nextValue] = indexedArray.findLowElement(bn(29));
            expect(lowElement).toEqual(refIndexedArrayElem0);
            expect(nextValue).toEqual(bn(30));

            // Looking up an existing value (30) must throw.
            expect(() => indexedArray.findLowElement(bn(30))).toThrow();

            const [lowElement2, nextValue2] = indexedArray.findLowElement(
                bn(31),
            );
            expect(lowElement2).toEqual(refIndexedArrayElem2);
            expect(nextValue2).toEqual(HIGHEST_ADDRESS_PLUS_ONE);
        });

        it('should appendWithLowElementIndex', () => {
            const indexedArray = new IndexedArray(
                [
                    new IndexedElement(0, bn(0), 1),
                    new IndexedElement(1, HIGHEST_ADDRESS_PLUS_ONE, 0),
                ],
                1,
                1,
            );
            const newElement = indexedArray.appendWithLowElementIndex(
                0,
                bn(30),
            );
            expect(newElement.newElement).toEqual(refIndexedArrayElem2);
            expect(newElement.newLowElement).toEqual(refIndexedArrayElem0);
            expect(newElement.newElementNextValue).toEqual(
                HIGHEST_ADDRESS_PLUS_ONE,
            );
        });

        it('should append', () => {
            const indexedArray = new IndexedArray(
                [
                    new IndexedElement(0, bn(0), 1),
                    new IndexedElement(1, HIGHEST_ADDRESS_PLUS_ONE, 0),
                ],
                1,
                1,
            );
            const newElement = indexedArray.append(bn(30));
            expect(newElement.newElement).toEqual(refIndexedArrayElem2);
            expect(newElement.newLowElement).toEqual(refIndexedArrayElem0);
            expect(newElement.newElementNextValue).toEqual(
                HIGHEST_ADDRESS_PLUS_ONE,
            );
        });

        it('should append 3 times and match merkle trees', async () => {
            const lightWasm = await WasmFactory.getInstance();
            const indexedArray = IndexedArray.default();
            indexedArray.init();

            // Initial state: two elements (0 and the sentinel).
            let hash0 = indexedArray.hashElement(lightWasm, 0);
            let hash1 = indexedArray.hashElement(lightWasm, 1);
            let leaves = [hash0, hash1].map(leaf => bn(leaf!).toString());
            let tree = new MerkleTree(26, lightWasm, leaves);
            expect(tree.root()).toEqual(
                bn(refIndexedMerkleTreeInitedRoot).toString(),
            );

            // 1st
            const newElement = indexedArray.append(bn(30));
            expect(newElement.newElement).toEqual(refIndexedArrayElem2);
            expect(newElement.newLowElement).toEqual(refIndexedArrayElem0);
            expect(newElement.newElementNextValue).toEqual(
                HIGHEST_ADDRESS_PLUS_ONE,
            );
            hash0 = indexedArray.hashElement(lightWasm, 0);
            hash1 = indexedArray.hashElement(lightWasm, 1);
            let hash2 = indexedArray.hashElement(lightWasm, 2);
            leaves = [hash0, hash1, hash2].map(leaf => bn(leaf!).toString());
            tree = new MerkleTree(26, lightWasm, leaves);
            expect(tree.root()).toEqual(
                bn(refIndexedMerkleTreeRootWithOneAppend).toString(),
            );

            // 2nd
            let refItems0 = new IndexedElement(0, bn(0), 2);
            let refItems1 = new IndexedElement(1, HIGHEST_ADDRESS_PLUS_ONE, 0);
            let refItems2 = new IndexedElement(2, bn(30), 3);
            let refItems3 = new IndexedElement(3, bn(42), 1);

            const newElement2 = indexedArray.append(bn(42));
            expect(newElement2.newElement).toEqual(refItems3);
            expect(newElement2.newLowElement).toEqual(refItems2);
            expect(newElement2.newElementNextValue).toEqual(
                HIGHEST_ADDRESS_PLUS_ONE,
            );
            expect(indexedArray.elements[0].equals(refItems0)).toBeTruthy();
            expect(indexedArray.elements[1].equals(refItems1)).toBeTruthy();
            expect(indexedArray.elements[2].equals(refItems2)).toBeTruthy();
            expect(indexedArray.elements[3].equals(refItems3)).toBeTruthy();

            hash0 = indexedArray.hashElement(lightWasm, 0);
            hash1 = indexedArray.hashElement(lightWasm, 1);
            hash2 = indexedArray.hashElement(lightWasm, 2);
            let hash3 = indexedArray.hashElement(lightWasm, 3);
            leaves = [hash0, hash1, hash2, hash3].map(leaf =>
                bn(leaf!).toString(),
            );
            tree = new MerkleTree(26, lightWasm, leaves);
            expect(tree.root()).toEqual(
                bn(refIndexedMerkleTreeRootWithTwoAppends).toString(),
            );

            // 3rd — appending 12 splits the range of element 0, which now
            // links to the new element at index 4.
            refItems0 = new IndexedElement(0, bn(0), 4);
            refItems1 = new IndexedElement(1, HIGHEST_ADDRESS_PLUS_ONE, 0);
            refItems2 = new IndexedElement(2, bn(30), 3);
            refItems3 = new IndexedElement(3, bn(42), 1);
            const refItems4 = new IndexedElement(4, bn(12), 2);

            const newElement3 = indexedArray.append(bn(12));
            expect(newElement3.newElement).toEqual(refItems4);
            expect(newElement3.newLowElement).toEqual(refItems0);
            expect(newElement3.newElementNextValue).toEqual(bn(30));
            expect(indexedArray.elements[0].equals(refItems0)).toBeTruthy();
            expect(indexedArray.elements[1].equals(refItems1)).toBeTruthy();
            expect(indexedArray.elements[2].equals(refItems2)).toBeTruthy();
            expect(indexedArray.elements[3].equals(refItems3)).toBeTruthy();
            expect(indexedArray.elements[4].equals(refItems4)).toBeTruthy();

            hash0 = indexedArray.hashElement(lightWasm, 0);
            hash1 = indexedArray.hashElement(lightWasm, 1);
            hash2 = indexedArray.hashElement(lightWasm, 2);
            hash3 = indexedArray.hashElement(lightWasm, 3);
            const hash4 = indexedArray.hashElement(lightWasm, 4);
            leaves = [hash0, hash1, hash2, hash3, hash4].map(leaf =>
                bn(leaf!).toString(),
            );
            tree = new MerkleTree(26, lightWasm, leaves);
            expect(tree.root()).toEqual(
                bn(refIndexedMerkleTreeRootWithThreeAppends).toString(),
            );
        });
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/utils/conversion.test.ts
import { describe, it, expect } from 'vitest'; import { toArray } from '../../../src/utils/conversion'; import { calculateComputeUnitPrice } from '../../../src/utils'; describe('toArray', () => { it('should return same array if array is passed', () => { const arr = [1, 2, 3]; expect(toArray(arr)).toBe(arr); }); it('should wrap non-array in array', () => { const value = 42; expect(toArray(value)).toEqual([42]); }); describe('calculateComputeUnitPrice', () => { it('calculates correct price', () => { expect(calculateComputeUnitPrice(1000, 200000)).toBe(5000); // 1000 lamports / 200k CU = 5000 microlamports/CU expect(calculateComputeUnitPrice(100, 50000)).toBe(2000); // 100 lamports / 50k CU = 2000 microlamports/CU expect(calculateComputeUnitPrice(1, 1000000)).toBe(1); // 1 lamport / 1M CU = 1 microlamport/CU }); }); });
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/utils/validation.test.ts
import { describe, it, expect } from 'vitest';
import { validateSufficientBalance } from '../../../src/utils/validation';
import { bn } from '../../../src/state';

// Unit tests for `validateSufficientBalance`: it must accept balances >= 0
// and reject negative ones by throwing.
describe('validateSufficientBalance', () => {
    // Returns a thunk so vitest's toThrow/not.toThrow can probe the call.
    const callWith = (lamports: number) => () =>
        validateSufficientBalance(bn(lamports));

    it('should not throw error for positive balance', () => {
        expect(callWith(100)).not.toThrow();
    });

    it('should not throw error for zero balance', () => {
        expect(callWith(0)).not.toThrow();
    });

    it('should throw error for negative balance', () => {
        expect(callWith(-1)).toThrow();
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/state/compressed-account.test.ts
import { describe, it, expect } from 'vitest';
import {
    createCompressedAccount,
    createCompressedAccountWithMerkleContext,
    createMerkleContext,
} from '../../../src/state/compressed-account';
import { PublicKey } from '@solana/web3.js';
import { bn } from '../../../src/state';

// Unit tests for the compressed-account constructor helpers.
describe('createCompressedAccount function', () => {
    it('should create a compressed account with default values', () => {
        const ownerKey = PublicKey.unique();

        const created = createCompressedAccount(ownerKey);

        // Omitted optional fields default to zero lamports and null address/data.
        expect(created).toEqual({
            owner: ownerKey,
            lamports: bn(0),
            address: null,
            data: null,
        });
    });

    it('should create a compressed account with provided values', () => {
        const ownerKey = PublicKey.unique();
        const lamportAmount = bn(100);
        const accountData = {
            discriminator: [0],
            data: Buffer.from(new Uint8Array([1, 2, 3])),
            dataHash: [0],
        };
        const derivedAddress = Array.from(PublicKey.unique().toBytes());

        const created = createCompressedAccount(
            ownerKey,
            lamportAmount,
            accountData,
            derivedAddress,
        );

        // All explicitly supplied fields must be carried through untouched.
        expect(created).toEqual({
            owner: ownerKey,
            lamports: lamportAmount,
            address: derivedAddress,
            data: accountData,
        });
    });
});

describe('createCompressedAccountWithMerkleContext function', () => {
    it('should create a compressed account with merkle context', () => {
        const ownerKey = PublicKey.unique();
        const treeKey = PublicKey.unique();
        const queueKey = PublicKey.unique();
        const leafHash = new Array(32).fill(1);
        const index = 0;

        const ctx = createMerkleContext(treeKey, queueKey, leafHash, index);
        const withCtx = createCompressedAccountWithMerkleContext(ctx, ownerKey);

        // Result is the default account merged with the merkle context fields.
        expect(withCtx).toEqual({
            owner: ownerKey,
            lamports: bn(0),
            address: null,
            data: null,
            merkleTree: treeKey,
            nullifierQueue: queueKey,
            hash: leafHash,
            leafIndex: index,
            readOnly: false,
        });
    });
});

describe('createMerkleContext function', () => {
    it('should create a merkle context', () => {
        const treeKey = PublicKey.unique();
        const queueKey = PublicKey.unique();
        const leafHash = new Array(32).fill(1);
        const index = 0;

        const ctx = createMerkleContext(treeKey, queueKey, leafHash, index);

        expect(ctx).toEqual({
            merkleTree: treeKey,
            nullifierQueue: queueKey,
            hash: leafHash,
            leafIndex: index,
        });
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/state/bn254.test.ts
import { describe, it, expect } from 'vitest';
import { createBN254, encodeBN254toBase58 } from '../../../src/state/BN254';
import { bn } from '../../../src/state';
import { PublicKey } from '@solana/web3.js';
import { FIELD_SIZE } from '../../../src/constants';

// Unit tests for BN254 creation from the various accepted input encodings.
describe('createBN254 function', () => {
    it('should create a BN254 from a string', () => {
        const bigint = createBN254('100');
        expect(bigint.toNumber()).toBe(100);
    });

    it('should create a BN254 from a number', () => {
        const bigint = createBN254(100);
        expect(bigint.toNumber()).toBe(100);
    });

    it('should create a BN254 from a bigint', () => {
        const bigint = createBN254(bn(100));
        expect(bigint.toNumber()).toBe(100);
    });

    it('should create a BN254 from a Buffer', () => {
        const bigint = createBN254(Buffer.from([100]));
        expect(bigint.toNumber()).toBe(100);
    });

    it('should create a BN254 from a Uint8Array', () => {
        const bigint = createBN254(new Uint8Array([100]));
        expect(bigint.toNumber()).toBe(100);
    });

    it('should create a BN254 from a number[]', () => {
        const bigint = createBN254([100]);
        expect(bigint.toNumber()).toBe(100);
    });

    it('should create a BN254 from a base58 string', () => {
        // '2j' is the base58 encoding of decimal 100.
        const bigint = createBN254('2j', 'base58');
        expect(bigint.toNumber()).toBe(bn(100).toNumber());
    });

    // Fix: moved here from the `encodeBN254toBase58` suite — it exercises
    // createBN254's field-size bound, not the encoder.
    it('should throw an error for a value that is too large', () => {
        expect(() => createBN254(FIELD_SIZE)).toThrow(
            'Value is too large. Max <254 bits',
        );
    });
});

describe('encodeBN254toBase58 function', () => {
    it('should convert a BN254 to a base58 string, pad to 32 implicitly', () => {
        const bigint = createBN254('100');
        const base58 = encodeBN254toBase58(bigint);
        expect(base58).toBe('11111111111111111111111111111112j');
    });

    it('should match transformation via pubkey', () => {
        // 32-byte reference hash; encoding must agree with
        // PublicKey.toBase58 on the same bytes.
        const refHash = [
            13, 225, 248, 105, 237, 121, 108, 70, 70, 197, 240, 130, 226, 236,
            129, 58, 213, 50, 236, 99, 216, 99, 91, 201, 141, 76, 196, 33, 41,
            181, 236, 187,
        ];
        const base58 = encodeBN254toBase58(bn(refHash));
        const pubkeyConv = new PublicKey(refHash).toBase58();
        expect(base58).toBe(pubkeyConv);
    });

    it('should pad to 32 bytes converting BN to Pubkey', () => {
        // 31-byte input: round-trip must come back left-padded with one 0 byte.
        const refHash31 = [
            13, 225, 248, 105, 237, 121, 108, 70, 70, 197, 240, 130, 226, 236,
            129, 58, 213, 50, 236, 99, 216, 99, 91, 201, 141, 76, 196, 33, 41,
            181, 236,
        ];
        const base58 = encodeBN254toBase58(bn(refHash31));
        // Consistency fix: use the vitest-native deep-equality matcher instead
        // of the chai-style `.to.be.deep.equal` chain used elsewhere nowhere.
        expect(createBN254(base58, 'base58').toArray('be', 32)).toEqual(
            [0].concat(refHash31),
        );
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/unit/instruction/pack-compressed-accounts.test.ts
import { describe, expect, it } from 'vitest';
import { PublicKey } from '@solana/web3.js';
import { padOutputStateMerkleTrees } from '../../../src/instruction/pack-compressed-accounts';

// Unit tests for `padOutputStateMerkleTrees`, which normalizes the output
// state-tree list to exactly match the number of output compressed accounts.
describe('padOutputStateMerkleTrees', () => {
    // Distinct tree pubkeys and minimal input-account stubs referencing them.
    // (`any` casts keep the stubs minimal; only `merkleTree` is read here.)
    const treeA: any = PublicKey.unique();
    const treeB: any = PublicKey.unique();
    const treeC: any = PublicKey.unique();
    const accA: any = { merkleTree: treeA };
    const accB: any = { merkleTree: treeB };
    // Fix: removed unused `accC` stub — no test referenced it.

    it('should use the 0th state tree of input state if no output state trees are provided', () => {
        const result = padOutputStateMerkleTrees(undefined, 3, [accA, accB]);
        expect(result).toEqual([treeA, treeA, treeA]);
    });

    it('should fill up with the first state tree if provided trees are less than required', () => {
        // Pads with the FIRST provided tree, not the last.
        const result = padOutputStateMerkleTrees([treeA, treeB], 5, []);
        expect(result).toEqual([treeA, treeB, treeA, treeA, treeA]);
    });

    it('should remove extra trees if the number of output state trees is greater than the number of output accounts', () => {
        const result = padOutputStateMerkleTrees([treeA, treeB, treeC], 2, []);
        expect(result).toEqual([treeA, treeB]);
    });

    it('should return the same outputStateMerkleTrees if its length equals the number of output compressed accounts', () => {
        const result = padOutputStateMerkleTrees([treeA, treeB, treeC], 3, []);
        expect(result).toEqual([treeA, treeB, treeC]);
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/transfer.test.ts
import { describe, it, assert, beforeAll } from 'vitest';
import { Signer } from '@solana/web3.js';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { Rpc } from '../../src/rpc';
import { bn, compress } from '../../src';
import { transfer } from '../../src/actions/transfer';
import { getTestRpc } from '../../src/test-helpers/test-rpc';
import { WasmFactory } from '@lightprotocol/hasher.rs';

// E2E: repeated compressed-lamport transfers payer -> bob, checking that
// sender/receiver compressed balances move by exactly the transfer amount.
describe('transfer', () => {
    let rpc: Rpc;
    let payer: Signer;
    let bob: Signer;

    // Sums lamports over every compressed account owned by `account`.
    const compressedBalanceOf = async (account: Signer) => {
        const accounts = await rpc.getCompressedAccountsByOwner(
            account.publicKey,
        );
        return accounts.items.reduce(
            (total, item) => total.add(item.lamports),
            bn(0),
        );
    };

    beforeAll(async () => {
        const lightWasm = await WasmFactory.getInstance();
        rpc = await getTestRpc(lightWasm);
        payer = await newAccountWithLamports(rpc, 2e9, 256);
        bob = await newAccountWithLamports(rpc, 2e9, 256);
        // Seed the payer with compressed lamports to transfer from.
        await compress(rpc, payer, 1e9, payer.publicKey);
    });

    const numberOfTransfers = 10;
    it(`should send compressed lamports alice -> bob for ${numberOfTransfers} transfers in a loop`, async () => {
        const transferAmount = 1000;
        for (let i = 0; i < numberOfTransfers; i++) {
            const preSenderBalance = await compressedBalanceOf(payer);
            const preReceiverBalance = await compressedBalanceOf(bob);

            await transfer(rpc, payer, transferAmount, payer, bob.publicKey);

            const postSenderBalance = await compressedBalanceOf(payer);
            const postReceiverBalance = await compressedBalanceOf(bob);

            assert(
                postSenderBalance.sub(preSenderBalance).eq(bn(-transferAmount)),
                `Iteration ${i + 1}: Sender balance should decrease by ${transferAmount}`,
            );
            assert(
                postReceiverBalance
                    .sub(preReceiverBalance)
                    .eq(bn(transferAmount)),
                `Iteration ${i + 1}: Receiver balance should increase by ${transferAmount}`,
            );
        }
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/serde.test.ts
import { describe, it, expect } from 'vitest';
import { LightSystemProgram } from '../../src/programs';
import {
    CompressedAccount,
    PublicTransactionEvent,
    bn,
    useWallet,
} from '../../src';
import { Connection, Keypair, PublicKey } from '@solana/web3.js';
import { AnchorProvider, Program, setProvider } from '@coral-xyz/anchor';
import { IDL } from '../../src/idls/account_compression';

// Smoke test: the account-compression Anchor program can be constructed from
// its IDL. No network calls are made — only object construction.
describe('account compression program', () => {
    it('instantiate using IDL', async () => {
        const mockKeypair = Keypair.generate();
        const mockConnection = new Connection(
            'http://127.0.0.1:8899',
            'confirmed',
        );
        const mockProvider = new AnchorProvider(
            mockConnection,
            useWallet(mockKeypair),
            {
                commitment: 'confirmed',
                preflightCommitment: 'confirmed',
            },
        );
        setProvider(mockProvider);
        // NOTE(review): the pubkey below is presumably the deployed
        // account-compression program id — confirm against deployment config.
        const program = new Program(
            IDL,
            new PublicKey('5QPEJ5zDsVou9FQS3KCauKswM3VwBEBu4dpL9xTqkWwN'),
            mockProvider,
        );
        expect(program).toBeDefined();
    });
});

// Serde tests: decode fixed byte fixtures through the Anchor coder and check
// selected decoded fields. The byte arrays are exact reference fixtures —
// do not edit them.
describe('serde', () => {
    it('decode output compressed account ', async () => {
        // Borsh-serialized CompressedAccount fixture (owner pubkey followed by
        // zeroed lamports/option tags, per the assertions below).
        const compressedAccount = [
            88, 8, 48, 185, 124, 227, 14, 195, 230, 152, 61, 39, 56, 191, 13,
            126, 54, 43, 47, 131, 175, 16, 52, 167, 129, 174, 200, 118, 174, 9,
            254, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ];
        const deserializedCompressedAccount: CompressedAccount =
            LightSystemProgram.program.coder.types.decode(
                'CompressedAccount',
                Buffer.from(compressedAccount),
            );
        // Fixture encodes no data, no address, zero lamports.
        expect(deserializedCompressedAccount.data).toBe(null);
        expect(deserializedCompressedAccount.address).toBe(null);
        expect(deserializedCompressedAccount.lamports.eq(bn(0))).toBe(true);
    });

    it('decode event ', async () => {
        // Serialized PublicTransactionEvent fixture; the 32 bytes starting at
        // offset 8 are the first output-compressed-account hash (see refHash).
        const data = [
            0, 0, 0, 0, 1, 0, 0, 0, 33, 32, 204, 221, 5, 83, 170, 139, 228,
            191, 81, 173, 10, 116, 229, 191, 155, 209, 23, 164, 28, 64, 188,
            34, 248, 127, 110, 97, 26, 188, 139, 164, 0, 0, 0, 0, 1, 0, 0, 0,
            22, 143, 135, 215, 254, 121, 58, 95, 241, 202, 91, 53, 255, 47,
            224, 255, 67, 218, 48, 172, 51, 208, 29, 102, 177, 187, 207, 73,
            108, 18, 59, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 68, 77, 125, 32, 76,
            128, 61, 180, 1, 207, 69, 44, 121, 118, 153, 17, 179, 183, 115, 34,
            163, 127, 102, 214, 1, 87, 175, 177, 95, 49, 65, 69, 0,
        ];
        const event: PublicTransactionEvent =
            LightSystemProgram.program.coder.types.decode(
                'PublicTransactionEvent',
                Buffer.from(data),
            );
        // Expected first output hash, copied verbatim from the fixture above.
        const refOutputCompressedAccountHash = [
            33, 32, 204, 221, 5, 83, 170, 139, 228, 191, 81, 173, 10, 116, 229,
            191, 155, 209, 23, 164, 28, 64, 188, 34, 248, 127, 110, 97, 26,
            188, 139, 164,
        ];
        expect(
            bn(event.outputCompressedAccountHashes[0]).eq(
                bn(refOutputCompressedAccountHash),
            ),
        ).toBe(true);
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/test-rpc.test.ts
import { describe, it, assert, beforeAll, expect } from 'vitest';
import { Signer } from '@solana/web3.js';
import {
    STATE_MERKLE_TREE_NETWORK_FEE,
    STATE_MERKLE_TREE_ROLLOVER_FEE,
    defaultTestStateTreeAccounts,
} from '../../src/constants';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { compress, decompress, transfer } from '../../src/actions';
import { bn, CompressedAccountWithMerkleContext } from '../../src/state';
import { getTestRpc, TestRpc } from '../../src/test-helpers/test-rpc';
import { WasmFactory } from '@lightprotocol/hasher.rs';

/// TODO: add test case for payer != address
// E2E suite for the TestRpc indexer API. NOTE(review): the `it` blocks are
// order-dependent — they share mutable state (pre/postCompressBalance) and
// each builds on the on-chain state left by the previous one.
describe('test-rpc', () => {
    const { merkleTree } = defaultTestStateTreeAccounts();
    let rpc: TestRpc;
    let payer: Signer;

    // Shared mutable state threaded through the ordered tests below.
    let preCompressBalance: number;
    let postCompressBalance: number;
    let compressLamportsAmount: number;
    let compressedTestAccount: CompressedAccountWithMerkleContext;
    let refPayer: Signer;
    const refCompressLamports = 1e7;

    beforeAll(async () => {
        const lightWasm = await WasmFactory.getInstance();
        rpc = await getTestRpc(lightWasm);
        refPayer = await newAccountWithLamports(rpc, 1e9, 200);
        payer = await newAccountWithLamports(rpc, 1e9, 148);

        /// compress refPayer
        await compress(
            rpc,
            refPayer,
            refCompressLamports,
            refPayer.publicKey,
            merkleTree,
        );

        /// compress
        compressLamportsAmount = 1e7;
        preCompressBalance = await rpc.getBalance(payer.publicKey);
        await compress(
            rpc,
            payer,
            compressLamportsAmount,
            payer.publicKey,
            merkleTree,
        );
    });

    it('getCompressedAccountsByOwner', async () => {
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        compressedTestAccount = compressedAccounts.items[0];
        assert.equal(compressedAccounts.items.length, 1);
        assert.equal(
            Number(compressedTestAccount.lamports),
            compressLamportsAmount,
        );
        assert.equal(
            compressedTestAccount.owner.toBase58(),
            payer.publicKey.toBase58(),
        );
        assert.equal(compressedTestAccount.data?.data, null);

        postCompressBalance = await rpc.getBalance(payer.publicKey);
        // NOTE(review): 5000 is presumably the Solana base tx fee in lamports
        // — confirm; the remainder is the state-tree rollover fee.
        assert.equal(
            postCompressBalance,
            preCompressBalance -
                compressLamportsAmount -
                5000 -
                STATE_MERKLE_TREE_ROLLOVER_FEE.toNumber(),
        );
    });

    it('getCompressedAccountProof for refPayer', async () => {
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const refHash = compressedAccounts.items[0].hash;
        const compressedAccountProof = await rpc.getCompressedAccountProof(
            bn(refHash),
        );
        const proof = compressedAccountProof.merkleProof.map(x => x.toString());

        // Proof path length 26 matches the tree depth used elsewhere in this
        // package's tests.
        expect(proof.length).toStrictEqual(26);
        expect(compressedAccountProof.hash).toStrictEqual(refHash);
        expect(compressedAccountProof.leafIndex).toStrictEqual(
            compressedAccounts.items[0].leafIndex,
        );
        expect(compressedAccountProof.rootIndex).toStrictEqual(2);

        // Self-transfer: consumes and recreates one account, so the count
        // stays at 1 but the payer pays base + rollover + network fees.
        preCompressBalance = await rpc.getBalance(payer.publicKey);
        await transfer(
            rpc,
            payer,
            compressLamportsAmount,
            payer,
            payer.publicKey,
            merkleTree,
        );
        const compressedAccounts1 = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        expect(compressedAccounts1.items.length).toStrictEqual(1);
        postCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(
            postCompressBalance,
            preCompressBalance -
                5000 -
                STATE_MERKLE_TREE_ROLLOVER_FEE.toNumber() -
                STATE_MERKLE_TREE_NETWORK_FEE.toNumber(),
        );
        // A second compress adds a new account alongside the transferred one.
        await compress(rpc, payer, compressLamportsAmount, payer.publicKey);
        const compressedAccounts2 = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        expect(compressedAccounts2.items.length).toStrictEqual(2);
    });

    it('getCompressedAccountProof: get many valid proofs (10)', async () => {
        // Each decompress internally fetches and verifies a fresh proof.
        for (let lamports = 1; lamports <= 10; lamports++) {
            await decompress(rpc, payer, lamports, payer.publicKey);
        }
    });

    it('getIndexerHealth', async () => {
        /// getHealth
        const health = await rpc.getIndexerHealth();
        assert.strictEqual(health, 'ok');
    });

    it('getIndexerSlot / getSlot', async () => {
        const slot = await rpc.getIndexerSlot();
        const slotWeb3 = await rpc.getSlot();
        assert(slot > 0);
        assert(slotWeb3 > 0);
    });

    it('getCompressedAccount', async () => {
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        const refHash = compressedAccounts.items[0].hash;

        /// getCompressedAccount
        // First arg (address) is undefined: lookup is by hash here.
        const compressedAccount = await rpc.getCompressedAccount(
            undefined,
            bn(refHash),
        );
        assert(compressedAccount !== null);
        assert.equal(
            compressedAccount.owner.toBase58(),
            payer.publicKey.toBase58(),
        );
        assert.equal(compressedAccount.data, null);
    });

    it('getCompressedBalance', async () => {
        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            refPayer.publicKey,
        );
        const refHash = compressedAccounts.items[0].hash;

        /// getCompressedBalance
        // Address-based lookup is explicitly unsupported by TestRpc.
        await expect(rpc.getCompressedBalance(bn(refHash))).rejects.toThrow(
            'address is not supported in test-rpc',
        );
        const compressedBalance = await rpc.getCompressedBalance(
            undefined,
            bn(refHash),
        );
        // refPayer's single account was never spent, so its balance is intact.
        expect(compressedBalance?.eq(bn(refCompressLamports))).toBeTruthy();
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/compress.test.ts
import { describe, it, assert, beforeAll, expect } from 'vitest';
import { Signer } from '@solana/web3.js';
import {
    STATE_MERKLE_TREE_NETWORK_FEE,
    ADDRESS_QUEUE_ROLLOVER_FEE,
    STATE_MERKLE_TREE_ROLLOVER_FEE,
    defaultTestStateTreeAccounts,
    ADDRESS_TREE_NETWORK_FEE,
} from '../../src/constants';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { Rpc } from '../../src/rpc';
import {
    LightSystemProgram,
    bn,
    compress,
    createAccount,
    createAccountWithLamports,
    decompress,
} from '../../src';
import { TestRpc, getTestRpc } from '../../src/test-helpers/test-rpc';
import { WasmFactory } from '@lightprotocol/hasher.rs';

/// TODO: make available to developers via utils
// Computes the total expected lamport cost of a sequence of transactions,
// summing per-tx: the Solana base fee, a rollover fee per output account,
// a rollover fee per created address, and flat network fees when the tx
// consumes inputs / creates addresses.
function txFees(
    txs: {
        in: number; // number of input (nullified) compressed accounts
        out: number; // number of output compressed accounts
        addr?: number; // number of new addresses created
        base?: number; // base tx fee override; 0 means "no base fee",
        // undefined defaults to 5000 lamports
    }[],
): number {
    let totalFee = bn(0);
    txs.forEach(tx => {
        // `base === 0` is distinguished from `base === undefined` on purpose:
        // `tx.base || 5000` alone would turn an explicit 0 into 5000.
        const solanaBaseFee = tx.base === 0 ? bn(0) : bn(tx.base || 5000);
        /// Fee per output
        const stateOutFee = STATE_MERKLE_TREE_ROLLOVER_FEE.mul(bn(tx.out));
        /// Fee per new address created
        const addrFee = tx.addr
            ? ADDRESS_QUEUE_ROLLOVER_FEE.mul(bn(tx.addr))
            : bn(0);
        /// Fee if the tx nullifies at least one input account
        const networkInFee = tx.in ? STATE_MERKLE_TREE_NETWORK_FEE : bn(0);
        /// Fee if the tx creates at least one address
        const networkAddressFee = tx.addr ? ADDRESS_TREE_NETWORK_FEE : bn(0);

        totalFee = totalFee.add(
            solanaBaseFee
                .add(stateOutFee)
                .add(addrFee)
                .add(networkInFee)
                .add(networkAddressFee),
        );
    });
    return totalFee.toNumber();
}

/// TODO: add test case for payer != address
// E2E suite for compress/decompress and address-owning account creation,
// with exact fee accounting via txFees above.
// NOTE(review): the 2nd and 3rd `it` blocks share an identical title —
// consider renaming one for clearer reporter output.
describe('compress', () => {
    const { merkleTree } = defaultTestStateTreeAccounts();
    let rpc: Rpc;
    let payer: Signer;

    beforeAll(async () => {
        const lightWasm = await WasmFactory.getInstance();
        rpc = await getTestRpc(lightWasm);
        payer = await newAccountWithLamports(rpc, 1e9, 256);
    });

    it('should create account with address', async () => {
        const preCreateAccountsBalance = await rpc.getBalance(payer.publicKey);

        // Four account creations with four distinct 32-byte address seeds...
        await createAccount(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                ]),
            ],
            LightSystemProgram.programId,
        );
        await createAccountWithLamports(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 255, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                    32,
                ]),
            ],
            0,
            LightSystemProgram.programId,
        );
        await createAccount(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 1,
                ]),
            ],
            LightSystemProgram.programId,
        );
        await createAccount(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 2,
                ]),
            ],
            LightSystemProgram.programId,
        );

        // ...and a fifth with a REUSED seed, which must be rejected
        // (addresses are unique).
        await expect(
            createAccount(
                rpc as TestRpc,
                payer,
                [
                    new Uint8Array([
                        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                        17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
                        31, 2,
                    ]),
                ],
                LightSystemProgram.programId,
            ),
        ).rejects.toThrow();

        const postCreateAccountsBalance = await rpc.getBalance(payer.publicKey);
        // Exactly the 4 successful creations are paid for; the rejected one
        // must not cost anything.
        assert.equal(
            postCreateAccountsBalance,
            preCreateAccountsBalance -
                txFees([
                    { in: 0, out: 1, addr: 1 },
                    { in: 0, out: 1, addr: 1 },
                    { in: 0, out: 1, addr: 1 },
                    { in: 0, out: 1, addr: 1 },
                ]),
        );
    });

    it('should compress lamports and create an account with address and lamports', async () => {
        // Fresh payer so balances start from a known 1e9.
        payer = await newAccountWithLamports(rpc, 1e9, 256);
        const compressLamportsAmount = 1e7;
        const preCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(preCompressBalance, 1e9);

        await compress(rpc, payer, compressLamportsAmount, payer.publicKey);

        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        assert.equal(compressedAccounts.items.length, 1);
        assert.equal(
            Number(compressedAccounts.items[0].lamports),
            compressLamportsAmount,
        );
        assert.equal(compressedAccounts.items[0].data, null);

        const postCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(
            postCompressBalance,
            preCompressBalance -
                compressLamportsAmount -
                txFees([{ in: 0, out: 1 }]),
        );

        // Creating a funded account with an address consumes the compressed
        // account (in: 1) and produces two outputs plus one address.
        await createAccountWithLamports(
            rpc as TestRpc,
            payer,
            [
                new Uint8Array([
                    1, 255, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                    32,
                ]),
            ],
            100,
            LightSystemProgram.programId,
        );
        const postCreateAccountBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(
            postCreateAccountBalance,
            postCompressBalance - txFees([{ in: 1, out: 2, addr: 1 }]),
        );
    });

    it('should compress lamports and create an account with address and lamports', async () => {
        // Same compress flow as above, but targeting an explicit merkle tree,
        // followed by two decompressions.
        payer = await newAccountWithLamports(rpc, 1e9, 256);
        const compressLamportsAmount = 1e7;
        const preCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(preCompressBalance, 1e9);

        await compress(
            rpc,
            payer,
            compressLamportsAmount,
            payer.publicKey,
            merkleTree,
        );

        const compressedAccounts = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        assert.equal(compressedAccounts.items.length, 1);
        assert.equal(
            Number(compressedAccounts.items[0].lamports),
            compressLamportsAmount,
        );
        assert.equal(compressedAccounts.items[0].data, null);

        const postCompressBalance = await rpc.getBalance(payer.publicKey);
        assert.equal(
            postCompressBalance,
            preCompressBalance -
                compressLamportsAmount -
                txFees([{ in: 0, out: 1 }]),
        );

        /// Decompress
        const decompressLamportsAmount = 1e6;
        const decompressRecipient = payer.publicKey;

        await decompress(
            rpc,
            payer,
            decompressLamportsAmount,
            decompressRecipient,
        );

        // Partial decompress leaves one account holding the remainder.
        const compressedAccounts2 = await rpc.getCompressedAccountsByOwner(
            payer.publicKey,
        );
        assert.equal(compressedAccounts2.items.length, 1);
        assert.equal(
            Number(compressedAccounts2.items[0].lamports),
            compressLamportsAmount - decompressLamportsAmount,
        );

        await decompress(rpc, payer, 1, decompressRecipient, merkleTree);
        const postDecompressBalance = await rpc.getBalance(decompressRecipient);
        // Recipient gains both decompressed amounts, minus fees for the two
        // decompress txs (each: 1 input, 1 output).
        assert.equal(
            postDecompressBalance,
            postCompressBalance +
                decompressLamportsAmount +
                1 -
                txFees([
                    { in: 1, out: 1 },
                    { in: 1, out: 1 },
                ]),
        );
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/testnet.test.ts
import { describe, it, assert, beforeAll } from 'vitest';
import { Signer } from '@solana/web3.js';
import { newAccountWithLamports } from '../../src/utils/test-utils';
import { createRpc, Rpc } from '../../src/rpc';
import { bn, compress } from '../../src';
import { transfer } from '../../src/actions/transfer';
// Fix: removed unused imports `getTestRpc` and `WasmFactory` — this suite
// talks to the remote testnet via `createRpc` and never builds a TestRpc.

// E2E against the hosted zk-testnet: repeated compressed-lamport transfers
// payer -> bob, asserting exact balance deltas each iteration.
describe('testnet transfer', () => {
    let rpc: Rpc;
    let payer: Signer;
    let bob: Signer;

    beforeAll(async () => {
        const validatorUrl = 'https://zk-testnet.helius.dev:8899';
        const photonUrl = 'https://zk-testnet.helius.dev:8784';
        const proverUrl = 'https://zk-testnet.helius.dev:3001';
        rpc = createRpc(validatorUrl, photonUrl, proverUrl);

        payer = await newAccountWithLamports(rpc, 2e9, 256);
        bob = await newAccountWithLamports(rpc, 2e9, 256);
        // Seed the payer with compressed lamports to transfer from.
        await compress(rpc, payer, 1e9, payer.publicKey);
    });

    const numberOfTransfers = 10;
    it(`should send compressed lamports alice -> bob for ${numberOfTransfers} transfers in a loop`, async () => {
        const transferAmount = 1000;
        for (let i = 0; i < numberOfTransfers; i++) {
            // Compressed balance = sum of lamports over all owned accounts.
            const preSenderBalance = (
                await rpc.getCompressedAccountsByOwner(payer.publicKey)
            ).items.reduce((acc, account) => acc.add(account.lamports), bn(0));
            const preReceiverBalance = (
                await rpc.getCompressedAccountsByOwner(bob.publicKey)
            ).items.reduce((acc, account) => acc.add(account.lamports), bn(0));

            await transfer(rpc, payer, transferAmount, payer, bob.publicKey);

            const postSenderAccs = await rpc.getCompressedAccountsByOwner(
                payer.publicKey,
            );
            const postReceiverAccs = await rpc.getCompressedAccountsByOwner(
                bob.publicKey,
            );
            const postSenderBalance = postSenderAccs.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );
            const postReceiverBalance = postReceiverAccs.items.reduce(
                (acc, account) => acc.add(account.lamports),
                bn(0),
            );

            assert(
                postSenderBalance.sub(preSenderBalance).eq(bn(-transferAmount)),
                `Iteration ${i + 1}: Sender balance should decrease by ${transferAmount}`,
            );
            assert(
                postReceiverBalance
                    .sub(preReceiverBalance)
                    .eq(bn(transferAmount)),
                `Iteration ${i + 1}: Receiver balance should increase by ${transferAmount}`,
            );
        }
    });
});
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/rpc-interop.test.ts
import { describe, it, assert, beforeAll, expect } from 'vitest'; import { PublicKey, Signer } from '@solana/web3.js'; import { newAccountWithLamports } from '../../src/utils/test-utils'; import { Rpc, createRpc } from '../../src/rpc'; import { LightSystemProgram, bn, compress, createAccount, createAccountWithLamports, defaultTestStateTreeAccounts, deriveAddress, deriveAddressSeed, sleep, } from '../../src'; import { getTestRpc, TestRpc } from '../../src/test-helpers/test-rpc'; import { transfer } from '../../src/actions/transfer'; import { WasmFactory } from '@lightprotocol/hasher.rs'; import { randomBytes } from 'tweetnacl'; describe('rpc-interop', () => { let payer: Signer; let bob: Signer; let rpc: Rpc; let testRpc: TestRpc; let executedTxs = 0; beforeAll(async () => { const lightWasm = await WasmFactory.getInstance(); rpc = createRpc(); testRpc = await getTestRpc(lightWasm); /// These are constant test accounts in between test runs payer = await newAccountWithLamports(rpc, 10e9, 256); bob = await newAccountWithLamports(rpc, 10e9, 256); await compress(rpc, payer, 1e9, payer.publicKey); executedTxs++; }); const transferAmount = 1e4; const numberOfTransfers = 15; it('getCompressedAccountsByOwner [noforester] filter should work', async () => { let accs = await rpc.getCompressedAccountsByOwner(payer.publicKey, { filters: [ { memcmp: { offset: 1, bytes: '5Vf', }, }, ], }); assert.equal(accs.items.length, 0); accs = await rpc.getCompressedAccountsByOwner(payer.publicKey, { dataSlice: { offset: 1, length: 2 }, }); assert.equal(accs.items.length, 1); }); it('getValidityProof [noforester] (inclusion) should match', async () => { const senderAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const senderAccountsTest = await testRpc.getCompressedAccountsByOwner( payer.publicKey, ); const hash = bn(senderAccounts.items[0].hash); const hashTest = bn(senderAccountsTest.items[0].hash); // accounts are the same assert.isTrue(hash.eq(hashTest)); const 
validityProof = await rpc.getValidityProof([hash]); const validityProofTest = await testRpc.getValidityProof([hashTest]); validityProof.leafIndices.forEach((leafIndex, index) => { assert.equal(leafIndex, validityProofTest.leafIndices[index]); }); validityProof.leaves.forEach((leaf, index) => { assert.isTrue(leaf.eq(validityProofTest.leaves[index])); }); validityProof.roots.forEach((elem, index) => { assert.isTrue(elem.eq(validityProofTest.roots[index])); }); validityProof.rootIndices.forEach((elem, index) => { assert.equal(elem, validityProofTest.rootIndices[index]); }); validityProof.merkleTrees.forEach((elem, index) => { assert.isTrue(elem.equals(validityProofTest.merkleTrees[index])); }); validityProof.nullifierQueues.forEach((elem, index) => { assert.isTrue( elem.equals(validityProofTest.nullifierQueues[index]), ); }); /// Executes a transfer using a 'validityProof' from Photon await transfer(rpc, payer, 1e5, payer, bob.publicKey); executedTxs++; /// Executes a transfer using a 'validityProof' directly from a prover. 
await transfer(testRpc, payer, 1e5, payer, bob.publicKey); executedTxs++; }); it('getValidityProof [noforester] (new-addresses) should match', async () => { const newAddressSeeds = [ new Uint8Array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 42, 42, 42, 14, 15, 16, 11, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ]), ]; const newAddressSeed = deriveAddressSeed( newAddressSeeds, LightSystemProgram.programId, ); const newAddress = bn(deriveAddress(newAddressSeed).toBuffer()); /// consistent proof metadata for same address const validityProof = await rpc.getValidityProof([], [newAddress]); const validityProofTest = await testRpc.getValidityProof( [], [newAddress], ); validityProof.leafIndices.forEach((leafIndex, index) => { assert.equal(leafIndex, validityProofTest.leafIndices[index]); }); validityProof.leaves.forEach((leaf, index) => { assert.isTrue(leaf.eq(validityProofTest.leaves[index])); }); validityProof.roots.forEach((elem, index) => { assert.isTrue(elem.eq(validityProofTest.roots[index])); }); validityProof.rootIndices.forEach((elem, index) => { assert.equal(elem, validityProofTest.rootIndices[index]); }); validityProof.merkleTrees.forEach((elem, index) => { assert.isTrue(elem.equals(validityProofTest.merkleTrees[index])); }); validityProof.nullifierQueues.forEach((elem, index) => { assert.isTrue( elem.equals(validityProofTest.nullifierQueues[index]), ); }); /// Need a new unique address because the previous one has been created. const newAddressSeedsTest = [ new Uint8Array([ 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 42, 42, 42, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 32, 29, 30, 31, 32, ]), ]; /// Creates a compressed account with address using a (non-inclusion) /// 'validityProof' from Photon await createAccount( rpc, payer, newAddressSeedsTest, LightSystemProgram.programId, ); executedTxs++; /// Creates a compressed account with address using a (non-inclusion) /// 'validityProof' directly from a prover. 
await createAccount( testRpc, payer, newAddressSeeds, LightSystemProgram.programId, ); executedTxs++; }); it('getValidityProof [noforester] (combined) should match', async () => { const senderAccountsTest = await testRpc.getCompressedAccountsByOwner( payer.publicKey, ); // wait for photon to be in sync await sleep(3000); const senderAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const hashTest = bn(senderAccountsTest.items[0].hash); const hash = bn(senderAccounts.items[0].hash); // accounts are the same assert.isTrue(hash.eq(hashTest)); const newAddressSeeds = [ new Uint8Array([ 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 42, 32, 42, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 32, 32, 27, 28, 29, 30, 31, 32, ]), ]; const newAddressSeed = deriveAddressSeed( newAddressSeeds, LightSystemProgram.programId, ); const newAddress = bn(deriveAddress(newAddressSeed).toBytes()); const validityProof = await rpc.getValidityProof([hash], [newAddress]); const validityProofTest = await testRpc.getValidityProof( [hashTest], [newAddress], ); // compressedAccountProofs should match const compressedAccountProof = ( await rpc.getMultipleCompressedAccountProofs([hash]) )[0]; const compressedAccountProofTest = ( await testRpc.getMultipleCompressedAccountProofs([hashTest]) )[0]; compressedAccountProof.merkleProof.forEach((proof, index) => { assert.isTrue( proof.eq(compressedAccountProofTest.merkleProof[index]), ); }); // newAddressProofs should match const newAddressProof = ( await rpc.getMultipleNewAddressProofs([newAddress]) )[0]; const newAddressProofTest = ( await testRpc.getMultipleNewAddressProofs([newAddress]) )[0]; assert.isTrue( newAddressProof.indexHashedIndexedElementLeaf.eq( newAddressProofTest.indexHashedIndexedElementLeaf, ), ); assert.isTrue( newAddressProof.leafHigherRangeValue.eq( newAddressProofTest.leafHigherRangeValue, ), ); assert.isTrue( newAddressProof.nextIndex.eq(newAddressProofTest.nextIndex), ); assert.isTrue( newAddressProof.leafLowerRangeValue.eq( 
newAddressProofTest.leafLowerRangeValue, ), ); assert.isTrue( newAddressProof.merkleTree.equals(newAddressProofTest.merkleTree), ); assert.isTrue( newAddressProof.nullifierQueue.equals( newAddressProofTest.nullifierQueue, ), ); assert.isTrue(newAddressProof.root.eq(newAddressProofTest.root)); assert.isTrue(newAddressProof.value.eq(newAddressProofTest.value)); // validity proof metadata should match validityProof.leafIndices.forEach((leafIndex, index) => { assert.equal(leafIndex, validityProofTest.leafIndices[index]); }); validityProof.leaves.forEach((leaf, index) => { assert.isTrue(leaf.eq(validityProofTest.leaves[index])); }); validityProof.roots.forEach((elem, index) => { assert.isTrue(elem.eq(validityProofTest.roots[index])); }); validityProof.rootIndices.forEach((elem, index) => { assert.equal(elem, validityProofTest.rootIndices[index]); }); validityProof.merkleTrees.forEach((elem, index) => { assert.isTrue(elem.equals(validityProofTest.merkleTrees[index])); }); validityProof.nullifierQueues.forEach((elem, index) => { assert.isTrue( elem.equals(validityProofTest.nullifierQueues[index]), 'Mismatch in nullifierQueues expected: ' + elem + ' got: ' + validityProofTest.nullifierQueues[index], ); }); /// Creates a compressed account with address and lamports using a /// (combined) 'validityProof' from Photon await createAccountWithLamports( rpc, payer, [ new Uint8Array([ 1, 2, 255, 4, 5, 6, 7, 8, 9, 10, 11, 111, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 32, 29, 30, 31, 32, ]), ], 0, LightSystemProgram.programId, ); executedTxs++; }); /// This assumes support for getMultipleNewAddressProofs in Photon. 
it('getMultipleNewAddressProofs [noforester] should match', async () => { const newAddress = bn( new Uint8Array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 42, 42, 42, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ]), ); const newAddressProof = ( await rpc.getMultipleNewAddressProofs([newAddress]) )[0]; const newAddressProofTest = ( await testRpc.getMultipleNewAddressProofs([newAddress]) )[0]; assert.isTrue( newAddressProof.indexHashedIndexedElementLeaf.eq( newAddressProofTest.indexHashedIndexedElementLeaf, ), ); assert.isTrue( newAddressProof.leafHigherRangeValue.eq( newAddressProofTest.leafHigherRangeValue, ), `Mismatch in leafHigherRangeValue expected: ${newAddressProofTest.leafHigherRangeValue} got: ${newAddressProof.leafHigherRangeValue}`, ); assert.isTrue( newAddressProof.nextIndex.eq(newAddressProofTest.nextIndex), `Mismatch in leafHigherRangeValue expected: ${newAddressProofTest.nextIndex} got: ${newAddressProof.nextIndex}`, ); assert.isTrue( newAddressProof.leafLowerRangeValue.eq( newAddressProofTest.leafLowerRangeValue, ), ); assert.isTrue( newAddressProof.merkleTree.equals(newAddressProofTest.merkleTree), ); assert.isTrue( newAddressProof.nullifierQueue.equals( newAddressProofTest.nullifierQueue, ), `Mismatch in nullifierQueue expected: ${newAddressProofTest.nullifierQueue} got: ${newAddressProof.nullifierQueue}`, ); assert.isTrue(newAddressProof.root.eq(newAddressProofTest.root)); assert.isTrue(newAddressProof.value.eq(newAddressProofTest.value)); newAddressProof.merkleProofHashedIndexedElementLeaf.forEach( (elem, index) => { const expected = newAddressProofTest.merkleProofHashedIndexedElementLeaf[ index ]; assert.isTrue( elem.eq(expected), `Mismatch in merkleProofHashedIndexedElementLeaf expected: ${expected.toString()} got: ${elem.toString()}`, ); }, ); }); it('getMultipleCompressedAccountProofs in transfer loop should match', async () => { for (let round = 0; round < numberOfTransfers; round++) { const prePayerAccounts = await 
rpc.getCompressedAccountsByOwner( payer.publicKey, ); const preSenderBalance = prePayerAccounts.items.reduce( (acc, account) => acc.add(account.lamports), bn(0), ); const preReceiverAccounts = await rpc.getCompressedAccountsByOwner( bob.publicKey, ); const preReceiverBalance = preReceiverAccounts.items.reduce( (acc, account) => acc.add(account.lamports), bn(0), ); /// get reference proofs for sender const testProofs = await testRpc.getMultipleCompressedAccountProofs( prePayerAccounts.items.map(account => bn(account.hash)), ); /// get photon proofs for sender const proofs = await rpc.getMultipleCompressedAccountProofs( prePayerAccounts.items.map(account => bn(account.hash)), ); /// compare each proof by node and root assert.equal(testProofs.length, proofs.length); proofs.forEach((proof, index) => { proof.merkleProof.forEach((elem, elemIndex) => { assert.isTrue( bn(elem).eq( bn(testProofs[index].merkleProof[elemIndex]), ), ); }); }); assert.isTrue(bn(proofs[0].root).eq(bn(testProofs[0].root))); await transfer(rpc, payer, transferAmount, payer, bob.publicKey); executedTxs++; const postSenderAccs = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const postReceiverAccs = await rpc.getCompressedAccountsByOwner( bob.publicKey, ); const postSenderBalance = postSenderAccs.items.reduce( (acc, account) => acc.add(account.lamports), bn(0), ); const postReceiverBalance = postReceiverAccs.items.reduce( (acc, account) => acc.add(account.lamports), bn(0), ); assert( postSenderBalance.sub(preSenderBalance).eq(bn(-transferAmount)), `Iteration ${round + 1}: Sender balance should decrease by ${transferAmount}`, ); assert( postReceiverBalance .sub(preReceiverBalance) .eq(bn(transferAmount)), `Iteration ${round + 1}: Receiver balance should increase by ${transferAmount}`, ); } }); it('getCompressedAccountsByOwner should match', async () => { const senderAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const senderAccountsTest = await 
testRpc.getCompressedAccountsByOwner( payer.publicKey, ); assert.equal( senderAccounts.items.length, senderAccountsTest.items.length, ); senderAccounts.items.forEach((account, index) => { assert.equal( account.owner.toBase58(), senderAccountsTest.items[index].owner.toBase58(), ); assert.isTrue( account.lamports.eq(senderAccountsTest.items[index].lamports), ); }); const receiverAccounts = await rpc.getCompressedAccountsByOwner( bob.publicKey, ); const receiverAccountsTest = await testRpc.getCompressedAccountsByOwner( bob.publicKey, ); assert.equal( receiverAccounts.items.length, receiverAccountsTest.items.length, ); receiverAccounts.items.sort((a, b) => a.lamports.sub(b.lamports).toNumber(), ); receiverAccountsTest.items.sort((a, b) => a.lamports.sub(b.lamports).toNumber(), ); receiverAccounts.items.forEach((account, index) => { assert.equal( account.owner.toBase58(), receiverAccountsTest.items[index].owner.toBase58(), ); assert.isTrue( account.lamports.eq(receiverAccountsTest.items[index].lamports), ); }); }); it('getCompressedAccount should match ', async () => { const senderAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const compressedAccount = await rpc.getCompressedAccount( undefined, bn(senderAccounts.items[0].hash), ); const compressedAccountTest = await testRpc.getCompressedAccount( undefined, bn(senderAccounts.items[0].hash), ); assert.isTrue( compressedAccount!.lamports.eq(compressedAccountTest!.lamports), ); assert.isTrue( compressedAccount!.owner.equals(compressedAccountTest!.owner), ); assert.isNull(compressedAccount!.data); assert.isNull(compressedAccountTest!.data); }); it('getMultipleCompressedAccounts should match', async () => { await compress(rpc, payer, 1e9, payer.publicKey); executedTxs++; const senderAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const compressedAccounts = await rpc.getMultipleCompressedAccounts( senderAccounts.items.map(account => bn(account.hash)), ); const 
compressedAccountsTest = await testRpc.getMultipleCompressedAccounts( senderAccounts.items.map(account => bn(account.hash)), ); assert.equal(compressedAccounts.length, compressedAccountsTest.length); compressedAccounts.forEach((account, index) => { assert.isTrue( account.lamports.eq(compressedAccountsTest[index].lamports), ); assert.equal( account.owner.toBase58(), compressedAccountsTest[index].owner.toBase58(), ); assert.isNull(account.data); assert.isNull(compressedAccountsTest[index].data); }); }); it('[test-rpc missing] getCompressionSignaturesForAccount should match', async () => { const senderAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const signaturesUnspent = await rpc.getCompressionSignaturesForAccount( bn(senderAccounts.items[0].hash), ); /// most recent therefore unspent account assert.equal(signaturesUnspent.length, 1); /// Note: assumes largest-first selection mechanism const largestAccount = senderAccounts.items.reduce((acc, account) => account.lamports.gt(acc.lamports) ? account : acc, ); await transfer(rpc, payer, 1, payer, bob.publicKey); executedTxs++; const signaturesSpent = await rpc.getCompressionSignaturesForAccount( bn(largestAccount.hash), ); /// 1 spent account, so always 2 signatures. 
assert.equal(signaturesSpent.length, 2); }); it('[test-rpc missing] getSignaturesForOwner should match', async () => { const signatures = await rpc.getCompressionSignaturesForOwner( payer.publicKey, ); assert.equal(signatures.items.length, executedTxs); }); it('[test-rpc missing] getLatestNonVotingSignatures should match', async () => { const testEnvSetupTxs = 2; let signatures = (await rpc.getLatestNonVotingSignatures()).value.items; assert.isAtLeast(signatures.length, executedTxs + testEnvSetupTxs); signatures = (await rpc.getLatestNonVotingSignatures(2)).value.items; assert.equal(signatures.length, 2); }); it('[test-rpc missing] getLatestCompressionSignatures should match', async () => { const { items: signatures } = ( await rpc.getLatestCompressionSignatures() ).value; assert.isAtLeast(signatures.length, executedTxs); /// Shoudl return 1 using limit param const { items: signatures2, cursor } = ( await rpc.getLatestCompressionSignatures(undefined, 1) ).value; assert.equal(signatures2.length, 1); // wait for photon to be in sync await sleep(3000); const signatures3 = ( await rpc.getLatestCompressionSignatures(cursor!, 1) ).value.items; /// cursor should work assert.notEqual(signatures2[0].signature, signatures3[0].signature); }); it('[test-rpc missing] getCompressedTransaction should match', async () => { const signatures = await rpc.getCompressionSignaturesForOwner( payer.publicKey, ); const compressedTx = await rpc.getTransactionWithCompressionInfo( signatures.items[0].signature, ); /// is transfer assert.equal(compressedTx?.compressionInfo.closedAccounts.length, 1); assert.equal(compressedTx?.compressionInfo.openedAccounts.length, 2); }); it('[test-rpc missing] getCompressionSignaturesForAddress should work', async () => { const seeds = [new Uint8Array(randomBytes(32))]; const seed = deriveAddressSeed(seeds, LightSystemProgram.programId); const addressTree = defaultTestStateTreeAccounts().addressTree; const address = deriveAddress(seed, addressTree); await 
createAccount(rpc, payer, seeds, LightSystemProgram.programId); // fetch the owners latest account const accounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const latestAccount = accounts.items[0]; // assert the address was indexed assert.isTrue(new PublicKey(latestAccount.address!).equals(address)); const signaturesUnspent = await rpc.getCompressionSignaturesForAddress( new PublicKey(latestAccount.address!), ); /// most recent therefore unspent account assert.equal(signaturesUnspent.items.length, 1); }); it('getCompressedAccount with address param should work ', async () => { const seeds = [new Uint8Array(randomBytes(32))]; const seed = deriveAddressSeed(seeds, LightSystemProgram.programId); const addressTree = defaultTestStateTreeAccounts().addressTree; const addressQueue = defaultTestStateTreeAccounts().addressQueue; const address = deriveAddress(seed, addressTree); await createAccount( rpc, payer, seeds, LightSystemProgram.programId, addressTree, addressQueue, ); // fetch the owners latest account const accounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const latestAccount = accounts.items[0]; assert.isTrue(new PublicKey(latestAccount.address!).equals(address)); const compressedAccountByHash = await rpc.getCompressedAccount( undefined, bn(latestAccount.hash), ); const compressedAccountByAddress = await rpc.getCompressedAccount( bn(latestAccount.address!), undefined, ); await expect( testRpc.getCompressedAccount(bn(latestAccount.address!), undefined), ).rejects.toThrow(); assert.isTrue( bn(compressedAccountByHash!.address!).eq( bn(compressedAccountByAddress!.address!), ), ); }); });
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/browser/rpc.browser.spec.ts
import { test, expect } from '@playwright/test'; import { Rpc, bn, compress, createRpc, defaultTestStateTreeAccounts, newAccountWithLamports, } from '../../../src'; test.describe('RPC in browser', () => { const { merkleTree } = defaultTestStateTreeAccounts(); test.beforeAll(async ({ page }) => { try { const rpc = createRpc(); const payer = await newAccountWithLamports(rpc, 1000005000, 100); await page.goto( 'http://localhost:4004/tests/e2e/browser/test-page.html', ); await page.waitForFunction( () => (window as any).stateless !== undefined, ); await compress(rpc, payer, 1e9, payer.publicKey, merkleTree); } catch (error) { console.log('error: ', error); } }); test.only('getCompressedAccountsByOwner', async ({ page }) => { const result = await page.evaluate(async () => { // @ts-ignore const sdk = window.stateless; const rpc: Rpc = sdk.createRpc(); const payer = sdk.getTestKeypair(100); const compressedAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); if (!compressedAccounts) throw new Error('No compressed accounts found'); return compressedAccounts; }); expect(result.length).toEqual(1); }); test('getCompressedAccount', async ({ page }) => { const result = await page.evaluate(async () => { //@ts-ignore const sdk = window.stateless; const rpc: Rpc = sdk.createRpc(); const payer = sdk.getTestKeypair(100); const compressedAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const hash = compressedAccounts[0].hash; //@ts-ignore const sdk2 = window.stateless; const rpc2: Rpc = sdk2.createRpc(); let account: any; try { account = await rpc2.getCompressedAccount(bn(hash)); } catch (error) { console.log('error: ', error); throw error; } if (!account) throw new Error('No compressed account found'); return { account, owner: payer.publicKey }; }); expect(result.account.owner.equals(result.owner)).toBeTruthy(); }); test('getMultipleCompressedAccounts', async ({ page }) => { const result = await page.evaluate(async () => { //@ts-ignore const 
sdk = window.stateless; const rpc: Rpc = sdk.createRpc(); const payer = sdk.getTestKeypair(100); const compressedAccounts = await rpc.getCompressedAccountsByOwner( payer.publicKey, ); const hashes = compressedAccounts.map(account => bn(account.hash)); const accounts = await rpc.getMultipleCompressedAccounts(hashes); if (!accounts || accounts.length === 0) throw new Error('No compressed accounts found'); return accounts; }); expect(result.length).toBeGreaterThan(0); }); // TODO: enable // test('getCompressedTokenAccountsByOwner', async ({ page }) => { // const result = await page.evaluate(async () => { // //@ts-ignore // const sdk = window.stateless; // const rpc = sdk.createRpc(); // const payer = sdk.getTestKeypair(100); // const compressedAccounts = await rpc.getCompressedAccountsByOwner( // payer.publicKey, // ); // const hash = compressedAccounts[0].hash; // const accounts = await rpc.getCompressedTokenAccountsByOwner(owner); // if (!accounts || accounts.length === 0) // throw new Error('No token accounts found'); // return accounts; // }); // assert.isTrue(result.length > 0); // }); test('getHealth', async ({ page }) => { const result = await page.evaluate(async () => { //@ts-ignore const sdk = window.stateless; const rpc: Rpc = sdk.createRpc(); const health = await rpc.getHealth(); if (!health) throw new Error('Health check failed'); return health; }); expect(result).toEqual('ok'); }); });
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/tests/e2e/browser/test-page.html
<!doctype html> <html lang="en"> <head> <meta charset="UTF-8" /> <title>Test Page</title> </head> <body> <!-- <script type="module" src="/dist/es/index.js"></script> --> <script type="module"> import * as stateless from '/dist/browser/index.js'; window.stateless = stateless; console.log('HTML stateless: ', stateless); </script> </body> </html>
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/errors.ts
// TODO: Clean up export enum UtxoErrorCode { NEGATIVE_LAMPORTS = 'NEGATIVE_LAMPORTS', NOT_U64 = 'NOT_U64', BLINDING_EXCEEDS_FIELD_SIZE = 'BLINDING_EXCEEDS_FIELD_SIZE', } export enum SelectInUtxosErrorCode { FAILED_TO_FIND_UTXO_COMBINATION = 'FAILED_TO_FIND_UTXO_COMBINATION', INVALID_NUMBER_OF_IN_UTXOS = 'INVALID_NUMBER_OF_IN_UTXOS', } export enum CreateUtxoErrorCode { OWNER_UNDEFINED = 'OWNER_UNDEFINED', INVALID_OUTPUT_UTXO_LENGTH = 'INVALID_OUTPUT_UTXO_LENGTH', UTXO_DATA_UNDEFINED = 'UTXO_DATA_UNDEFINED', } export enum RpcErrorCode { CONNECTION_UNDEFINED = 'CONNECTION_UNDEFINED', RPC_PUBKEY_UNDEFINED = 'RPC_PUBKEY_UNDEFINED', RPC_METHOD_NOT_IMPLEMENTED = 'RPC_METHOD_NOT_IMPLEMENTED', RPC_INVALID = 'RPC_INVALID', } export enum LookupTableErrorCode { LOOK_UP_TABLE_UNDEFINED = 'LOOK_UP_TABLE_UNDEFINED', LOOK_UP_TABLE_NOT_INITIALIZED = 'LOOK_UP_TABLE_NOT_INITIALIZED', } export enum HashErrorCode { NO_POSEIDON_HASHER_PROVIDED = 'NO_POSEIDON_HASHER_PROVIDED', } export enum ProofErrorCode { INVALID_PROOF = 'INVALID_PROOF', PROOF_INPUT_UNDEFINED = 'PROOF_INPUT_UNDEFINED', PROOF_GENERATION_FAILED = 'PROOF_GENERATION_FAILED', } export enum MerkleTreeErrorCode { MERKLE_TREE_NOT_INITIALIZED = 'MERKLE_TREE_NOT_INITIALIZED', SOL_MERKLE_TREE_UNDEFINED = 'SOL_MERKLE_TREE_UNDEFINED', MERKLE_TREE_UNDEFINED = 'MERKLE_TREE_UNDEFINED', INPUT_UTXO_NOT_INSERTED_IN_MERKLE_TREE = 'INPUT_UTXO_NOT_INSERTED_IN_MERKLE_TREE', MERKLE_TREE_INDEX_UNDEFINED = 'MERKLE_TREE_INDEX_UNDEFINED', MERKLE_TREE_SET_SPACE_UNDEFINED = 'MERKLE_TREE_SET_SPACE_UNDEFINED', } export enum UtilsErrorCode { ACCOUNT_NAME_UNDEFINED_IN_IDL = 'ACCOUNT_NAME_UNDEFINED_IN_IDL', PROPERTY_UNDEFINED = 'PROPERTY_UNDEFINED', LOOK_UP_TABLE_CREATION_FAILED = 'LOOK_UP_TABLE_CREATION_FAILED', UNSUPPORTED_ARCHITECTURE = 'UNSUPPORTED_ARCHITECTURE', UNSUPPORTED_PLATFORM = 'UNSUPPORTED_PLATFORM', ACCOUNTS_UNDEFINED = 'ACCOUNTS_UNDEFINED', INVALID_NUMBER = 'INVALID_NUMBER', } class MetaError extends Error { code: string; functionName: 
string; codeMessage?: string; constructor(code: string, functionName: string, codeMessage?: string) { super(`${code}: ${codeMessage}`); this.code = code; this.functionName = functionName; this.codeMessage = codeMessage; } } export class UtxoError extends MetaError {} export class SelectInUtxosError extends MetaError {} export class CreateUtxoError extends MetaError {} export class RpcError extends MetaError {} export class LookupTableError extends MetaError {} export class HashError extends MetaError {} export class ProofError extends MetaError {} export class MerkleTreeError extends MetaError {} export class UtilsError extends MetaError {}
0
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js
solana_public_repos/Lightprotocol/light-protocol/js/stateless.js/src/rpc.ts
import {
    Connection,
    ConnectionConfig,
    SolanaJSONRPCError,
    PublicKey,
} from '@solana/web3.js';
import { Buffer } from 'buffer';
import {
    BalanceResult,
    CompressedAccountResult,
    CompressedAccountsByOwnerResult,
    CompressedProofWithContext,
    CompressedTokenAccountsByOwnerOrDelegateResult,
    CompressedTransaction,
    CompressedTransactionResult,
    CompressionApiInterface,
    GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    HealthResult,
    HexInputsForProver,
    MerkeProofResult,
    MultipleCompressedAccountsResult,
    NativeBalanceResult,
    ParsedTokenAccount,
    SignatureListResult,
    SignatureListWithCursorResult,
    SignatureWithMetadata,
    SlotResult,
    TokenBalanceListResult,
    jsonRpcResult,
    jsonRpcResultAndContext,
    ValidityProofResult,
    NewAddressProofResult,
    LatestNonVotingSignaturesResult,
    LatestNonVotingSignatures,
    LatestNonVotingSignaturesResultPaginated,
    LatestNonVotingSignaturesPaginated,
    WithContext,
    GetCompressedAccountsByOwnerConfig,
    WithCursor,
    AddressWithTree,
    HashWithTree,
    CompressedMintTokenHoldersResult,
    CompressedMintTokenHolders,
    TokenBalance,
    TokenBalanceListResultV2,
    PaginatedOptions,
} from './rpc-interface';
import {
    MerkleContextWithMerkleProof,
    BN254,
    bn,
    CompressedAccountWithMerkleContext,
    encodeBN254toBase58,
    createCompressedAccountWithMerkleContext,
    createMerkleContext,
    TokenData,
    CompressedProof,
} from './state';
import { array, create, nullable } from 'superstruct';
import { defaultTestStateTreeAccounts } from './constants';
import { BN } from '@coral-xyz/anchor';
import { toCamelCase, toHex } from './utils/conversion';
import {
    proofFromJsonStruct,
    negateAndCompressProof,
} from './utils/parse-validity-proof';

/**
 * Normalize raw indexer account data into the in-memory shape:
 * little-endian byte arrays for discriminator (8 bytes) and dataHash
 * (32 bytes), and the base64 `data` string decoded into a Buffer.
 *
 * @internal
 */
export function parseAccountData({
    discriminator,
    data,
    dataHash,
}: {
    discriminator: BN;
    data: string;
    dataHash: BN;
}) {
    return {
        discriminator: discriminator.toArray('le', 8),
        data: Buffer.from(data, 'base64'),
        dataHash: dataHash.toArray('le', 32),
    };
}

/**
 * Shared fetch path for compressed token accounts, keyed either by owner or
 * by delegate. Selects the indexer endpoint and the response property to
 * cross-check from `filterByDelegate`, rebuilds each item into a
 * `ParsedTokenAccount`, and returns the list sorted by `leafIndex`
 * descending (most recent first).
 *
 * @internal
 */
async function getCompressedTokenAccountsByOwnerOrDelegate(
    rpc: Rpc,
    ownerOrDelegate: PublicKey,
    options: GetCompressedTokenAccountsByOwnerOrDelegateOptions,
    filterByDelegate: boolean = false,
): Promise<WithCursor<ParsedTokenAccount[]>> {
    const endpoint = filterByDelegate
        ? 'getCompressedTokenAccountsByDelegate'
        : 'getCompressedTokenAccountsByOwner';
    const propertyToCheck = filterByDelegate ? 'delegate' : 'owner';
    const unsafeRes = await rpcRequest(rpc.compressionApiEndpoint, endpoint, {
        [propertyToCheck]: ownerOrDelegate.toBase58(),
        mint: options.mint?.toBase58(),
        limit: options.limit?.toNumber(),
        cursor: options.cursor,
    });
    // Validate the untyped JSON-RPC payload against the expected struct.
    const res = create(
        unsafeRes,
        jsonRpcResultAndContext(CompressedTokenAccountsByOwnerOrDelegateResult),
    );
    if ('error' in res) {
        throw new SolanaJSONRPCError(
            res.error,
            `failed to get info for compressed accounts by ${propertyToCheck} ${ownerOrDelegate.toBase58()}`,
        );
    }
    if (res.result.value === null) {
        throw new Error('not implemented: NULL result');
    }
    const accounts: ParsedTokenAccount[] = [];
    // NOTE: .map used purely for its side effect of pushing into `accounts`.
    res.result.value.items.map(item => {
        const _account = item.account;
        const _tokenData = item.tokenData;
        const compressedAccount: CompressedAccountWithMerkleContext =
            createCompressedAccountWithMerkleContext(
                createMerkleContext(
                    _account.tree!,
                    mockNullifierQueue,
                    _account.hash.toArray('be', 32),
                    _account.leafIndex,
                ),
                _account.owner,
                bn(_account.lamports),
                _account.data ? parseAccountData(_account.data) : undefined,
                _account.address || undefined,
            );
        const parsed: TokenData = {
            mint: _tokenData.mint,
            owner: _tokenData.owner,
            amount: _tokenData.amount,
            delegate: _tokenData.delegate,
            // Map the indexer's string state onto its numeric enum index.
            state: ['uninitialized', 'initialized', 'frozen'].indexOf(
                _tokenData.state,
            ),
            tlv: null,
        };
        // Defensive cross-check: the indexer must only return accounts for
        // the queried owner/delegate.
        if (
            parsed[propertyToCheck]?.toBase58() !== ownerOrDelegate.toBase58()
        ) {
            throw new Error(
                `RPC returned token account with ${propertyToCheck} different from requested ${propertyToCheck}`,
            );
        }
        accounts.push({
            compressedAccount,
            parsed,
        });
    });
    /// TODO: consider custom or different sort. Most recent here.
    return {
        items: accounts.sort(
            (a, b) =>
                b.compressedAccount.leafIndex - a.compressedAccount.leafIndex,
        ),
        cursor: res.result.value.cursor,
    };
}

/**
 * Rebuild a compressed account, plus its token-layer data when present,
 * from an indexer transaction item (`{ account, optionalTokenData }`).
 *
 * @internal
 */
function buildCompressedAccountWithMaybeTokenData(
    accountStructWithOptionalTokenData: any,
): {
    account: CompressedAccountWithMerkleContext;
    maybeTokenData: TokenData | null;
} {
    const compressedAccountResult = accountStructWithOptionalTokenData.account;
    const tokenDataResult =
        accountStructWithOptionalTokenData.optionalTokenData;
    const compressedAccount: CompressedAccountWithMerkleContext =
        createCompressedAccountWithMerkleContext(
            createMerkleContext(
                compressedAccountResult.merkleTree,
                mockNullifierQueue,
                compressedAccountResult.hash.toArray('be', 32),
                compressedAccountResult.leafIndex,
            ),
            compressedAccountResult.owner,
            bn(compressedAccountResult.lamports),
            compressedAccountResult.data
                ? parseAccountData(compressedAccountResult.data)
                : undefined,
            compressedAccountResult.address || undefined,
        );
    if (tokenDataResult === null) {
        return { account: compressedAccount, maybeTokenData: null };
    }
    const parsed: TokenData = {
        mint: tokenDataResult.mint,
        owner: tokenDataResult.owner,
        amount: tokenDataResult.amount,
        delegate: tokenDataResult.delegate,
        state: ['uninitialized', 'initialized', 'frozen'].indexOf(
            tokenDataResult.state,
        ),
        tlv: null,
    };
    return { account: compressedAccount, maybeTokenData: parsed };
}

/**
 * Establish a Compression-compatible JSON RPC connection
 *
 * @param endpointOrWeb3JsConnection endpoint to the solana cluster or
 * Connection object
 * @param compressionApiEndpoint Endpoint to the compression server
 * @param proverEndpoint Endpoint to the prover server.
 defaults
 * to endpoint
 * @param connectionConfig Optional connection config
 */
export function createRpc(
    endpointOrWeb3JsConnection: string | Connection = 'http://127.0.0.1:8899',
    compressionApiEndpoint: string = 'http://127.0.0.1:8784',
    proverEndpoint: string = 'http://127.0.0.1:3001',
    config?: ConnectionConfig,
): Rpc {
    // Accept either a raw endpoint string or an existing Connection; only
    // its rpcEndpoint is reused (the Connection itself is not wrapped).
    const endpoint =
        typeof endpointOrWeb3JsConnection === 'string'
            ? endpointOrWeb3JsConnection
            : endpointOrWeb3JsConnection.rpcEndpoint;
    return new Rpc(endpoint, compressionApiEndpoint, proverEndpoint, config);
}

/**
 * POST a JSON-RPC 2.0 request to the given endpoint and return the parsed
 * response body. With `debug`, logs a stack trace and an equivalent curl
 * command before sending. Keys are converted to camelCase unless
 * `convertToCamelCase` is false.
 *
 * @internal
 */
export const rpcRequest = async (
    rpcEndpoint: string,
    method: string,
    params: any = [],
    convertToCamelCase = true,
    debug = false,
): Promise<any> => {
    const body = JSON.stringify({
        jsonrpc: '2.0',
        id: 'test-account',
        method: method,
        params: params,
    });
    if (debug) {
        const generateCurlSnippet = () => {
            const escapedBody = body.replace(/"/g, '\\"');
            return `curl -X POST ${rpcEndpoint} \\
    -H "Content-Type: application/json" \\
    -d "${escapedBody}"`;
        };
        console.log('Debug: Stack trace:');
        console.log(new Error().stack);
        console.log('\nDebug: curl:');
        console.log(generateCurlSnippet());
        console.log('\n');
    }
    const response = await fetch(rpcEndpoint, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: body,
    });
    if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
    }
    if (convertToCamelCase) {
        const res = await response.json();
        return toCamelCase(res);
    }
    return await response.json();
};

/**
 * POST a proof request to the prover's `/prove` endpoint and return the
 * negated, compressed proof. The request body shape depends on `method`:
 * inclusion proofs, new-address (non-inclusion) proofs, or both combined.
 * With `log`, measures proof-generation wall time via console.time.
 *
 * @internal
 */
export const proverRequest = async (
    proverEndpoint: string,
    method: 'inclusion' | 'new-address' | 'combined',
    params: any = [],
    log = false,
): Promise<CompressedProof> => {
    let logMsg: string = '';
    if (log) {
        logMsg = `Proof generation for method:${method}`;
        console.time(logMsg);
    }
    let body;
    if (method === 'inclusion') {
        body = JSON.stringify({ 'input-compressed-accounts': params });
    } else if (method === 'new-address') {
        body = JSON.stringify({ 'new-addresses': params });
    } else if (method === 'combined') {
        // Combined expects [inclusionInputs, newAddressInputs].
        body = JSON.stringify({
            'input-compressed-accounts': params[0],
            'new-addresses': params[1],
        });
    }
    const response = await fetch(`${proverEndpoint}/prove`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: body,
    });
    if (!response.ok) {
        throw new Error(`Error fetching proof: ${response.statusText}`);
    }
    const data: any = await response.json();
    const parsed = proofFromJsonStruct(data);
    const compressedProof = negateAndCompressProof(parsed);
    if (log) console.timeEnd(logMsg);
    return compressedProof;
};

// Inputs for a non-inclusion (new address) merkle proof, prover naming.
export type NonInclusionMerkleProofInputs = {
    root: BN;
    value: BN;
    leaf_lower_range_value: BN;
    leaf_higher_range_value: BN;
    nextIndex: BN;
    merkle_proof_hashed_indexed_element_leaf: BN[];
    index_hashed_indexed_element_leaf: BN;
};

// A new-address proof together with its tree/queue context.
export type MerkleContextWithNewAddressProof = {
    root: BN;
    rootIndex: number;
    value: BN;
    leafLowerRangeValue: BN;
    leafHigherRangeValue: BN;
    nextIndex: BN;
    merkleProofHashedIndexedElementLeaf: BN[];
    indexHashedIndexedElementLeaf: BN;
    merkleTree: PublicKey;
    nullifierQueue: PublicKey;
};

// Hex-string form of a non-inclusion proof as sent to the prover.
export type NonInclusionJsonStruct = {
    root: string;
    value: string;
    pathIndex: number;
    pathElements: string[];
    leafLowerRangeValue: string;
    leafHigherRangeValue: string;
    nextIndex: number;
};

// Convert inclusion merkle proofs to the hex-encoded prover input shape.
export function convertMerkleProofsWithContextToHex(
    merkleProofsWithContext:
MerkleContextWithMerkleProof[], ): HexInputsForProver[] { const inputs: HexInputsForProver[] = []; for (let i = 0; i < merkleProofsWithContext.length; i++) { const input: HexInputsForProver = { root: toHex(merkleProofsWithContext[i].root), pathIndex: merkleProofsWithContext[i].leafIndex, pathElements: merkleProofsWithContext[i].merkleProof.map(hex => toHex(hex), ), leaf: toHex(bn(merkleProofsWithContext[i].hash)), }; inputs.push(input); } return inputs; } export function convertNonInclusionMerkleProofInputsToHex( nonInclusionMerkleProofInputs: MerkleContextWithNewAddressProof[], ): NonInclusionJsonStruct[] { const inputs: NonInclusionJsonStruct[] = []; for (let i = 0; i < nonInclusionMerkleProofInputs.length; i++) { const input: NonInclusionJsonStruct = { root: toHex(nonInclusionMerkleProofInputs[i].root), value: toHex(nonInclusionMerkleProofInputs[i].value), pathIndex: nonInclusionMerkleProofInputs[ i ].indexHashedIndexedElementLeaf.toNumber(), pathElements: nonInclusionMerkleProofInputs[ i ].merkleProofHashedIndexedElementLeaf.map(hex => toHex(hex)), nextIndex: nonInclusionMerkleProofInputs[i].nextIndex.toNumber(), leafLowerRangeValue: toHex( nonInclusionMerkleProofInputs[i].leafLowerRangeValue, ), leafHigherRangeValue: toHex( nonInclusionMerkleProofInputs[i].leafHigherRangeValue, ), }; inputs.push(input); } return inputs; } /// TODO: replace with dynamic nullifierQueue const mockNullifierQueue = defaultTestStateTreeAccounts().nullifierQueue; const mockAddressQueue = defaultTestStateTreeAccounts().addressQueue; /** * */ export class Rpc extends Connection implements CompressionApiInterface { compressionApiEndpoint: string; proverEndpoint: string; /** * Establish a Compression-compatible JSON RPC connection * * @param endpoint Endpoint to the solana cluster * @param compressionApiEndpoint Endpoint to the compression server * @param proverEndpoint Endpoint to the prover server. 
* @param connectionConfig Optional connection config */ constructor( endpoint: string, compressionApiEndpoint: string, proverEndpoint: string, config?: ConnectionConfig, ) { super(endpoint, config || 'confirmed'); this.compressionApiEndpoint = compressionApiEndpoint; this.proverEndpoint = proverEndpoint; } /** * Fetch the compressed account for the specified account address or hash */ async getCompressedAccount( address?: BN254, hash?: BN254, ): Promise<CompressedAccountWithMerkleContext | null> { if (!hash && !address) { throw new Error('Either hash or address must be provided'); } if (hash && address) { throw new Error('Only one of hash or address must be provided'); } const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedAccount', { hash: hash ? encodeBN254toBase58(hash) : undefined, address: address ? encodeBN254toBase58(address) : undefined, }, ); const res = create( unsafeRes, jsonRpcResultAndContext(nullable(CompressedAccountResult)), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get info for compressed account ${hash ? hash.toString() : address ? address.toString() : ''}`, ); } if (res.result.value === null) { return null; } const item = res.result.value; const account = createCompressedAccountWithMerkleContext( createMerkleContext( item.tree!, mockNullifierQueue, item.hash.toArray('be', 32), item.leafIndex, ), item.owner, bn(item.lamports), item.data ? parseAccountData(item.data) : undefined, item.address || undefined, ); return account; } /** * Fetch the compressed balance for the specified account address or hash */ async getCompressedBalance(address?: BN254, hash?: BN254): Promise<BN> { if (!hash && !address) { throw new Error('Either hash or address must be provided'); } if (hash && address) { throw new Error('Only one of hash or address must be provided'); } const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedBalance', { hash: hash ? 
encodeBN254toBase58(hash) : undefined, address: address ? encodeBN254toBase58(address) : undefined, }, ); const res = create( unsafeRes, jsonRpcResultAndContext(NativeBalanceResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get balance for compressed account ${hash ? hash.toString() : address ? address.toString() : ''}`, ); } if (res.result.value === null) { return bn(0); } return bn(res.result.value); } /// TODO: validate that this is just for sol accounts /** * Fetch the total compressed balance for the specified owner public key */ async getCompressedBalanceByOwner(owner: PublicKey): Promise<BN> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedBalanceByOwner', { owner: owner.toBase58() }, ); const res = create( unsafeRes, jsonRpcResultAndContext(NativeBalanceResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get balance for compressed account ${owner.toBase58()}`, ); } if (res.result.value === null) { return bn(0); } return bn(res.result.value); } /** * Fetch the latest merkle proof for the specified account hash from the * cluster */ async getCompressedAccountProof( hash: BN254, ): Promise<MerkleContextWithMerkleProof> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedAccountProof', { hash: encodeBN254toBase58(hash) }, ); const res = create( unsafeRes, jsonRpcResultAndContext(MerkeProofResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get proof for compressed account ${hash.toString()}`, ); } if (res.result.value === null) { throw new Error( `failed to get proof for compressed account ${hash.toString()}`, ); } const value: MerkleContextWithMerkleProof = { hash: res.result.value.hash.toArray('be', 32), merkleTree: res.result.value.merkleTree, leafIndex: res.result.value.leafIndex, merkleProof: res.result.value.proof, nullifierQueue: mockNullifierQueue, // TODO(photon): support nullifierQueue in 
response. rootIndex: res.result.value.rootSeq % 2400, root: res.result.value.root, }; return value; } /** * Fetch all the account info for multiple compressed accounts specified by * an array of account hashes */ async getMultipleCompressedAccounts( hashes: BN254[], ): Promise<CompressedAccountWithMerkleContext[]> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getMultipleCompressedAccounts', { hashes: hashes.map(hash => encodeBN254toBase58(hash)) }, ); const res = create( unsafeRes, jsonRpcResultAndContext(MultipleCompressedAccountsResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get info for compressed accounts ${hashes.map(hash => encodeBN254toBase58(hash)).join(', ')}`, ); } if (res.result.value === null) { throw new Error( `failed to get info for compressed accounts ${hashes.map(hash => encodeBN254toBase58(hash)).join(', ')}`, ); } const accounts: CompressedAccountWithMerkleContext[] = []; res.result.value.items.map(item => { const account = createCompressedAccountWithMerkleContext( createMerkleContext( item.tree!, mockNullifierQueue, item.hash.toArray('be', 32), item.leafIndex, ), item.owner, bn(item.lamports), item.data ? 
parseAccountData(item.data) : undefined, item.address || undefined, ); accounts.push(account); }); return accounts.sort((a, b) => b.leafIndex - a.leafIndex); } /** * Fetch the latest merkle proofs for multiple compressed accounts specified * by an array account hashes */ async getMultipleCompressedAccountProofs( hashes: BN254[], ): Promise<MerkleContextWithMerkleProof[]> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getMultipleCompressedAccountProofs', hashes.map(hash => encodeBN254toBase58(hash)), ); const res = create( unsafeRes, jsonRpcResultAndContext(array(MerkeProofResult)), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get proofs for compressed accounts ${hashes.map(hash => encodeBN254toBase58(hash)).join(', ')}`, ); } if (res.result.value === null) { throw new Error( `failed to get proofs for compressed accounts ${hashes.map(hash => encodeBN254toBase58(hash)).join(', ')}`, ); } const merkleProofs: MerkleContextWithMerkleProof[] = []; for (const proof of res.result.value) { const value: MerkleContextWithMerkleProof = { hash: proof.hash.toArray('be', 32), merkleTree: proof.merkleTree, leafIndex: proof.leafIndex, merkleProof: proof.proof, nullifierQueue: mockAddressQueue, // TODO(photon): support nullifierQueue in response. rootIndex: proof.rootSeq % 2400, root: proof.root, }; merkleProofs.push(value); } return merkleProofs; } /** * Fetch all the compressed accounts owned by the specified public key. 
* Owner can be a program or user account */ async getCompressedAccountsByOwner( owner: PublicKey, config?: GetCompressedAccountsByOwnerConfig | undefined, ): Promise<WithCursor<CompressedAccountWithMerkleContext[]>> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedAccountsByOwner', { owner: owner.toBase58(), filters: config?.filters || [], dataSlice: config?.dataSlice, cursor: config?.cursor, limit: config?.limit?.toNumber(), }, ); const res = create( unsafeRes, jsonRpcResultAndContext(CompressedAccountsByOwnerResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get info for compressed accounts owned by ${owner.toBase58()}`, ); } if (res.result.value === null) { return { items: [], cursor: null, }; } const accounts: CompressedAccountWithMerkleContext[] = []; res.result.value.items.map(item => { const account = createCompressedAccountWithMerkleContext( createMerkleContext( item.tree!, mockNullifierQueue, item.hash.toArray('be', 32), item.leafIndex, ), item.owner, bn(item.lamports), item.data ? parseAccountData(item.data) : undefined, item.address || undefined, ); accounts.push(account); }); return { items: accounts.sort((a, b) => b.leafIndex - a.leafIndex), cursor: res.result.value.cursor, }; } /** * Fetch all the compressed token accounts owned by the specified public * key. Owner can be a program or user account */ async getCompressedTokenAccountsByOwner( owner: PublicKey, options?: GetCompressedTokenAccountsByOwnerOrDelegateOptions, ): Promise<WithCursor<ParsedTokenAccount[]>> { if (!options) options = {}; return await getCompressedTokenAccountsByOwnerOrDelegate( this, owner, options, false, ); } /** * Fetch all the compressed accounts delegated to the specified public key. 
*/ async getCompressedTokenAccountsByDelegate( delegate: PublicKey, options?: GetCompressedTokenAccountsByOwnerOrDelegateOptions, ): Promise<WithCursor<ParsedTokenAccount[]>> { if (!options) options = {}; return getCompressedTokenAccountsByOwnerOrDelegate( this, delegate, options, true, ); } /** * Fetch the compressed token balance for the specified account hash */ async getCompressedTokenAccountBalance( hash: BN254, ): Promise<{ amount: BN }> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedTokenAccountBalance', { hash: encodeBN254toBase58(hash) }, ); const res = create(unsafeRes, jsonRpcResultAndContext(BalanceResult)); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get balance for compressed token account ${hash.toString()}`, ); } if (res.result.value === null) { throw new Error( `failed to get balance for compressed token account ${hash.toString()}`, ); } return { amount: bn(res.result.value.amount) }; } /** * @deprecated use {@link getCompressedTokenBalancesByOwnerV2} instead. * * Fetch all the compressed token balances owned by the specified public * key. Can filter by mint. Returns without context. */ async getCompressedTokenBalancesByOwner( owner: PublicKey, options?: GetCompressedTokenAccountsByOwnerOrDelegateOptions, ): Promise<WithCursor<TokenBalance[]>> { if (!options) options = {}; const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedTokenBalancesByOwner', { owner: owner.toBase58(), mint: options.mint?.toBase58(), limit: options.limit?.toNumber(), cursor: options.cursor, }, ); const res = create( unsafeRes, jsonRpcResultAndContext(TokenBalanceListResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get compressed token balances for owner ${owner.toBase58()}`, ); } if (res.result.value === null) { throw new Error( `failed to get compressed token balances for owner ${owner.toBase58()}`, ); } const maybeFiltered = options.mint ? 
res.result.value.tokenBalances.filter( tokenBalance => tokenBalance.mint.toBase58() === options.mint!.toBase58(), ) : res.result.value.tokenBalances; return { items: maybeFiltered, cursor: res.result.value.cursor, }; } /** * Fetch the compressed token balances owned by the specified public * key. Paginated. Can filter by mint. Returns with context. */ async getCompressedTokenBalancesByOwnerV2( owner: PublicKey, options?: GetCompressedTokenAccountsByOwnerOrDelegateOptions, ): Promise<WithContext<WithCursor<TokenBalance[]>>> { if (!options) options = {}; const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedTokenBalancesByOwnerV2', { owner: owner.toBase58(), mint: options.mint?.toBase58(), limit: options.limit?.toNumber(), cursor: options.cursor, }, ); const res = create( unsafeRes, jsonRpcResultAndContext(TokenBalanceListResultV2), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get compressed token balances for owner ${owner.toBase58()}`, ); } if (res.result.value === null) { throw new Error( `failed to get compressed token balances for owner ${owner.toBase58()}`, ); } const maybeFiltered = options.mint ? 
res.result.value.items.filter( tokenBalance => tokenBalance.mint.toBase58() === options.mint!.toBase58(), ) : res.result.value.items; return { context: res.result.context, value: { items: maybeFiltered, cursor: res.result.value.cursor, }, }; } /** * Returns confirmed compression signatures for transactions involving the specified * account hash forward in time from genesis to the most recent confirmed * block * * @param hash queried account hash */ async getCompressionSignaturesForAccount( hash: BN254, ): Promise<SignatureWithMetadata[]> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressionSignaturesForAccount', { hash: encodeBN254toBase58(hash) }, ); const res = create( unsafeRes, jsonRpcResultAndContext(SignatureListResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get signatures for compressed account ${hash.toString()}`, ); } return res.result.value.items; } /** * Fetch a confirmed or finalized transaction from the cluster. 
Return with * CompressionInfo */ async getTransactionWithCompressionInfo( signature: string, ): Promise<CompressedTransaction | null> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getTransactionWithCompressionInfo', { signature }, ); const res = create( unsafeRes, jsonRpcResult(CompressedTransactionResult), ); if ('error' in res) { throw new SolanaJSONRPCError(res.error, 'failed to get slot'); } if (res.result.transaction === null) return null; const closedAccounts: { account: CompressedAccountWithMerkleContext; maybeTokenData: TokenData | null; }[] = []; const openedAccounts: { account: CompressedAccountWithMerkleContext; maybeTokenData: TokenData | null; }[] = []; res.result.compressionInfo.closedAccounts.map(item => { closedAccounts.push(buildCompressedAccountWithMaybeTokenData(item)); }); res.result.compressionInfo.openedAccounts.map(item => { openedAccounts.push(buildCompressedAccountWithMaybeTokenData(item)); }); const calculateTokenBalances = ( accounts: Array<{ account: CompressedAccountWithMerkleContext; maybeTokenData: TokenData | null; }>, ): | Array<{ owner: PublicKey; mint: PublicKey; amount: BN; }> | undefined => { const balances = Object.values( accounts.reduce( (acc, { maybeTokenData }) => { if (maybeTokenData) { const { owner, mint, amount } = maybeTokenData; const key = `${owner.toBase58()}_${mint.toBase58()}`; if (key in acc) { acc[key].amount = acc[key].amount.add(amount); } else { acc[key] = { owner, mint, amount }; } } return acc; }, {} as { [key: string]: { owner: PublicKey; mint: PublicKey; amount: BN; }; }, ), ); return balances.length > 0 ? 
balances : undefined; }; const preTokenBalances = calculateTokenBalances(closedAccounts); const postTokenBalances = calculateTokenBalances(openedAccounts); return { compressionInfo: { closedAccounts, openedAccounts, preTokenBalances, postTokenBalances, }, transaction: res.result.transaction, }; } /** * Returns confirmed signatures for transactions involving the specified * address forward in time from genesis to the most recent confirmed block * * @param address queried compressed account address */ async getCompressionSignaturesForAddress( address: PublicKey, options?: PaginatedOptions, ): Promise<WithCursor<SignatureWithMetadata[]>> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressionSignaturesForAddress', { address: address.toBase58(), cursor: options?.cursor, limit: options?.limit?.toNumber(), }, ); const res = create( unsafeRes, jsonRpcResultAndContext(SignatureListWithCursorResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get signatures for address ${address.toBase58()}`, ); } if (res.result.value === null) { throw new Error( `failed to get signatures for address ${address.toBase58()}`, ); } return res.result.value; } /** * Returns confirmed signatures for compression transactions involving the * specified account owner forward in time from genesis to the * most recent confirmed block * * @param owner queried owner public key */ async getCompressionSignaturesForOwner( owner: PublicKey, options?: PaginatedOptions, ): Promise<WithCursor<SignatureWithMetadata[]>> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressionSignaturesForOwner', { owner: owner.toBase58(), cursor: options?.cursor, limit: options?.limit?.toNumber(), }, ); const res = create( unsafeRes, jsonRpcResultAndContext(SignatureListWithCursorResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get signatures for owner ${owner.toBase58()}`, ); } if (res.result.value === 
null) { throw new Error( `failed to get signatures for owner ${owner.toBase58()}`, ); } return res.result.value; } /** * Returns confirmed signatures for compression transactions involving the * specified token account owner forward in time from genesis to the most * recent confirmed block */ async getCompressionSignaturesForTokenOwner( owner: PublicKey, options?: PaginatedOptions, ): Promise<WithCursor<SignatureWithMetadata[]>> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressionSignaturesForTokenOwner', { owner: owner.toBase58(), cursor: options?.cursor, limit: options?.limit?.toNumber(), }, ); const res = create( unsafeRes, jsonRpcResultAndContext(SignatureListWithCursorResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get signatures for owner ${owner.toBase58()}`, ); } if (res.result.value === null) { throw new Error( `failed to get signatures for owner ${owner.toBase58()}`, ); } return res.result.value; } /** * Fetch the current indexer health status */ async getIndexerHealth(): Promise<string> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getIndexerHealth', ); const res = create(unsafeRes, jsonRpcResult(HealthResult)); if ('error' in res) { throw new SolanaJSONRPCError(res.error, 'failed to get health'); } return res.result; } /** * Ensure that the Compression Indexer has already indexed the transaction */ async confirmTransactionIndexed(slot: number): Promise<boolean> { const startTime = Date.now(); // eslint-disable-next-line no-constant-condition while (true) { const indexerSlot = await this.getIndexerSlot(); if (indexerSlot >= slot) { return true; } if (Date.now() - startTime > 20000) { // 20 seconds throw new Error( 'Timeout: Indexer slot did not reach the required slot within 20 seconds', ); } await new Promise(resolve => setTimeout(resolve, 200)); } } /** * Fetch the current slot that the node is processing */ async getIndexerSlot(): Promise<number> { const 
unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getIndexerSlot', ); const res = create(unsafeRes, jsonRpcResult(SlotResult)); if ('error' in res) { throw new SolanaJSONRPCError(res.error, 'failed to get slot'); } return res.result; } /** * Fetch all the compressed token holders for a given mint. Paginated. */ async getCompressedMintTokenHolders( mint: PublicKey, options?: PaginatedOptions, ): Promise<WithContext<WithCursor<CompressedMintTokenHolders[]>>> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getCompressedMintTokenHolders', { mint: mint.toBase58(), cursor: options?.cursor, limit: options?.limit?.toNumber(), }, ); const res = create( unsafeRes, jsonRpcResultAndContext(CompressedMintTokenHoldersResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, 'failed to get mint token holders', ); } return res.result; } /** * Fetch the latest compression signatures on the cluster. Results are * paginated. */ async getLatestCompressionSignatures( cursor?: string, limit?: number, ): Promise<LatestNonVotingSignaturesPaginated> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getLatestCompressionSignatures', { limit, cursor }, ); const res = create( unsafeRes, jsonRpcResultAndContext(LatestNonVotingSignaturesResultPaginated), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, 'failed to get latest non-voting signatures', ); } return res.result; } /** * Fetch all non-voting signatures */ async getLatestNonVotingSignatures( limit?: number, cursor?: string, ): Promise<LatestNonVotingSignatures> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getLatestNonVotingSignatures', { limit, cursor }, ); const res = create( unsafeRes, jsonRpcResultAndContext(LatestNonVotingSignaturesResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, 'failed to get latest non-voting signatures', ); } return res.result; } /** * Fetch the latest address proofs for new unique 
addresses specified by an * array of addresses. * * the proof states that said address have not yet been created in * respective address tree. * @param addresses Array of BN254 new addresses * @returns Array of validity proofs for new addresses */ async getMultipleNewAddressProofs(addresses: BN254[]) { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getMultipleNewAddressProofs', addresses.map(address => encodeBN254toBase58(address)), ); const res = create( unsafeRes, jsonRpcResultAndContext(array(NewAddressProofResult)), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get proofs for new addresses ${addresses.map(address => encodeBN254toBase58(address)).join(', ')}`, ); } if (res.result.value === null) { throw new Error( `failed to get proofs for new addresses ${addresses.map(address => encodeBN254toBase58(address)).join(', ')}`, ); } /// Creates proof for each address const newAddressProofs: MerkleContextWithNewAddressProof[] = []; for (const proof of res.result.value) { const _proof: MerkleContextWithNewAddressProof = { root: proof.root, rootIndex: proof.rootSeq % 2400, value: proof.address, leafLowerRangeValue: proof.lowerRangeAddress, leafHigherRangeValue: proof.higherRangeAddress, nextIndex: bn(proof.nextIndex), merkleProofHashedIndexedElementLeaf: proof.proof, indexHashedIndexedElementLeaf: bn(proof.lowElementLeafIndex), merkleTree: proof.merkleTree, nullifierQueue: mockAddressQueue, }; newAddressProofs.push(_proof); } return newAddressProofs; } /** * Advanced usage of getValidityProof: fetches ZKP directly from a custom * non-rpcprover. Note: This uses the proverEndpoint specified in the * constructor. For normal usage, please use {@link getValidityProof} * instead. * * Fetch the latest validity proof for (1) compressed accounts specified by * an array of account hashes. (2) new unique addresses specified by an * array of addresses. 
 *
 * Validity proofs prove the presence of compressed accounts in state trees
 * and the non-existence of addresses in address trees, respectively. They
 * enable verification without recomputing the merkle proof path, thus
 * lowering verification and data costs.
 *
 * @param hashes Array of BN254 hashes.
 * @param newAddresses Array of BN254 new addresses.
 * @returns validity proof with context
 */
async getValidityProofDirect(
    hashes: BN254[] = [],
    newAddresses: BN254[] = [],
): Promise<CompressedProofWithContext> {
    let validityProof: CompressedProofWithContext;

    // Four cases: neither input (error), only hashes (inclusion proof),
    // only new addresses (non-inclusion proof), or both (combined proof).
    if (hashes.length === 0 && newAddresses.length === 0) {
        throw new Error(
            'Empty input. Provide hashes and/or new addresses.',
        );
    } else if (hashes.length > 0 && newAddresses.length === 0) {
        /// inclusion: prove the hashed accounts exist in their state trees.
        const merkleProofsWithContext =
            await this.getMultipleCompressedAccountProofs(hashes);
        const inputs = convertMerkleProofsWithContextToHex(
            merkleProofsWithContext,
        );
        const compressedProof = await proverRequest(
            this.proverEndpoint,
            'inclusion',
            inputs,
            false,
        );
        validityProof = {
            compressedProof,
            roots: merkleProofsWithContext.map(proof => proof.root),
            rootIndices: merkleProofsWithContext.map(
                proof => proof.rootIndex,
            ),
            leafIndices: merkleProofsWithContext.map(
                proof => proof.leafIndex,
            ),
            leaves: merkleProofsWithContext.map(proof => bn(proof.hash)),
            merkleTrees: merkleProofsWithContext.map(
                proof => proof.merkleTree,
            ),
            nullifierQueues: merkleProofsWithContext.map(
                proof => proof.nullifierQueue,
            ),
        };
    } else if (hashes.length === 0 && newAddresses.length > 0) {
        /// new-address: prove the addresses do not yet exist.
        const newAddressProofs: MerkleContextWithNewAddressProof[] =
            await this.getMultipleNewAddressProofs(newAddresses);
        const inputs =
            convertNonInclusionMerkleProofInputsToHex(newAddressProofs);
        const compressedProof = await proverRequest(
            this.proverEndpoint,
            'new-address',
            inputs,
            false,
        );
        validityProof = {
            compressedProof,
            roots: newAddressProofs.map(proof => proof.root),
            rootIndices: newAddressProofs.map(proof => proof.rootIndex),
            leafIndices: newAddressProofs.map(proof =>
                proof.nextIndex.toNumber(),
            ),
            leaves: newAddressProofs.map(proof => bn(proof.value)),
            merkleTrees: newAddressProofs.map(proof => proof.merkleTree),
            nullifierQueues: newAddressProofs.map(
                proof => proof.nullifierQueue,
            ),
        };
    } else if (hashes.length > 0 && newAddresses.length > 0) {
        /// combined: inclusion inputs first, then non-inclusion inputs.
        /// The concat order below (state proofs before address proofs)
        /// must match the order the prover receives the two input sets.
        const merkleProofsWithContext =
            await this.getMultipleCompressedAccountProofs(hashes);
        const inputs = convertMerkleProofsWithContextToHex(
            merkleProofsWithContext,
        );

        const newAddressProofs: MerkleContextWithNewAddressProof[] =
            await this.getMultipleNewAddressProofs(newAddresses);
        const newAddressInputs =
            convertNonInclusionMerkleProofInputsToHex(newAddressProofs);

        const compressedProof = await proverRequest(
            this.proverEndpoint,
            'combined',
            [inputs, newAddressInputs],
            false,
        );
        validityProof = {
            compressedProof,
            roots: merkleProofsWithContext
                .map(proof => proof.root)
                .concat(newAddressProofs.map(proof => proof.root)),
            rootIndices: merkleProofsWithContext
                .map(proof => proof.rootIndex)
                .concat(newAddressProofs.map(proof => proof.rootIndex)),
            leafIndices: merkleProofsWithContext
                .map(proof => proof.leafIndex)
                .concat(
                    newAddressProofs.map(
                        proof => proof.nextIndex.toNumber(), // TODO: support >32bit
                    ),
                ),
            leaves: merkleProofsWithContext
                .map(proof => bn(proof.hash))
                .concat(newAddressProofs.map(proof => bn(proof.value))),
            merkleTrees: merkleProofsWithContext
                .map(proof => proof.merkleTree)
                .concat(newAddressProofs.map(proof => proof.merkleTree)),
            nullifierQueues: merkleProofsWithContext
                .map(proof => proof.nullifierQueue)
                .concat(
                    newAddressProofs.map(proof => proof.nullifierQueue),
                ),
        };
    } else throw new Error('Invalid input');

    return validityProof;
}

/**
 * Fetch the latest validity proof for (1) compressed accounts specified by
 * an array of account hashes. (2) new unique addresses specified by an
 * array of addresses.
* * Validity proofs prove the presence of compressed accounts in state trees * and the non-existence of addresses in address trees, respectively. They * enable verification without recomputing the merkle proof path, thus * lowering verification and data costs. * * @param hashes Array of BN254 hashes. * @param newAddresses Array of BN254 new addresses. * @returns validity proof with context */ async getValidityProof( hashes: BN254[] = [], newAddresses: BN254[] = [], ): Promise<CompressedProofWithContext> { const defaultAddressTreePublicKey = defaultTestStateTreeAccounts().addressTree; const defaultAddressQueuePublicKey = defaultTestStateTreeAccounts().addressQueue; const defaultStateTreePublicKey = defaultTestStateTreeAccounts().merkleTree; const defaultStateQueuePublicKey = defaultTestStateTreeAccounts().nullifierQueue; const formattedHashes = hashes.map(item => { return { hash: item, tree: defaultStateTreePublicKey, queue: defaultStateQueuePublicKey, }; }); const formattedNewAddresses = newAddresses.map(item => { return { address: item, tree: defaultAddressTreePublicKey, queue: defaultAddressQueuePublicKey, }; }); return this.getValidityProofV0(formattedHashes, formattedNewAddresses); } /** * Fetch the latest validity proof for (1) compressed accounts specified by * an array of account hashes. (2) new unique addresses specified by an * array of addresses. * * Validity proofs prove the presence of compressed accounts in state trees * and the non-existence of addresses in address trees, respectively. They * enable verification without recomputing the merkle proof path, thus * lowering verification and data costs. * * @param hashes Array of { hash: BN254, tree: PublicKey, queue: PublicKey }. * @param newAddresses Array of { address: BN254, tree: PublicKey, queue: PublicKey }. 
* @returns validity proof with context */ async getValidityProofV0( hashes: HashWithTree[] = [], newAddresses: AddressWithTree[] = [], ): Promise<CompressedProofWithContext> { const { value } = await this.getValidityProofAndRpcContext( hashes, newAddresses, ); return value; } /** * Fetch the latest validity proof for (1) compressed accounts specified by * an array of account hashes. (2) new unique addresses specified by an * array of addresses. Returns with context slot. * * Validity proofs prove the presence of compressed accounts in state trees * and the non-existence of addresses in address trees, respectively. They * enable verification without recomputing the merkle proof path, thus * lowering verification and data costs. * * @param hashes Array of BN254 hashes. * @param newAddresses Array of BN254 new addresses. Optionally specify the * tree and queue for each address. Default to public * state tree/queue. * @returns validity proof with context */ async getValidityProofAndRpcContext( hashes: HashWithTree[] = [], newAddresses: AddressWithTree[] = [], ): Promise<WithContext<CompressedProofWithContext>> { const unsafeRes = await rpcRequest( this.compressionApiEndpoint, 'getValidityProof', { hashes: hashes.map(({ hash }) => encodeBN254toBase58(hash)), newAddressesWithTrees: newAddresses.map( ({ address, tree }) => ({ address: encodeBN254toBase58(address), tree: tree.toBase58(), }), ), }, ); const res = create( unsafeRes, jsonRpcResultAndContext(ValidityProofResult), ); if ('error' in res) { throw new SolanaJSONRPCError( res.error, `failed to get ValidityProof for compressed accounts ${hashes.map(hash => hash.toString())}`, ); } const result = res.result.value; if (result === null) { throw new Error( `failed to get ValidityProof for compressed accounts ${hashes.map(hash => hash.toString())}`, ); } const value: CompressedProofWithContext = { compressedProof: result.compressedProof, merkleTrees: result.merkleTrees, leafIndices: result.leafIndices, nullifierQueues: [ 
...hashes.map(({ queue }) => queue), ...newAddresses.map(({ queue }) => queue), ], rootIndices: result.rootIndices, roots: result.roots, leaves: result.leaves, }; return { value, context: res.result.context }; } }
0