repo_id
stringclasses 279
values | file_path
stringlengths 43
179
| content
stringlengths 1
4.18M
| __index_level_0__
int64 0
0
|
|---|---|---|---|
solana_public_repos/Lightprotocol/light-protocol/test-programs/account-compression-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/account-compression-test/tests/group_authority_tests.rs
|
#![cfg(feature = "test-sbf")]
use account_compression::errors::AccountCompressionErrorCode;
use account_compression::{
self, utils::constants::GROUP_AUTHORITY_SEED, GroupAuthority, RegisteredProgram, ID,
};
use anchor_lang::{system_program, InstructionData, ToAccountMetas};
use light_program_test::test_env::{get_group_pda, OLD_SYSTEM_PROGRAM_ID_TEST_KEYPAIR};
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_test_utils::{airdrop_lamports, assert_rpc_error, RpcConnection};
use solana_program_test::ProgramTest;
use solana_sdk::{
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
signature::{Keypair, Signer},
transaction::Transaction,
};
use std::str::FromStr;
/// Tests:
/// 1. Create group authority
/// 2. Update group authority
/// 3. Cannot update with invalid authority
/// 4. Add program to group
/// 5. Cannot add program to group with invalid authority
#[tokio::test]
async fn test_create_and_update_group() {
    // Local validator with the account-compression program plus the (old)
    // light system program, which gets registered to the group below.
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    let system_program_id =
        Pubkey::from_str("SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7").unwrap();
    program_test.add_program("light_system_program", system_program_id, None);
    program_test.set_compute_max_units(1_400_000u64);
    let context = program_test.start_with_context().await;
    let mut context = ProgramTestRpcConnection { context };
    let seed = Keypair::new();
    // Group-authority PDA: constant seed prefix + random seed pubkey.
    let group_accounts = Pubkey::find_program_address(
        &[GROUP_AUTHORITY_SEED, seed.pubkey().to_bytes().as_slice()],
        &ID,
    );
    // 1. Create the group authority with the payer as the initial authority.
    let instruction_data = account_compression::instruction::InitializeGroupAuthority {
        authority: context.get_payer().pubkey(),
    };
    let instruction = Instruction {
        program_id: ID,
        accounts: vec![
            AccountMeta::new(context.get_payer().pubkey(), true),
            AccountMeta::new(seed.pubkey(), true),
            AccountMeta::new(group_accounts.0, false),
            AccountMeta::new_readonly(system_program::ID, false),
        ],
        data: instruction_data.data(),
    };
    let latest_blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&context.get_payer().pubkey()),
        &vec![&context.get_payer(), &seed],
        latest_blockhash,
    );
    context.process_transaction(transaction).await.unwrap();
    let group_authority = context
        .get_anchor_account::<GroupAuthority>(&group_accounts.0)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(group_authority.authority, context.get_payer().pubkey());
    assert_eq!(group_authority.seed, seed.pubkey());
    // 2. Update the group authority to a new keypair.
    let updated_keypair = Keypair::new();
    let update_group_authority_ix = account_compression::instruction::UpdateGroupAuthority {
        authority: updated_keypair.pubkey(),
    };
    // update with new authority
    let instruction = Instruction {
        program_id: ID,
        accounts: vec![
            AccountMeta::new(context.get_payer().pubkey(), true),
            AccountMeta::new(group_accounts.0, false),
            AccountMeta::new_readonly(updated_keypair.pubkey(), false),
        ],
        data: update_group_authority_ix.data(),
    };
    let latest_blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&context.get_payer().pubkey()),
        &vec![&context.get_payer()],
        latest_blockhash,
    );
    context.process_transaction(transaction).await.unwrap();
    let group_authority = context
        .get_anchor_account::<GroupAuthority>(&group_accounts.0)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(group_authority.authority, updated_keypair.pubkey());
    assert_eq!(group_authority.seed, seed.pubkey());
    // 3. Update with the old (now stale) authority must fail.
    let update_group_authority_ix = account_compression::instruction::UpdateGroupAuthority {
        authority: context.get_payer().pubkey(),
    };
    let instruction = Instruction {
        program_id: ID,
        accounts: vec![
            AccountMeta::new(context.get_payer().pubkey(), true),
            AccountMeta::new(group_accounts.0, false),
            AccountMeta::new_readonly(updated_keypair.pubkey(), false),
        ],
        data: update_group_authority_ix.data(),
    };
    let latest_blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&context.get_payer().pubkey()),
        &vec![&context.get_payer()],
        latest_blockhash,
    );
    let update_error = context.process_transaction(transaction).await;
    assert!(update_error.is_err());
    airdrop_lamports(&mut context, &updated_keypair.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let system_program_id_keypair =
        Keypair::from_bytes(&OLD_SYSTEM_PROGRAM_ID_TEST_KEYPAIR).unwrap();
    // 4. Add a new program to the group; PDA derived from the program id.
    let registered_program_pda = Pubkey::find_program_address(
        &[system_program_id_keypair.pubkey().to_bytes().as_slice()],
        &ID,
    )
    .0;
    let register_program_ix = account_compression::instruction::RegisterProgramToGroup {};
    let instruction = Instruction {
        program_id: ID,
        accounts: vec![
            AccountMeta::new(updated_keypair.pubkey(), true),
            AccountMeta::new(system_program_id_keypair.pubkey(), true),
            AccountMeta::new(registered_program_pda, false),
            AccountMeta::new(group_accounts.0, false),
            AccountMeta::new_readonly(system_program::ID, false),
        ],
        data: register_program_ix.data(),
    };
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&updated_keypair.pubkey()),
        &vec![&updated_keypair, &system_program_id_keypair],
        context.get_latest_blockhash().await.unwrap(),
    );
    context.process_transaction(transaction).await.unwrap();
    // FIX: was `®istered_program_pda` — an HTML-entity mojibake of
    // `&registered_program_pda` (`&reg` -> `®`) which does not compile.
    let registered_program_account = context
        .get_anchor_account::<RegisteredProgram>(&registered_program_pda)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(
        registered_program_account.registered_program_id,
        system_program_id_keypair.pubkey()
    );
    assert_eq!(
        registered_program_account.group_authority_pda,
        group_accounts.0
    );
    // 5. Add a new program to the group with an invalid authority — must fail.
    let other_program_keypair = Keypair::new();
    let other_program_id = other_program_keypair.pubkey();
    let registered_program_pda =
        Pubkey::find_program_address(&[other_program_id.to_bytes().as_slice()], &ID).0;
    let register_program_ix = account_compression::instruction::RegisterProgramToGroup {};
    let instruction = Instruction {
        program_id: ID,
        accounts: vec![
            AccountMeta::new(context.get_payer().pubkey(), true),
            AccountMeta::new(other_program_id, true),
            AccountMeta::new(registered_program_pda, false),
            AccountMeta::new(group_accounts.0, false),
            AccountMeta::new_readonly(system_program::ID, false),
        ],
        data: register_program_ix.data(),
    };
    let latest_blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&context.get_payer().pubkey()),
        &vec![&context.get_payer(), &other_program_keypair],
        latest_blockhash,
    );
    let result = context.process_transaction(transaction).await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::InvalidAuthority.into(),
    )
    .unwrap();
    // Re-derive the PDA of the successfully registered program for the
    // deregistration checks below.
    let registered_program_pda = Pubkey::find_program_address(
        &[system_program_id_keypair.pubkey().to_bytes().as_slice()],
        &ID,
    )
    .0;
    // deregister program with invalid authority
    {
        let close_recipient = Pubkey::new_unique();
        let deregister_program_ix = account_compression::instruction::DeregisterProgram {};
        let accounts = account_compression::accounts::DeregisterProgram {
            authority: context.get_payer().pubkey(),
            registered_program_pda,
            group_authority_pda: group_accounts.0,
            close_recipient,
        };
        let instruction = Instruction {
            program_id: ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: deregister_program_ix.data(),
        };
        let payer = context.get_payer().insecure_clone();
        let result = context
            .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer])
            .await;
        assert_rpc_error(
            result,
            0,
            AccountCompressionErrorCode::InvalidAuthority.into(),
        )
        .unwrap();
    }
    // deregister program with invalid group
    {
        let invalid_group_authority = Keypair::new();
        context
            .airdrop_lamports(&invalid_group_authority.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let invalid_group = get_group_pda(invalid_group_authority.pubkey());
        let instruction_data = account_compression::instruction::InitializeGroupAuthority {
            authority: invalid_group_authority.pubkey(),
        };
        let instruction = Instruction {
            program_id: ID,
            accounts: vec![
                AccountMeta::new(invalid_group_authority.pubkey(), true),
                AccountMeta::new(invalid_group_authority.pubkey(), true),
                AccountMeta::new(invalid_group, false),
                AccountMeta::new_readonly(system_program::ID, false),
            ],
            data: instruction_data.data(),
        };
        context
            .create_and_send_transaction(
                &[instruction],
                &invalid_group_authority.pubkey(),
                &[&invalid_group_authority],
            )
            .await
            .unwrap();
        let close_recipient = Pubkey::new_unique();
        let deregister_program_ix = account_compression::instruction::DeregisterProgram {};
        let accounts = account_compression::accounts::DeregisterProgram {
            authority: invalid_group_authority.pubkey(),
            registered_program_pda,
            group_authority_pda: invalid_group,
            close_recipient,
        };
        let instruction = Instruction {
            program_id: ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: deregister_program_ix.data(),
        };
        let result = context
            .create_and_send_transaction(
                &[instruction],
                &invalid_group_authority.pubkey(),
                &[&invalid_group_authority],
            )
            .await;
        assert_rpc_error(result, 0, AccountCompressionErrorCode::InvalidGroup.into()).unwrap();
    }
    // successfully deregister program
    {
        let close_recipient = Pubkey::new_unique();
        let deregister_program_ix = account_compression::instruction::DeregisterProgram {};
        let accounts = account_compression::accounts::DeregisterProgram {
            authority: updated_keypair.pubkey(),
            registered_program_pda,
            group_authority_pda: group_accounts.0,
            close_recipient,
        };
        let instruction = Instruction {
            program_id: ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: deregister_program_ix.data(),
        };
        context
            .create_and_send_transaction(
                &[instruction],
                &updated_keypair.pubkey(),
                &[&updated_keypair],
            )
            .await
            .unwrap();
        // The registered-program account must be closed and its rent-exempt
        // lamports transferred to the close recipient.
        let closed_registered_program_account =
            context.get_account(registered_program_pda).await.unwrap();
        assert!(closed_registered_program_account.is_none());
        let recipient_balance = context.get_balance(&close_recipient).await.unwrap();
        let rent_exemption = context
            .get_minimum_balance_for_rent_exemption(RegisteredProgram::LEN)
            .await
            .unwrap();
        assert_eq!(recipient_balance, rent_exemption);
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/account-compression-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/account-compression-test/tests/merkle_tree_tests.rs
|
#![cfg(feature = "test-sbf")]
use std::{collections::HashMap, mem};
use account_compression::{
self,
errors::AccountCompressionErrorCode,
queue_from_bytes_copy,
sdk::{create_initialize_merkle_tree_instruction, create_insert_leaves_instruction},
state::{queue_from_bytes_zero_copy_mut, QueueAccount},
utils::constants::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT},
AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, QueueType,
StateMerkleTreeAccount, StateMerkleTreeConfig, ID, SAFETY_MARGIN,
};
use anchor_lang::{error::ErrorCode, system_program, InstructionData, ToAccountMetas};
use light_concurrent_merkle_tree::{
errors::ConcurrentMerkleTreeError, event::MerkleTreeEvent,
zero_copy::ConcurrentMerkleTreeZeroCopyMut,
};
use light_hash_set::HashSetError;
use light_hasher::{zero_bytes::poseidon::ZERO_BYTES, Hasher, Poseidon};
use light_merkle_tree_reference::MerkleTree;
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_test_utils::assert_queue::assert_nullifier_queue_initialized;
use light_test_utils::state_tree_rollover::StateMerkleTreeRolloverMode;
use light_test_utils::{
airdrop_lamports, assert_rpc_error, create_account_instruction,
create_address_merkle_tree_and_queue_account_with_assert, get_concurrent_merkle_tree,
get_hash_set, AccountZeroCopy, RpcConnection, RpcError,
};
use light_test_utils::{
assert_merkle_tree::assert_merkle_tree_initialized,
state_tree_rollover::{
assert_rolled_over_pair, perform_state_merkle_tree_roll_over,
set_state_merkle_tree_next_index,
},
};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::{BigUint, ToBigUint};
use solana_program_test::ProgramTest;
use solana_sdk::{
account::AccountSharedData,
instruction::{AccountMeta, Instruction},
signature::{Keypair, Signature, Signer},
transaction::Transaction,
};
use solana_sdk::{account::WritableAccount, pubkey::Pubkey};
/// Tests:
/// 1. Functional: Initialize nullifier queue
/// 2. Functional: Insert into nullifier queue
/// 3. Failing: Insert the same elements into nullifier queue again (3 and 1 element(s))
/// 4. Failing: Insert into nullifier queue with invalid authority
/// 5. Functional: Insert one element into nullifier queue
async fn test_init_and_insert_into_nullifier_queue(
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) {
    // Local validator with the account-compression program and the spl-noop
    // program (loaded at the program's NOOP_PUBKEY constant).
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    program_test.add_program(
        "spl_noop",
        Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY),
        None,
    );
    let merkle_tree_keypair = Keypair::new();
    let merkle_tree_pubkey = merkle_tree_keypair.pubkey();
    let nullifier_queue_keypair = Keypair::new();
    let nullifier_queue_pubkey = nullifier_queue_keypair.pubkey();
    program_test.set_compute_max_units(1_400_000u64);
    let context = program_test.start_with_context().await;
    let mut rpc = ProgramTestRpcConnection { context };
    let payer_pubkey = rpc.get_payer().pubkey();
    // Initialization must fail for accounts created with invalid sizes ...
    fail_initialize_state_merkle_tree_and_nullifier_queue_invalid_sizes(
        &mut rpc,
        &payer_pubkey,
        &merkle_tree_keypair,
        &nullifier_queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // ... and for invalid configuration values.
    fail_initialize_state_merkle_tree_and_nullifier_queue_invalid_config(
        &mut rpc,
        &payer_pubkey,
        &merkle_tree_keypair,
        &nullifier_queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // CHECK 1: initialize two independent tree/queue pairs.
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut rpc,
        &payer_pubkey,
        &merkle_tree_keypair,
        &nullifier_queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    let merkle_tree_keypair_2 = Keypair::new();
    let nullifier_queue_keypair_2 = Keypair::new();
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut rpc,
        &payer_pubkey,
        &merkle_tree_keypair_2,
        &nullifier_queue_keypair_2,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // CHECK 2: insert into the first nullifier queue.
    functional_2_test_insert_into_nullifier_queues(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
    )
    .await;
    // CHECK 3: re-inserting already-inserted elements must fail — first with a
    // batch of three (containing a duplicate), then with a single element.
    fail_3_insert_same_elements_into_nullifier_queue(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        vec![[3u8; 32], [1u8; 32], [1u8; 32]],
    )
    .await;
    fail_3_insert_same_elements_into_nullifier_queue(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        vec![[1u8; 32]],
    )
    .await;
    // CHECK 4: inserting with an invalid signer must fail.
    fail_4_insert_with_invalid_signer(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        vec![[3u8; 32]],
    )
    .await;
    // CHECK 5: insert one (fresh) element into the nullifier queue.
    functional_5_test_insert_into_nullifier_queue(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
    )
    .await;
    let queue_tree_pair = (nullifier_queue_pubkey, merkle_tree_pubkey);
    let queue_tree_pair_2 = (
        nullifier_queue_keypair_2.pubkey(),
        merkle_tree_keypair_2.pubkey(),
    );
    let nullifier_1 = [10u8; 32];
    let nullifier_2 = [20u8; 32];
    // CHECK: nullifiers inserted into correct queue with 2 queues
    functional_6_test_insert_into_two_nullifier_queues(
        &mut rpc,
        &[nullifier_1, nullifier_2],
        &[queue_tree_pair, queue_tree_pair_2],
    )
    .await;
    let nullifier_1 = [11u8; 32];
    let nullifier_2 = [21u8; 32];
    let nullifier_3 = [31u8; 32];
    let nullifier_4 = [41u8; 32];
    // CHECK: nullifiers inserted into correct queue with 2 queues and not ordered
    functional_7_test_insert_into_two_nullifier_queues_not_ordered(
        &mut rpc,
        &[nullifier_1, nullifier_2, nullifier_3, nullifier_4],
        &[
            queue_tree_pair,
            queue_tree_pair_2,
            queue_tree_pair,
            queue_tree_pair_2,
        ],
    )
    .await;
}
#[tokio::test]
async fn test_init_and_insert_into_nullifier_queue_default() {
    // Run the nullifier-queue test suite with default tree and queue configs.
    let tree_config = StateMerkleTreeConfig::default();
    let queue_config = NullifierQueueConfig::default();
    test_init_and_insert_into_nullifier_queue(&tree_config, &queue_config).await
}
#[tokio::test]
async fn test_init_and_insert_into_nullifier_queue_custom() {
    // Sweep a grid of changelog/roots/capacity combinations. The roots buffer
    // must hold at least as many entries as the changelog, so undersized
    // (roots < changelog) pairs are filtered out instead of tested.
    let buffer_sizes = [1, 1000, 2000];
    for changelog_size in buffer_sizes {
        for roots_size in buffer_sizes
            .into_iter()
            .filter(|&roots| roots >= changelog_size)
        {
            for queue_capacity in [5003, 6857, 7901] {
                let tree_config = StateMerkleTreeConfig {
                    height: STATE_MERKLE_TREE_HEIGHT as u32,
                    changelog_size,
                    roots_size,
                    canopy_depth: STATE_MERKLE_TREE_CANOPY_DEPTH,
                    network_fee: Some(5000),
                    rollover_threshold: Some(95),
                    close_threshold: None,
                };
                let queue_config = NullifierQueueConfig {
                    capacity: queue_capacity,
                    sequence_threshold: roots_size + SAFETY_MARGIN,
                    network_fee: None,
                };
                test_init_and_insert_into_nullifier_queue(&tree_config, &queue_config).await;
            }
        }
    }
}
/// Tests:
/// (Since nullifier queue and address queue use the same code, we only need to test one)
/// Show that we cannot insert into a full queue.
/// 1. try to insert into queue to generate the full error
/// 2. nullify one
/// 3. try to insert again it should still generate the full error
/// 4. advance Merkle tree seq until one before it would work check that it still fails
/// 5. advance Merkle tree seq by one and check that inserting works now
/// 6.try inserting again it should fail with full error
async fn test_full_nullifier_queue(
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) {
    // Local validator with the account-compression program and spl-noop.
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    program_test.add_program(
        "spl_noop",
        Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY),
        None,
    );
    let merkle_tree_keypair = Keypair::new();
    let merkle_tree_pubkey = merkle_tree_keypair.pubkey();
    let nullifier_queue_keypair = Keypair::new();
    let nullifier_queue_pubkey = nullifier_queue_keypair.pubkey();
    program_test.set_compute_max_units(1_400_000u64);
    let context = program_test.start_with_context().await;
    let mut rpc = ProgramTestRpcConnection { context };
    let payer_pubkey = rpc.get_payer().pubkey();
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut rpc,
        &payer_pubkey,
        &merkle_tree_keypair,
        &nullifier_queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    let leaf: [u8; 32] = bigint_to_be_bytes_array(&1.to_biguint().unwrap()).unwrap();
    // append a leaf so that we have a leaf to nullify
    let mut reference_merkle_tree_1 = MerkleTree::<Poseidon>::new(
        STATE_MERKLE_TREE_HEIGHT as usize,
        STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
    );
    functional_3_append_leaves_to_merkle_tree(
        &mut rpc,
        &mut [&mut reference_merkle_tree_1],
        &vec![merkle_tree_pubkey],
        &vec![(0u8, leaf)],
    )
    .await;
    // NOTE(review): `*` binds tighter than `+`, so this is
    // queue.lamports + (tree.lamports * 2) — confirm whether
    // (queue + tree) * 2 was intended.
    let lamports_queue_accounts = rpc
        .get_account(nullifier_queue_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports
        + rpc
            .get_account(merkle_tree_pubkey)
            .await
            .unwrap()
            .unwrap()
            .lamports
            * 2;
    // fills queue with increasing values starting from 0
    // -> in this process inserts leaf with value 1 into queue
    // all elements are marked with sequence number 2400
    set_nullifier_queue_to_full(
        &mut rpc,
        &nullifier_queue_pubkey,
        0,
        lamports_queue_accounts,
    )
    .await;
    // A value guaranteed not to be in the (now full) queue yet.
    let initial_value = 309005;
    let element: [u8; 32] = bigint_to_be_bytes_array(&initial_value.to_biguint().unwrap()).unwrap();
    // CHECK 1
    fail_insert_into_full_queue(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        vec![element],
    )
    .await;
    // Rebuild a reference tree (height 26, canopy 10 — matches the const
    // generic `26` used for the on-chain tree below) and verify roots agree.
    let mut reference_merkle_tree = MerkleTree::<Poseidon>::new(26, 10);
    reference_merkle_tree.append(&leaf).unwrap();
    let merkle_tree = get_concurrent_merkle_tree::<
        StateMerkleTreeAccount,
        ProgramTestRpcConnection,
        Poseidon,
        26,
    >(&mut rpc, merkle_tree_pubkey)
    .await;
    assert_eq!(merkle_tree.root(), reference_merkle_tree.root());
    let leaf_index = reference_merkle_tree.get_leaf_index(&leaf).unwrap() as u64;
    // Locate the queue slot of the leaf's nullifier inside the on-chain hash set.
    let element_index = unsafe {
        get_hash_set::<QueueAccount, ProgramTestRpcConnection>(&mut rpc, nullifier_queue_pubkey)
            .await
            .find_element_index(&BigUint::from_bytes_be(&leaf), None)
            .unwrap()
    };
    // CHECK 2
    nullify(
        &mut rpc,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        queue_config,
        &mut reference_merkle_tree,
        &leaf,
        merkle_tree.changelog_index() as u64,
        element_index.unwrap() as u16,
        leaf_index,
    )
    .await
    .unwrap();
    // CHECK 3
    fail_insert_into_full_queue(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        vec![element],
    )
    .await;
    // Advance to sequence threshold + 1 (expected sequence number of the last
    // element - 1).
    set_state_merkle_tree_sequence(
        &mut rpc,
        &merkle_tree_pubkey,
        queue_config.sequence_threshold + 1,
        lamports_queue_accounts,
    )
    .await;
    // CHECK 4
    fail_insert_into_full_queue(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        vec![element],
    )
    .await;
    // TODO: add e2e test in compressed pda program for this
    // Advance to sequence threshold + 2 (expected sequence number of the last
    // element).
    set_state_merkle_tree_sequence(
        &mut rpc,
        &merkle_tree_pubkey,
        queue_config.sequence_threshold + 2,
        lamports_queue_accounts,
    )
    .await;
    let payer = rpc.get_payer().insecure_clone();
    let account = rpc
        .get_account(nullifier_queue_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut data = account.data.clone();
    // Zero-copy view over a local copy of the queue account data, used only to
    // compute a value that probes into the freed slot.
    let nullifier_queue = &mut unsafe { queue_from_bytes_zero_copy_mut(&mut data).unwrap() };
    let replacement_start_value = 606;
    let replacement_value = find_overlapping_probe_index(
        1,
        replacement_start_value,
        nullifier_queue.hash_set.get_capacity(),
    );
    // CHECK: 5
    let element: [u8; 32] =
        bigint_to_be_bytes_array(&replacement_value.to_biguint().unwrap()).unwrap();
    insert_into_single_nullifier_queue(
        &[element],
        &payer,
        &payer,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        &mut rpc,
    )
    .await
    .unwrap();
    // CHECK: 6
    let element: [u8; 32] = bigint_to_be_bytes_array(&30000.to_biguint().unwrap()).unwrap();
    fail_insert_into_full_queue(
        &mut rpc,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        vec![element],
    )
    .await;
}
#[tokio::test]
async fn test_full_nullifier_queue_default() {
    // Run the full-queue scenario with default tree and queue configs.
    let tree_config = StateMerkleTreeConfig::default();
    let queue_config = NullifierQueueConfig::default();
    test_full_nullifier_queue(&tree_config, &queue_config).await
}
/// Insert nullifiers failing tests
/// Test:
/// 1. no nullifiers
/// 2. mismatch remaining accounts and addresses
/// 3. invalid queue accounts:
/// 3.1 pass non queue account as queue account
/// 3.2 pass address queue account
/// 3.3 pass non associated queue account
/// 4. invalid Merkle tree accounts:
/// 4.1 pass non Merkle tree account as Merkle tree account
/// 4.2 pass non associated Merkle tree account
async fn failing_queue(
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) {
    // Local validator with the account-compression program and spl-noop.
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    program_test.add_program(
        "spl_noop",
        Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY),
        None,
    );
    let merkle_tree_keypair = Keypair::new();
    let merkle_tree_pubkey = merkle_tree_keypair.pubkey();
    let nullifier_queue_keypair = Keypair::new();
    let nullifier_queue_pubkey = nullifier_queue_keypair.pubkey();
    program_test.set_compute_max_units(1_400_000u64);
    let context = program_test.start_with_context().await;
    let mut rpc = ProgramTestRpcConnection { context };
    let payer = rpc.get_payer().insecure_clone();
    let payer_pubkey = rpc.get_payer().pubkey();
    // Two state tree/queue pairs: the second supplies "non associated"
    // accounts for checks 3.3 and 4.2.
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut rpc,
        &payer_pubkey,
        &merkle_tree_keypair,
        &nullifier_queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    let merkle_tree_keypair_2 = Keypair::new();
    let nullifier_queue_keypair_2 = Keypair::new();
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut rpc,
        &payer_pubkey,
        &merkle_tree_keypair_2,
        &nullifier_queue_keypair_2,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // An address tree/queue pair supplies a wrong-typed queue for check 3.2.
    let address_merkle_tree_keypair = Keypair::new();
    let address_queue_keypair = Keypair::new();
    create_address_merkle_tree_and_queue_account_with_assert(
        &payer,
        false,
        &mut rpc,
        &address_merkle_tree_keypair,
        &address_queue_keypair,
        None,
        None,
        &AddressMerkleTreeConfig::default(),
        &AddressQueueConfig::default(),
        1,
    )
    .await
    .unwrap();
    let queue_tree_pair = (nullifier_queue_pubkey, merkle_tree_pubkey);
    // CHECK 1: no nullifiers as input
    let result =
        insert_into_nullifier_queues(&[], &payer, &payer, &[queue_tree_pair], &mut rpc).await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::InputElementsEmpty.into(),
    )
    .unwrap();
    let nullifier_1 = [1u8; 32];
    // CHECK 2: Number of leaves/addresses leaves mismatch
    let result = insert_into_nullifier_queues(
        &[nullifier_1],
        &payer,
        &payer,
        &[queue_tree_pair, queue_tree_pair],
        &mut rpc,
    )
    .await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::NumberOfLeavesMismatch.into(),
    )
    .unwrap();
    // CHECK 3.1: pass non queue account as queue account
    let result = insert_into_nullifier_queues(
        &[nullifier_1],
        &payer,
        &payer,
        &[(merkle_tree_pubkey, merkle_tree_pubkey)],
        &mut rpc,
    )
    .await;
    assert_rpc_error(result, 0, ErrorCode::AccountDiscriminatorMismatch.into()).unwrap();
    // CHECK 3.2: pass address queue account instead of nullifier queue account
    let result = insert_into_nullifier_queues(
        &[nullifier_1],
        &payer,
        &payer,
        &[(address_queue_keypair.pubkey(), merkle_tree_pubkey)],
        &mut rpc,
    )
    .await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::InvalidQueueType.into(),
    )
    .unwrap();
    let nullifier_2 = [2u8; 32];
    // CHECK 3.3: pass non associated queue account
    let result = insert_into_nullifier_queues(
        &[nullifier_2],
        &payer,
        &payer,
        &[(nullifier_queue_keypair_2.pubkey(), merkle_tree_pubkey)],
        &mut rpc,
    )
    .await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
    // CHECK 4.1: pass non Merkle tree account
    // Triggering a discriminator mismatch error is not possibly
    // by passing an invalid Merkle tree account.
    // A non Merkle tree account cannot be associated with a queue account.
    // Hence the instruction fails with MerkleTreeAndQueueNotAssociated.
    // The Merkle tree account will not be deserialized.
    let result = insert_into_nullifier_queues(
        &[nullifier_1],
        &payer,
        &payer,
        &[(
            nullifier_queue_keypair.pubkey(),
            nullifier_queue_keypair.pubkey(),
        )],
        &mut rpc,
    )
    .await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
    // CHECK 4.2: pass non associated Merkle tree account
    let result = insert_into_nullifier_queues(
        &[nullifier_1],
        &payer,
        &payer,
        &[(
            nullifier_queue_keypair.pubkey(),
            merkle_tree_keypair_2.pubkey(),
        )],
        &mut rpc,
    )
    .await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
}
#[tokio::test]
async fn test_failing_queue_default() {
    // Run the failing-insert scenarios with default tree and queue configs.
    let tree_config = StateMerkleTreeConfig::default();
    let queue_config = NullifierQueueConfig::default();
    failing_queue(&tree_config, &queue_config).await
}
/// Tests:
/// 1. Should fail: not ready for rollover
/// 2. Should fail: merkle tree and queue not associated (invalid tree)
/// 3. Should fail: merkle tree and queue not associated (invalid queue)
/// 4. Should succeed: rollover state merkle tree
/// 5. Should fail: merkle tree already rolled over
async fn test_init_and_rollover_state_merkle_tree(
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) {
    // Local validator with the account-compression program and spl-noop.
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    program_test.add_program(
        "spl_noop",
        Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY),
        None,
    );
    let merkle_tree_keypair = Keypair::new();
    let merkle_tree_pubkey = merkle_tree_keypair.pubkey();
    let nullifier_queue_keypair = Keypair::new();
    let nullifier_queue_pubkey = nullifier_queue_keypair.pubkey();
    program_test.set_compute_max_units(1_400_000u64);
    let context = program_test.start_with_context().await;
    let mut context = ProgramTestRpcConnection { context };
    let payer_pubkey = context.get_payer().pubkey();
    // Two tree/queue pairs: the second supplies non-associated accounts for
    // the "not associated" failure checks below.
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut context,
        &payer_pubkey,
        &merkle_tree_keypair,
        &nullifier_queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    let merkle_tree_keypair_2 = Keypair::new();
    let merkle_tree_pubkey_2 = merkle_tree_keypair_2.pubkey();
    let nullifier_queue_keypair_2 = Keypair::new();
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut context,
        &payer_pubkey,
        &merkle_tree_keypair_2,
        &nullifier_queue_keypair_2,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // Rollover becomes legal once next_index reaches threshold% of capacity.
    // NOTE(review): capacity is hard-coded as 2^26 here rather than derived
    // from merkle_tree_config.height — confirm this is intentional.
    let required_next_index = 2u64.pow(26) * merkle_tree_config.rollover_threshold.unwrap() / 100;
    let failing_next_index = required_next_index - 1;
    // NOTE(review): `*` binds tighter than `+`, so this is
    // queue.lamports + (tree.lamports * 2) — confirm intent.
    let lamports_queue_accounts = context
        .get_account(nullifier_queue_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports
        + context
            .get_account(merkle_tree_pubkey)
            .await
            .unwrap()
            .unwrap()
            .lamports
            * 2;
    // 1. One short of the threshold: rollover must fail.
    set_state_merkle_tree_next_index(
        &mut context,
        &merkle_tree_pubkey,
        failing_next_index,
        lamports_queue_accounts,
    )
    .await;
    let new_nullifier_queue_keypair = Keypair::new();
    let new_state_merkle_tree_keypair = Keypair::new();
    let result = perform_state_merkle_tree_roll_over(
        &mut context,
        &new_nullifier_queue_keypair,
        &new_state_merkle_tree_keypair,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        merkle_tree_config,
        queue_config,
        None,
    )
    .await;
    // Instruction index 2: the rollover transaction is preceded by the two
    // account-creation instructions.
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::NotReadyForRollover.into(),
    )
    .unwrap();
    // New queue account created with an invalid size must be rejected.
    let result = perform_state_merkle_tree_roll_over(
        &mut context,
        &new_nullifier_queue_keypair,
        &new_state_merkle_tree_keypair,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        merkle_tree_config,
        queue_config,
        Some(StateMerkleTreeRolloverMode::QueueInvalidSize),
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::InvalidAccountSize.into(),
    )
    .unwrap();
    // New tree account created with an invalid size must be rejected.
    let result = perform_state_merkle_tree_roll_over(
        &mut context,
        &new_nullifier_queue_keypair,
        &new_state_merkle_tree_keypair,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        merkle_tree_config,
        queue_config,
        Some(StateMerkleTreeRolloverMode::TreeInvalidSize),
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::InvalidAccountSize.into(),
    )
    .unwrap();
    // Reach the threshold so the remaining checks hit the association logic.
    set_state_merkle_tree_next_index(
        &mut context,
        &merkle_tree_pubkey,
        required_next_index,
        lamports_queue_accounts,
    )
    .await;
    // 2. Old tree paired with a queue it is not associated with must fail.
    let result = perform_state_merkle_tree_roll_over(
        &mut context,
        &new_nullifier_queue_keypair,
        &new_state_merkle_tree_keypair,
        &merkle_tree_pubkey,
        &nullifier_queue_keypair_2.pubkey(),
        merkle_tree_config,
        queue_config,
        None,
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
    // 3. Other tree paired with the first queue must fail likewise.
    let result = perform_state_merkle_tree_roll_over(
        &mut context,
        &new_nullifier_queue_keypair,
        &new_state_merkle_tree_keypair,
        &merkle_tree_pubkey_2,
        &nullifier_queue_keypair.pubkey(),
        merkle_tree_config,
        queue_config,
        None,
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
    // 4. Valid rollover must succeed; capture the signer balance beforehand
    // so the helper can assert the rent/fee accounting.
    let signer_prior_balance = context
        .get_account(payer_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports;
    let rollover_signature_and_slot = perform_state_merkle_tree_roll_over(
        &mut context,
        &new_nullifier_queue_keypair,
        &new_state_merkle_tree_keypair,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        merkle_tree_config,
        queue_config,
        None,
    )
    .await
    .unwrap();
    let payer: Keypair = context.get_payer().insecure_clone();
    assert_rolled_over_pair(
        &payer.pubkey(),
        &mut context,
        &signer_prior_balance,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        &new_state_merkle_tree_keypair.pubkey(),
        &new_nullifier_queue_keypair.pubkey(),
        rollover_signature_and_slot.1,
        0,
        3,
    )
    .await;
    // 5. Rolling over the same (already rolled over) pair again must fail.
    let failing_new_nullifier_queue_keypair = Keypair::new();
    let failing_new_state_merkle_tree_keypair = Keypair::new();
    let result = perform_state_merkle_tree_roll_over(
        &mut context,
        &failing_new_nullifier_queue_keypair,
        &failing_new_state_merkle_tree_keypair,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        merkle_tree_config,
        queue_config,
        None,
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::MerkleTreeAlreadyRolledOver.into(),
    )
    .unwrap();
}
#[tokio::test]
async fn test_init_and_rollover_state_merkle_tree_default() {
    // Run the rollover scenarios with default tree and queue configs.
    let tree_config = StateMerkleTreeConfig::default();
    let queue_config = NullifierQueueConfig::default();
    test_init_and_rollover_state_merkle_tree(&tree_config, &queue_config).await
}
/// Runs the init-and-rollover scenario across a grid of custom changelog,
/// root-history and queue-capacity sizes.
///
/// Combinations where the root history is smaller than the changelog are
/// skipped, since every changelog entry needs a root slot.
#[tokio::test]
async fn test_init_and_rollover_state_merkle_tree_custom() {
    let changelog_sizes = [1, 1000, 2000];
    let roots_sizes = [1, 1000, 2000];
    let queue_capacities = [5003, 6857, 7901];
    for changelog_size in changelog_sizes {
        for roots_size in roots_sizes
            .into_iter()
            .filter(|roots_size| *roots_size >= changelog_size)
        {
            for queue_capacity in queue_capacities {
                let merkle_tree_config = StateMerkleTreeConfig {
                    height: STATE_MERKLE_TREE_HEIGHT as u32,
                    changelog_size,
                    roots_size,
                    canopy_depth: STATE_MERKLE_TREE_CANOPY_DEPTH,
                    network_fee: Some(5000),
                    rollover_threshold: Some(95),
                    close_threshold: None,
                };
                let queue_config = NullifierQueueConfig {
                    capacity: queue_capacity,
                    // The queue must keep elements at least as long as the
                    // root history (plus safety margin) is alive.
                    sequence_threshold: roots_size + SAFETY_MARGIN,
                    network_fee: None,
                };
                test_init_and_rollover_state_merkle_tree(&merkle_tree_config, &queue_config)
                    .await;
            }
        }
    }
}
/// Tests:
/// 1. Functional: Initialize merkle tree
/// 2. Failing: mismatching leaf and merkle tree accounts number
/// 3. Failing: pass invalid Merkle tree account
/// 4. Functional: Append leaves to merkle tree
/// 5. Functional: Append leaves to multiple merkle trees not-ordered
/// 6. Failing: Append leaves with invalid authority
async fn test_append_functional_and_failing(
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) {
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    // The noop program receives the serialized Merkle tree change events.
    program_test.add_program(
        "spl_noop",
        Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY),
        None,
    );
    program_test.set_compute_max_units(1_400_000u64);
    let context = program_test.start_with_context().await;
    let mut context = ProgramTestRpcConnection { context };
    let payer_pubkey = context.get_payer().pubkey();
    let merkle_tree_keypair = Keypair::new();
    let queue_keypair = Keypair::new();
    // CHECK 1
    let merkle_tree_pubkey = functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut context,
        &payer_pubkey,
        &merkle_tree_keypair,
        &queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // Second tree/queue pair for the multi-tree append check (CHECK 5).
    let merkle_tree_keypair_2 = Keypair::new();
    let queue_keypair_2 = Keypair::new();
    let merkle_tree_pubkey_2 = functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut context,
        &payer_pubkey,
        &merkle_tree_keypair_2,
        &queue_keypair_2,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // CHECK: 2 fail append with invalid inputs (mismatching leaf and merkle tree accounts)
    // Two leaves, but the leaf targeting tree index 1 has no matching
    // Merkle tree account passed in.
    fail_2_append_leaves_with_invalid_inputs(
        &mut context,
        &[merkle_tree_pubkey],
        vec![(0, [1u8; 32]), (1, [2u8; 32])],
        AccountCompressionErrorCode::NotAllLeavesProcessed.into(),
    )
    .await
    .unwrap();
    // CHECK: 3 fail append with invalid inputs (pass invalid Merkle tree account)
    // A queue account has a different Anchor discriminator than a tree.
    fail_2_append_leaves_with_invalid_inputs(
        &mut context,
        &[queue_keypair.pubkey()],
        vec![(0, [1u8; 32])],
        ErrorCode::AccountDiscriminatorMismatch.into(),
    )
    .await
    .unwrap();
    // CHECK: 4 append leaves to merkle tree
    // 140 distinct leaves (value encoded in the last byte), all targeting
    // the Merkle tree at index 0 of the pubkey vector.
    let leaves = (0u8..=139)
        .map(|i| {
            (
                0,
                [
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, i,
                ],
            )
        })
        .collect::<Vec<(u8, [u8; 32])>>();
    let mut reference_merkle_tree_1 = MerkleTree::<Poseidon>::new(
        STATE_MERKLE_TREE_HEIGHT as usize,
        STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
    );
    functional_3_append_leaves_to_merkle_tree(
        &mut context,
        &mut [&mut reference_merkle_tree_1],
        &vec![merkle_tree_pubkey],
        &leaves,
    )
    .await;
    // Each leaf's index selects its tree account from the pubkey vector
    // below; the two trees alternate, so the leaves are "not ordered" by
    // tree.
    let leaves = vec![
        (0, [1u8; 32]),
        (1, [2u8; 32]),
        (2, [3u8; 32]),
        (3, [4u8; 32]),
    ];
    let mut reference_merkle_tree_2 = MerkleTree::<Poseidon>::new(
        STATE_MERKLE_TREE_HEIGHT as usize,
        STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
    );
    // CHECK: 5 append leaves to multiple merkle trees not-ordered
    functional_3_append_leaves_to_merkle_tree(
        &mut context,
        &mut [&mut reference_merkle_tree_1, &mut reference_merkle_tree_2],
        &vec![
            merkle_tree_pubkey,
            merkle_tree_pubkey_2,
            merkle_tree_pubkey,
            merkle_tree_pubkey_2,
        ],
        &leaves,
    )
    .await;
    // CHECK 6: fail append with invalid authority
    fail_4_append_leaves_with_invalid_authority(&mut context, &merkle_tree_pubkey).await;
}
/// Runs the append scenarios with the default tree and queue configurations.
#[tokio::test]
async fn test_append_functional_and_failing_default() {
    let merkle_tree_config = StateMerkleTreeConfig::default();
    let queue_config = NullifierQueueConfig::default();
    test_append_functional_and_failing(&merkle_tree_config, &queue_config).await
}
/// Tests:
/// 1. Functional: nullify leaf
/// 2. Failing: nullify leaf with invalid leaf index
/// 3. Failing: nullify leaf with invalid leaf queue index
/// 4. Failing: nullify leaf with invalid change log index
/// 5. Functional: nullify other leaf
/// 6. Failing: nullify leaf with nullifier queue that is not associated with the merkle tree
async fn test_nullify_leaves(
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) {
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    // The noop program receives the serialized Merkle tree change events.
    program_test.add_program(
        "spl_noop",
        Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY),
        None,
    );
    let merkle_tree_keypair = Keypair::new();
    let merkle_tree_pubkey = merkle_tree_keypair.pubkey();
    let nullifier_queue_keypair = Keypair::new();
    let nullifier_queue_pubkey = nullifier_queue_keypair.pubkey();
    program_test.set_compute_max_units(1_400_000u64);
    let context = program_test.start_with_context().await;
    let mut context = ProgramTestRpcConnection { context };
    let payer = context.get_payer().insecure_clone();
    let payer_pubkey = context.get_payer().pubkey();
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut context,
        &payer_pubkey,
        &merkle_tree_keypair,
        &nullifier_queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // A second tree/queue pair whose queue is NOT associated with
    // `merkle_tree_pubkey` — used for the failing CHECK 6.
    let other_merkle_tree_keypair = Keypair::new();
    let invalid_nullifier_queue_keypair = Keypair::new();
    let invalid_nullifier_queue_pubkey = invalid_nullifier_queue_keypair.pubkey();
    functional_1_initialize_state_merkle_tree_and_nullifier_queue(
        &mut context,
        &payer_pubkey,
        &other_merkle_tree_keypair,
        &invalid_nullifier_queue_keypair,
        merkle_tree_config,
        queue_config,
    )
    .await;
    // Two leaves appended to tree 0, then queued for nullification.
    let elements = vec![(0, [1u8; 32]), (0, [2u8; 32])];
    let mut reference_merkle_tree = MerkleTree::<Poseidon>::new(
        merkle_tree_config.height as usize,
        merkle_tree_config.canopy_depth as usize,
    );
    functional_3_append_leaves_to_merkle_tree(
        &mut context,
        &mut [&mut reference_merkle_tree],
        &vec![merkle_tree_pubkey],
        &elements,
    )
    .await;
    insert_into_single_nullifier_queue(
        &elements
            .iter()
            .map(|element| element.1)
            .collect::<Vec<[u8; 32]>>(),
        &payer,
        &payer,
        &nullifier_queue_pubkey,
        &merkle_tree_pubkey,
        &mut context,
    )
    .await
    .unwrap();
    // Rebuild the reference tree from scratch to mirror the on-chain state.
    let mut reference_merkle_tree = MerkleTree::<Poseidon>::new(
        merkle_tree_config.height as usize,
        merkle_tree_config.canopy_depth as usize,
    );
    reference_merkle_tree.append(&elements[0].1).unwrap();
    reference_merkle_tree.append(&elements[1].1).unwrap();
    // Locate the first element's bucket index inside the on-chain queue.
    let leaf_queue_index = {
        let account = context
            .get_account(nullifier_queue_pubkey)
            .await
            .unwrap()
            .unwrap();
        let mut data = account.data.clone();
        let nullifier_queue = &mut unsafe { queue_from_bytes_copy(&mut data).unwrap() };
        let (_, index) = nullifier_queue
            .find_element(&BigUint::from_bytes_be(&elements[0].1), None)
            .unwrap()
            .unwrap();
        index
    };
    let element_index = reference_merkle_tree
        .get_leaf_index(&elements[0].1)
        .unwrap() as u64;
    let element_one_index = reference_merkle_tree
        .get_leaf_index(&elements[1].1)
        .unwrap() as u64;
    // CHECK 1: functional nullify of the first leaf.
    // Changelog index 2: two appends happened after initialization.
    nullify(
        &mut context,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        queue_config,
        &mut reference_merkle_tree,
        &elements[0].1,
        2,
        leaf_queue_index as u16,
        element_index,
    )
    .await
    .unwrap();
    // 2. nullify with invalid leaf index
    let invalid_element_index = 0;
    let valid_changelog_index = 3;
    let valid_leaf_queue_index = {
        let account = context
            .get_account(nullifier_queue_pubkey)
            .await
            .unwrap()
            .unwrap();
        let mut data = account.data.clone();
        let nullifier_queue = &mut unsafe { queue_from_bytes_copy(&mut data).unwrap() };
        let (_, index) = nullifier_queue
            .find_element(&BigUint::from_bytes_be(&elements[1].1), None)
            .unwrap()
            .unwrap();
        index as u16
    };
    // The proof built for the wrong leaf index does not verify.
    let result = nullify(
        &mut context,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        queue_config,
        &mut reference_merkle_tree,
        &elements[1].1,
        valid_changelog_index,
        valid_leaf_queue_index,
        invalid_element_index,
    )
    .await;
    assert_rpc_error(
        result,
        0,
        ConcurrentMerkleTreeError::InvalidProof([0; 32], [0; 32]).into(),
    )
    .unwrap();
    // 3. nullify with invalid leaf queue index
    // Queue bucket 0 does not hold the element being nullified.
    let valid_element_index = 1;
    let invalid_leaf_queue_index = 0;
    let result = nullify(
        &mut context,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        queue_config,
        &mut reference_merkle_tree,
        &elements[1].1,
        valid_changelog_index,
        invalid_leaf_queue_index,
        valid_element_index,
    )
    .await;
    assert_rpc_error(result, 0, AccountCompressionErrorCode::LeafNotFound.into()).unwrap();
    // 4. nullify with invalid change log index
    let invalid_changelog_index = 0;
    let result = nullify(
        &mut context,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        queue_config,
        &mut reference_merkle_tree,
        &elements[1].1,
        invalid_changelog_index,
        valid_leaf_queue_index,
        element_one_index,
    )
    .await;
    // NOTE(review): the stale changelog entry cannot patch the proof, so
    // the tree rejects the update with CannotUpdateLeaf (asserted below).
    assert_rpc_error(
        result,
        0,
        ConcurrentMerkleTreeError::CannotUpdateLeaf.into(),
    )
    .unwrap();
    // 5. nullify other leaf
    nullify(
        &mut context,
        &merkle_tree_pubkey,
        &nullifier_queue_pubkey,
        queue_config,
        &mut reference_merkle_tree,
        &elements[1].1,
        valid_changelog_index,
        valid_leaf_queue_index,
        valid_element_index,
    )
    .await
    .unwrap();
    // 6. nullify leaf with nullifier queue that is not associated with the
    // merkle tree
    let result = nullify(
        &mut context,
        &merkle_tree_pubkey,
        &invalid_nullifier_queue_pubkey,
        queue_config,
        &mut reference_merkle_tree,
        &elements[0].1,
        2,
        valid_leaf_queue_index,
        element_index,
    )
    .await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
}
/// Runs the nullify scenarios with the default tree and queue configurations.
#[tokio::test]
async fn test_nullify_leaves_default() {
    let merkle_tree_config = StateMerkleTreeConfig::default();
    let queue_config = NullifierQueueConfig::default();
    test_nullify_leaves(&merkle_tree_config, &queue_config).await
}
/// Inserts two nullifiers into the queue and verifies that both can be found
/// in the on-chain hash set with no sequence number assigned yet.
async fn functional_2_test_insert_into_nullifier_queues<R: RpcConnection>(
    rpc: &mut R,
    nullifier_queue_pubkey: &Pubkey,
    merkle_tree_pubkey: &Pubkey,
) {
    let payer = rpc.get_payer().insecure_clone();
    let elements = vec![[1_u8; 32], [2_u8; 32]];
    insert_into_single_nullifier_queue(
        &elements,
        &payer,
        &payer,
        nullifier_queue_pubkey,
        merkle_tree_pubkey,
        rpc,
    )
    .await
    .unwrap();
    // Read back the queue account and verify every inserted element.
    let hash_set = unsafe { get_hash_set::<QueueAccount, R>(rpc, *nullifier_queue_pubkey).await };
    for element in elements.iter() {
        let value = BigUint::from_bytes_be(element);
        let (entry, _) = hash_set.find_element(&value, None).unwrap().unwrap();
        assert_eq!(entry.value_bytes(), *element);
        // No sequence number until the element is nullified in the tree.
        assert_eq!(entry.sequence_number(), None);
    }
}
/// Re-inserting elements that are already present in the nullifier queue must
/// fail with `HashSetError::ElementAlreadyExists`.
async fn fail_3_insert_same_elements_into_nullifier_queue<R: RpcConnection>(
    context: &mut R,
    nullifier_queue_pubkey: &Pubkey,
    merkle_tree_pubkey: &Pubkey,
    elements: Vec<[u8; 32]>,
) {
    let payer = context.get_payer().insecure_clone();
    // The queue is a hash set, so duplicate insertions are rejected.
    let result = insert_into_single_nullifier_queue(
        elements.as_slice(),
        &payer,
        &payer,
        nullifier_queue_pubkey,
        merkle_tree_pubkey,
        context,
    )
    .await;
    assert_rpc_error(result, 0, HashSetError::ElementAlreadyExists.into()).unwrap();
}
/// Inserting with a signer that is not a registered authority must fail with
/// `InvalidAuthority`.
async fn fail_4_insert_with_invalid_signer<R: RpcConnection>(
    rpc: &mut R,
    nullifier_queue_pubkey: &Pubkey,
    merkle_tree_pubkey: &Pubkey,
    elements: Vec<[u8; 32]>,
) {
    // A freshly generated keypair is not authorized for this queue; fund it
    // so the transaction fails on the authority check, not on fees.
    let unauthorized_signer = Keypair::new();
    airdrop_lamports(rpc, &unauthorized_signer.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let result = insert_into_single_nullifier_queue(
        &elements,
        &unauthorized_signer,
        &unauthorized_signer,
        nullifier_queue_pubkey,
        merkle_tree_pubkey,
        rpc,
    )
    .await;
    assert_rpc_error(
        result,
        0,
        AccountCompressionErrorCode::InvalidAuthority.into(),
    )
    .unwrap();
}
/// Inserts a single element (the big integer 3) into the queue and verifies
/// it is present without a sequence number.
async fn functional_5_test_insert_into_nullifier_queue<R: RpcConnection>(
    rpc: &mut R,
    nullifier_queue_pubkey: &Pubkey,
    merkle_tree_pubkey: &Pubkey,
) {
    let payer = rpc.get_payer().insecure_clone();
    let element = 3_u32.to_biguint().unwrap();
    // The instruction expects 32-byte big-endian values.
    insert_into_single_nullifier_queue(
        &[bigint_to_be_bytes_array(&element).unwrap()],
        &payer,
        &payer,
        nullifier_queue_pubkey,
        merkle_tree_pubkey,
        rpc,
    )
    .await
    .unwrap();
    let hash_set = unsafe { get_hash_set::<QueueAccount, R>(rpc, *nullifier_queue_pubkey).await };
    let (entry, _) = hash_set.find_element(&element, None).unwrap().unwrap();
    assert_eq!(entry.value_biguint(), element);
    assert_eq!(entry.sequence_number(), None);
}
/// Builds and sends an `InsertIntoNullifierQueues` transaction that inserts
/// `elements` into a single nullifier queue.
///
/// The on-chain instruction expects one (queue, Merkle tree) remaining
/// account pair per inserted element, so the same pair is repeated
/// `elements.len()` times.
///
/// Returns the transaction signature, or the RPC error when the transaction
/// fails (several tests assert on that error).
async fn insert_into_single_nullifier_queue<R: RpcConnection>(
    elements: &[[u8; 32]],
    fee_payer: &Keypair,
    payer: &Keypair,
    nullifier_queue_pubkey: &Pubkey,
    merkle_tree_pubkey: &Pubkey,
    context: &mut R,
) -> Result<Signature, RpcError> {
    let instruction_data = account_compression::instruction::InsertIntoNullifierQueues {
        nullifiers: elements.to_vec(),
    };
    let accounts = account_compression::accounts::InsertIntoQueues {
        fee_payer: fee_payer.pubkey(),
        authority: payer.pubkey(),
        registered_program_pda: None,
        system_program: system_program::ID,
    };
    // Push the pairs directly instead of cloning a nested Vec and flattening
    // it (this also keeps the construction consistent with
    // `insert_into_nullifier_queues`).
    let mut remaining_accounts = Vec::with_capacity(elements.len() * 2);
    for _ in 0..elements.len() {
        remaining_accounts.push(AccountMeta::new(*nullifier_queue_pubkey, false));
        remaining_accounts.push(AccountMeta::new(*merkle_tree_pubkey, false));
    }
    let instruction = Instruction {
        program_id: ID,
        accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
        data: instruction_data.data(),
    };
    let latest_blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&fee_payer.pubkey()),
        &vec![fee_payer, payer],
        latest_blockhash,
    );
    context.process_transaction(transaction).await
}
async fn insert_into_nullifier_queues<R: RpcConnection>(
elements: &[[u8; 32]],
fee_payer: &Keypair,
payer: &Keypair,
pubkeys: &[(Pubkey, Pubkey)],
context: &mut R,
) -> Result<Signature, RpcError> {
let instruction_data = account_compression::instruction::InsertIntoNullifierQueues {
nullifiers: elements.to_vec(),
};
let accounts = account_compression::accounts::InsertIntoQueues {
fee_payer: fee_payer.pubkey(),
authority: payer.pubkey(),
registered_program_pda: None,
system_program: system_program::ID,
};
let mut remaining_accounts = Vec::with_capacity(elements.len() * 2);
for (nullifier_queue_pubkey, merkle_tree_pubkey) in pubkeys.iter() {
remaining_accounts.push(AccountMeta::new(*nullifier_queue_pubkey, false));
remaining_accounts.push(AccountMeta::new(*merkle_tree_pubkey, false));
}
let instruction = Instruction {
program_id: ID,
accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
data: instruction_data.data(),
};
let latest_blockhash = context.get_latest_blockhash().await.unwrap();
let transaction = Transaction::new_signed_with_payer(
&[instruction],
Some(&fee_payer.pubkey()),
&vec![fee_payer, payer],
latest_blockhash,
);
context.process_transaction(transaction.clone()).await
}
/// Creates the Merkle tree and queue accounts with the given sizes and
/// initializes both in a single transaction.
///
/// `merkle_tree_size` / `queue_size` are passed explicitly (instead of being
/// derived from the configs) so that failing tests can request invalid sizes.
///
/// NOTE(review): the tree account is funded by `rpc.get_payer()` while the
/// queue account uses the `payer_pubkey` parameter — presumably both are the
/// same key in these tests; confirm before calling with a distinct payer.
#[allow(clippy::too_many_arguments)]
async fn initialize_state_merkle_tree_and_nullifier_queue<R: RpcConnection>(
    rpc: &mut R,
    payer_pubkey: &Pubkey,
    merkle_tree_keypair: &Keypair,
    queue_keypair: &Keypair,
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
    merkle_tree_size: usize,
    queue_size: usize,
    forester: Option<Pubkey>,
) -> Result<Signature, RpcError> {
    let merkle_tree_account_create_ix = create_account_instruction(
        &rpc.get_payer().pubkey(),
        merkle_tree_size,
        rpc.get_minimum_balance_for_rent_exemption(merkle_tree_size)
            .await
            .unwrap(),
        &ID,
        Some(merkle_tree_keypair),
    );
    let nullifier_queue_account_create_ix = create_account_instruction(
        payer_pubkey,
        queue_size,
        rpc.get_minimum_balance_for_rent_exemption(queue_size)
            .await
            .unwrap(),
        &ID,
        Some(queue_keypair),
    );
    let merkle_tree_pubkey = merkle_tree_keypair.pubkey();
    let instruction = create_initialize_merkle_tree_instruction(
        rpc.get_payer().pubkey(),
        None,
        merkle_tree_pubkey,
        queue_keypair.pubkey(),
        merkle_tree_config.clone(),
        queue_config.clone(),
        None,
        forester,
        // NOTE(review): trailing `1` — presumably an index or program-owner
        // flag; confirm against create_initialize_merkle_tree_instruction.
        1,
    );
    let latest_blockhash = rpc.get_latest_blockhash().await.unwrap();
    // Both account creations and the initialization are atomic: all three
    // instructions run in one transaction signed by payer and new accounts.
    let transaction = Transaction::new_signed_with_payer(
        &[
            merkle_tree_account_create_ix,
            nullifier_queue_account_create_ix,
            instruction,
        ],
        Some(&rpc.get_payer().pubkey()),
        &vec![&rpc.get_payer(), &merkle_tree_keypair, queue_keypair],
        latest_blockhash,
    );
    rpc.process_transaction(transaction.clone()).await
}
/// Attempts initialization with account sizes that are too small and asserts
/// that every combination fails with `InvalidAccountSize`.
pub async fn fail_initialize_state_merkle_tree_and_nullifier_queue_invalid_sizes<
    R: RpcConnection,
>(
    rpc: &mut R,
    payer_pubkey: &Pubkey,
    merkle_tree_keypair: &Keypair,
    queue_keypair: &Keypair,
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) {
    let valid_tree_size = StateMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
    );
    let valid_queue_size = QueueAccount::size(queue_config.capacity as usize).unwrap();
    // Sizes below the account struct (+ discriminator) make the Anchor
    // macros panic before our own size check runs, and that panic is not
    // assertable. Probing therefore starts at the struct size, where the
    // recoverable `InvalidAccountSize` error is thrown, and stops just
    // below the valid size.
    let min_tree_size = 8 + mem::size_of::<StateMerkleTreeAccount>();
    let min_queue_size = 8 + mem::size_of::<QueueAccount>();
    for invalid_tree_size in (min_tree_size..valid_tree_size).step_by(200_000) {
        for invalid_queue_size in (min_queue_size..valid_queue_size).step_by(50_000) {
            let result = initialize_state_merkle_tree_and_nullifier_queue(
                rpc,
                payer_pubkey,
                merkle_tree_keypair,
                queue_keypair,
                merkle_tree_config,
                queue_config,
                invalid_tree_size,
                invalid_queue_size,
                None,
            )
            .await;
            assert_rpc_error(
                result,
                2,
                AccountCompressionErrorCode::InvalidAccountSize.into(),
            )
            .unwrap();
        }
    }
}
/// Tries to initialize Merkle tree and queue with unsupported configuration
/// parameters:
///
/// 1. Merkle tree height (different than 26).
/// 2. Merkle tree canopy depth (different than 10).
/// 3. Merkle tree changelog size (zero).
/// 4. Merkle tree roots size (zero).
/// 5. Merkle tree close threshold (any).
/// 6. Queue sequence threshold (lower than roots + safety margin).
pub async fn fail_initialize_state_merkle_tree_and_nullifier_queue_invalid_config<
    R: RpcConnection,
>(
    rpc: &mut R,
    payer_pubkey: &Pubkey,
    merkle_tree_keypair: &Keypair,
    queue_keypair: &Keypair,
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) {
    // Account sizes are valid throughout — only the configs are invalid.
    let merkle_tree_size = StateMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
    );
    let queue_size = QueueAccount::size(queue_config.capacity as usize).unwrap();
    // 1a. Heights below the supported 26.
    for invalid_height in (0..26).step_by(5) {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.height = invalid_height;
        let result = initialize_state_merkle_tree_and_nullifier_queue(
            rpc,
            payer_pubkey,
            merkle_tree_keypair,
            queue_keypair,
            &merkle_tree_config,
            queue_config,
            merkle_tree_size,
            queue_size,
            None,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::UnsupportedHeight.into(),
        )
        .unwrap();
    }
    // 1b. Heights above the supported 26.
    for invalid_height in (27..50).step_by(5) {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.height = invalid_height;
        let result = initialize_state_merkle_tree_and_nullifier_queue(
            rpc,
            payer_pubkey,
            merkle_tree_keypair,
            queue_keypair,
            &merkle_tree_config,
            queue_config,
            merkle_tree_size,
            queue_size,
            None,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::UnsupportedHeight.into(),
        )
        .unwrap();
    }
    // 2. Canopy depths below the supported 10.
    for invalid_canopy_depth in (0..10).step_by(3) {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.canopy_depth = invalid_canopy_depth;
        let result = initialize_state_merkle_tree_and_nullifier_queue(
            rpc,
            payer_pubkey,
            merkle_tree_keypair,
            queue_keypair,
            &merkle_tree_config,
            queue_config,
            merkle_tree_size,
            queue_size,
            None,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::UnsupportedCanopyDepth.into(),
        )
        .unwrap();
    }
    // 3. Zero changelog size (account size recomputed to stay consistent).
    {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.changelog_size = 0;
        let merkle_tree_size = StateMerkleTreeAccount::size(
            merkle_tree_config.height as usize,
            merkle_tree_config.changelog_size as usize,
            merkle_tree_config.roots_size as usize,
            merkle_tree_config.canopy_depth as usize,
        );
        let result = initialize_state_merkle_tree_and_nullifier_queue(
            rpc,
            payer_pubkey,
            merkle_tree_keypair,
            queue_keypair,
            &merkle_tree_config,
            queue_config,
            merkle_tree_size,
            queue_size,
            None,
        )
        .await;
        assert_rpc_error(result, 2, ConcurrentMerkleTreeError::ChangelogZero.into()).unwrap();
    }
    // 4. Zero roots size (account size recomputed to stay consistent).
    {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.roots_size = 0;
        let merkle_tree_size = StateMerkleTreeAccount::size(
            merkle_tree_config.height as usize,
            merkle_tree_config.changelog_size as usize,
            merkle_tree_config.roots_size as usize,
            merkle_tree_config.canopy_depth as usize,
        );
        let result = initialize_state_merkle_tree_and_nullifier_queue(
            rpc,
            payer_pubkey,
            merkle_tree_keypair,
            queue_keypair,
            &merkle_tree_config,
            queue_config,
            merkle_tree_size,
            queue_size,
            None,
        )
        .await;
        assert_rpc_error(result, 2, ConcurrentMerkleTreeError::RootsZero.into()).unwrap();
    }
    // 5. Any close threshold is unsupported.
    for invalid_close_threshold in (0..100).step_by(20) {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.close_threshold = Some(invalid_close_threshold);
        let result = initialize_state_merkle_tree_and_nullifier_queue(
            rpc,
            payer_pubkey,
            merkle_tree_keypair,
            queue_keypair,
            &merkle_tree_config,
            queue_config,
            merkle_tree_size,
            queue_size,
            None,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::UnsupportedCloseThreshold.into(),
        )
        .unwrap();
    }
    // 6. Sequence thresholds below roots_size + SAFETY_MARGIN are rejected.
    for invalid_sequence_threshold in
        (0..merkle_tree_config.roots_size + SAFETY_MARGIN).step_by(200)
    {
        let mut queue_config = queue_config.clone();
        queue_config.sequence_threshold = invalid_sequence_threshold;
        let result = initialize_state_merkle_tree_and_nullifier_queue(
            rpc,
            payer_pubkey,
            merkle_tree_keypair,
            queue_keypair,
            merkle_tree_config,
            &queue_config,
            merkle_tree_size,
            queue_size,
            None,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::InvalidSequenceThreshold.into(),
        )
        .unwrap();
    }
}
/// Initializes a state Merkle tree + nullifier queue pair with valid sizes
/// and a random forester, then asserts both accounts were set up correctly.
///
/// Returns the Merkle tree pubkey.
async fn functional_1_initialize_state_merkle_tree_and_nullifier_queue<R: RpcConnection>(
    rpc: &mut R,
    payer_pubkey: &Pubkey,
    merkle_tree_keypair: &Keypair,
    queue_keypair: &Keypair,
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) -> Pubkey {
    let merkle_tree_size = StateMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
    );
    let queue_size = QueueAccount::size(queue_config.capacity as usize).unwrap();
    let forester = Pubkey::new_unique();
    initialize_state_merkle_tree_and_nullifier_queue(
        rpc,
        payer_pubkey,
        merkle_tree_keypair,
        queue_keypair,
        merkle_tree_config,
        queue_config,
        merkle_tree_size,
        queue_size,
        Some(forester),
    )
    .await
    .unwrap();
    assert_merkle_tree_initialized(
        rpc,
        &merkle_tree_keypair.pubkey(),
        &queue_keypair.pubkey(),
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
        // NOTE(review): positional args 1, 1, 0 — presumably the expected
        // changelog index / sequence number / next index of a fresh tree;
        // confirm against assert_merkle_tree_initialized's signature.
        1,
        1,
        0,
        &Poseidon::zero_bytes()[0],
        merkle_tree_config.rollover_threshold,
        merkle_tree_config.close_threshold,
        merkle_tree_config.network_fee.unwrap(),
        payer_pubkey,
    )
    .await;
    assert_nullifier_queue_initialized(
        rpc,
        &queue_keypair.pubkey(),
        queue_config,
        &merkle_tree_keypair.pubkey(),
        merkle_tree_config,
        QueueType::NullifierQueue,
        1,
        None,
        Some(forester),
        payer_pubkey,
    )
    .await;
    merkle_tree_keypair.pubkey()
}
/// Attempts to append `leaves` with the given Merkle tree remaining accounts
/// and asserts the transaction fails with `expected_error`.
pub async fn fail_2_append_leaves_with_invalid_inputs<R: RpcConnection>(
    context: &mut R,
    merkle_tree_pubkeys: &[Pubkey],
    leaves: Vec<(u8, [u8; 32])>,
    expected_error: u32,
) -> Result<(), RpcError> {
    let payer_pubkey = context.get_payer().pubkey();
    let accounts = account_compression::accounts::AppendLeaves {
        fee_payer: payer_pubkey,
        authority: payer_pubkey,
        registered_program_pda: None,
        system_program: system_program::ID,
    };
    // Each Merkle tree is appended as a writable remaining account.
    let mut account_metas = accounts.to_account_metas(Some(true));
    account_metas.extend(
        merkle_tree_pubkeys
            .iter()
            .map(|pubkey| AccountMeta::new(*pubkey, false)),
    );
    let instruction = Instruction {
        program_id: ID,
        accounts: account_metas,
        data: account_compression::instruction::AppendLeavesToMerkleTrees { leaves }.data(),
    };
    let latest_blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&payer_pubkey),
        &vec![&context.get_payer()],
        latest_blockhash,
    );
    let result = context.process_transaction(transaction).await;
    assert_rpc_error(result, 0, expected_error)
}
/// Appends `leaves` on-chain, then verifies per-tree: next index advanced by
/// the number of appended leaves, the on-chain root matches the reference
/// tree, rollover fees were charged, and the changelog path matches.
///
/// Each leaf is a `(tree_index, value)` pair, where `tree_index` selects the
/// target tree from `merkle_tree_pubkeys`. `reference_merkle_trees[i]` must
/// mirror the tree first referenced by leaf index `i`.
pub async fn functional_3_append_leaves_to_merkle_tree<R: RpcConnection>(
    context: &mut R,
    reference_merkle_trees: &mut [&mut MerkleTree<Poseidon>],
    merkle_tree_pubkeys: &Vec<Pubkey>,
    leaves: &Vec<(u8, [u8; 32])>,
) {
    let payer = context.get_payer().insecure_clone();
    // Per tree pubkey: (leaves for that tree, pre-tx lamports, pre-tx next
    // index, index into reference_merkle_trees).
    let mut hash_map = HashMap::<Pubkey, (Vec<[u8; 32]>, u64, usize, usize)>::new();
    for (i, leaf) in leaves {
        let pre_account_mt = context
            .get_account(merkle_tree_pubkeys[(*i) as usize])
            .await
            .unwrap()
            .unwrap();
        let old_merkle_tree =
            get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
                context,
                merkle_tree_pubkeys[(*i) as usize],
            )
            .await;
        // Only the first occurrence of a pubkey records the pre-tx state;
        // later leaves for the same tree just accumulate into the Vec.
        hash_map
            .entry(merkle_tree_pubkeys[(*i) as usize])
            .or_insert_with(|| {
                (
                    Vec::<[u8; 32]>::new(),
                    pre_account_mt.lamports,
                    old_merkle_tree.next_index(),
                    *i as usize,
                )
            })
            .0
            .push(*leaf);
    }
    let instruction = [create_insert_leaves_instruction(
        leaves.clone(),
        context.get_payer().pubkey(),
        context.get_payer().pubkey(),
        (*merkle_tree_pubkeys).clone(),
    )];
    // NOTE(review): payer is passed twice — presumably once as fee payer
    // and once as authority; confirm against create_and_send_transaction.
    context
        .create_and_send_transaction(&instruction, &payer.pubkey(), &[&payer, &payer])
        .await
        .unwrap();
    for (pubkey, (leaves, lamports, next_index, mt_index)) in hash_map.iter() {
        let num_leaves = leaves.len();
        let post_account_mt = context.get_account(*pubkey).await.unwrap().unwrap();
        let merkle_tree = AccountZeroCopy::<StateMerkleTreeAccount>::new(context, *pubkey).await;
        let merkle_tree_deserialized = merkle_tree.deserialized();
        // The rollover fee is charged once per appended leaf.
        let roll_over_fee = merkle_tree_deserialized
            .metadata
            .rollover_metadata
            .rollover_fee
            * (num_leaves as u64);
        let merkle_tree =
            get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(context, *pubkey)
                .await;
        assert_eq!(merkle_tree.next_index(), next_index + num_leaves);
        let leaves: Vec<&[u8; 32]> = leaves.iter().collect();
        // Apply the same leaves to the reference tree and compare roots.
        let reference_merkle_tree = &mut reference_merkle_trees[*mt_index];
        reference_merkle_tree.append_batch(&leaves).unwrap();
        assert_eq!(merkle_tree.root(), reference_merkle_tree.root());
        assert_eq!(lamports + roll_over_fee, post_account_mt.lamports);
        // The latest changelog entry must record the path of the last
        // appended leaf.
        let changelog_entry = merkle_tree
            .changelog
            .get(merkle_tree.changelog_index())
            .unwrap();
        let path = reference_merkle_tree
            .get_path_of_leaf(merkle_tree.current_index(), true)
            .unwrap();
        assert!(changelog_entry.path.eq_to(path));
    }
}
/// Appending with an authority that is not registered for the tree must make
/// the transaction fail.
pub async fn fail_4_append_leaves_with_invalid_authority<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
) {
    // A fresh keypair is not a valid authority; fund it so the transaction
    // fails on the authority check rather than on fees.
    let invalid_authority = Keypair::new();
    airdrop_lamports(rpc, &invalid_authority.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let accounts = account_compression::accounts::AppendLeaves {
        fee_payer: rpc.get_payer().pubkey(),
        authority: invalid_authority.pubkey(),
        registered_program_pda: None,
        system_program: system_program::ID,
    };
    let mut account_metas = accounts.to_account_metas(Some(true));
    account_metas.push(AccountMeta::new(*merkle_tree_pubkey, false));
    let instruction = Instruction {
        program_id: ID,
        accounts: account_metas,
        data: account_compression::instruction::AppendLeavesToMerkleTrees {
            leaves: vec![(0, [1u8; 32])],
        }
        .data(),
    };
    let latest_blockhash = rpc.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&invalid_authority.pubkey()),
        &vec![&rpc.get_payer(), &invalid_authority],
        latest_blockhash,
    );
    let remaining_accounts_mismatch_error = rpc.process_transaction(transaction).await;
    assert!(remaining_accounts_mismatch_error.is_err());
}
/// Sends a nullify instruction for `element`, then verifies the on-chain
/// root matches the reference tree (with the leaf zeroed), the queue entry
/// received its expiry sequence number, and the emitted V2 event references
/// the correct tree and leaf index.
///
/// Returns the RPC error without asserting state when the transaction fails,
/// so callers can assert on the specific error.
#[allow(clippy::too_many_arguments)]
pub async fn nullify<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    nullifier_queue_pubkey: &Pubkey,
    nullifier_queue_config: &NullifierQueueConfig,
    reference_merkle_tree: &mut MerkleTree<Poseidon>,
    element: &[u8; 32],
    change_log_index: u64,
    leaf_queue_index: u16,
    element_index: u64,
) -> Result<(), RpcError> {
    let payer = rpc.get_payer().insecure_clone();
    // NOTE(review): the proof is truncated to 16 nodes — presumably
    // height (26) minus canopy depth (10); confirm if the canopy depth is
    // ever configured differently.
    let proof: Vec<[u8; 32]> = reference_merkle_tree
        .get_proof_of_leaf(element_index as usize, false)
        .unwrap()
        .to_array::<16>()
        .unwrap()
        .to_vec();
    let instructions = [
        account_compression::nullify_leaves::sdk_nullify::create_nullify_instruction(
            vec![change_log_index].as_slice(),
            vec![leaf_queue_index].as_slice(),
            vec![element_index].as_slice(),
            vec![proof].as_slice(),
            &rpc.get_payer().pubkey(),
            merkle_tree_pubkey,
            nullifier_queue_pubkey,
        ),
    ];
    // `?` propagates transaction failures to the caller before any
    // state assertions run.
    let event = rpc
        .create_and_send_transaction_with_event::<MerkleTreeEvent>(
            &instructions,
            &payer.pubkey(),
            &[&payer],
            None,
        )
        .await?;
    let merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
        rpc,
        *merkle_tree_pubkey,
    )
    .await;
    // Mirror the nullification in the reference tree: the leaf is replaced
    // by the zero value.
    reference_merkle_tree
        .update(&ZERO_BYTES[0], element_index as usize)
        .unwrap();
    assert_eq!(merkle_tree.root(), reference_merkle_tree.root());
    let account = rpc
        .get_account(*nullifier_queue_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut data = account.data.clone();
    // SAFETY-note: zero-copy view over a local copy of the account data;
    // the account itself is not mutated.
    let nullifier_queue = &mut unsafe { queue_from_bytes_zero_copy_mut(&mut data).unwrap() };
    let array_element = nullifier_queue
        .get_bucket(leaf_queue_index.into())
        .unwrap()
        .unwrap();
    assert_eq!(&array_element.value_bytes(), element);
    // After nullification the queue entry carries the sequence number at
    // which it may be evicted (current sequence + threshold).
    assert_eq!(
        array_element.sequence_number(),
        Some(merkle_tree.sequence_number() + nullifier_queue_config.sequence_threshold as usize)
    );
    let event = event.unwrap().0;
    match event {
        MerkleTreeEvent::V1(_) => panic!("Expected V2 event"),
        MerkleTreeEvent::V2(event_v1) => {
            assert_eq!(event_v1.id, merkle_tree_pubkey.to_bytes());
            assert_eq!(event_v1.nullified_leaves_indices[0], element_index);
        }
        MerkleTreeEvent::V3(_) => panic!("Expected V2 event"),
    }
    Ok(())
}
/// Test helper that fills the nullifier queue account in place (via
/// `set_account`) until only `left_over_indices` free slots remain, then
/// verifies every inserted element is present.
///
/// `lamports` is written onto the modified account so rent assumptions made
/// by later instructions still hold.
pub async fn set_nullifier_queue_to_full<R: RpcConnection>(
    rpc: &mut R,
    nullifier_queue_pubkey: &Pubkey,
    left_over_indices: usize,
    lamports: u64,
) {
    let mut account = rpc
        .get_account(*nullifier_queue_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut data = account.data.clone();
    let capacity;
    {
        // Scope the zero-copy view so the borrow of `data` ends before the
        // data is written back into the account below.
        let hash_set = &mut unsafe { queue_from_bytes_zero_copy_mut(&mut data).unwrap() };
        capacity = hash_set.hash_set.get_capacity() - left_over_indices;
        println!("capacity: {}", capacity);
        // The sequence number value is irrelevant for the fullness check.
        let arbitrary_sequence_number = 0;
        for i in 0..capacity {
            hash_set
                .insert(&i.to_biguint().unwrap(), arbitrary_sequence_number)
                .unwrap();
        }
    }
    // Sanity check: the insertions actually changed the serialized data.
    assert_ne!(account.data, data);
    account.data = data;
    let mut account_share_data = AccountSharedData::from(account);
    account_share_data.set_lamports(lamports);
    rpc.set_account(nullifier_queue_pubkey, &account_share_data);
    // Re-read the account and confirm all inserted elements survived the
    // write-back.
    let account = rpc
        .get_account(*nullifier_queue_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut data = account.data.clone();
    let nullifier_queue = &mut unsafe { queue_from_bytes_zero_copy_mut(&mut data).unwrap() };
    for i in 0..capacity {
        assert!(nullifier_queue
            .contains(&i.to_biguint().unwrap(), None)
            .unwrap());
    }
}
/// Finds a value whose quadratic-probing sequence collides with that of
/// `initial_value` within the first 20 probes.
///
/// The hash set resolves collisions by probing index `(value + i^2) %
/// capacity`; starting at `start_replacement_value`, consecutive candidate
/// values are tried until one shares a probe index with `initial_value` for
/// some probe iteration `i < 20`.
///
/// # Panics
/// Panics if no overlapping candidate is found within `capacity_values`
/// attempts.
fn find_overlapping_probe_index(
    initial_value: usize,
    start_replacement_value: usize,
    capacity_values: usize,
) -> usize {
    // Hoisted loop invariant: the capacity only needs one BigUint conversion.
    let capacity = capacity_values.to_biguint().unwrap();
    for salt in 0..capacity_values {
        let replacement_value = start_replacement_value + salt;
        for i in 0..20usize {
            // i < 20, so i * i fits comfortably in usize; convert once per
            // probe instead of four conversions per iteration.
            let offset = (i * i).to_biguint().unwrap();
            let probe_index = (initial_value.to_biguint().unwrap() + &offset) % &capacity;
            let replacement_probe_index =
                (replacement_value.to_biguint().unwrap() + &offset) % &capacity;
            if probe_index == replacement_probe_index {
                return replacement_value;
            }
        }
    }
    panic!("No value with overlapping probe index found!");
}
/// Attempts to insert `elements` into a nullifier queue that is already full
/// and asserts that the transaction fails with `HashSetError::Full`.
async fn fail_insert_into_full_queue<R: RpcConnection>(
    context: &mut R,
    nullifier_queue_pubkey: &Pubkey,
    merkle_tree_pubkey: &Pubkey,
    elements: Vec<[u8; 32]>,
) {
    let signer = context.get_payer().insecure_clone();
    let result = insert_into_single_nullifier_queue(
        elements.as_slice(),
        &signer,
        &signer,
        nullifier_queue_pubkey,
        merkle_tree_pubkey,
        context,
    )
    .await;
    // The first (index 0) instruction must fail because the queue is full.
    assert_rpc_error(result, 0, HashSetError::Full.into()).unwrap();
}
/// Test helper: overwrites the sequence number of a state Merkle tree account
/// in place, bypassing the program.
///
/// Deserializes the account data zero-copy, sets the raw `sequence_number`
/// field, writes the account back with the given `lamports` balance, then
/// re-reads the account to assert the update persisted.
pub async fn set_state_merkle_tree_sequence<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    sequence_number: u64,
    lamports: u64,
) {
    let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap();
    {
        // Skip the 8-byte discriminator plus the account struct header to
        // reach the zero-copy concurrent Merkle tree data.
        let merkle_tree_deserialized =
            &mut ConcurrentMerkleTreeZeroCopyMut::<Poseidon, 26>::from_bytes_zero_copy_mut(
                &mut merkle_tree.data[8 + mem::size_of::<StateMerkleTreeAccount>()..],
            )
            .unwrap();
        unsafe {
            // NOTE(review): writes through the raw pointer held by the
            // zero-copy view; `merkle_tree.data` outlives this scope.
            *merkle_tree_deserialized.sequence_number = sequence_number as usize;
        }
    }
    let mut account_share_data = AccountSharedData::from(merkle_tree);
    account_share_data.set_lamports(lamports);
    rpc.set_account(merkle_tree_pubkey, &account_share_data);
    // Read the account back and confirm the new sequence number is in effect.
    let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap();
    let merkle_tree_deserialized =
        ConcurrentMerkleTreeZeroCopyMut::<Poseidon, 26>::from_bytes_zero_copy_mut(
            &mut merkle_tree.data[8 + mem::size_of::<StateMerkleTreeAccount>()..],
        )
        .unwrap();
    assert_eq!(
        merkle_tree_deserialized.sequence_number() as u64,
        sequence_number
    );
}
/// Asserts that `nullifier` is present in the given nullifier queue and has
/// no sequence number assigned yet (inserted, but not yet processed).
pub async fn assert_element_inserted_in_nullifier_queue(
    rpc: &mut ProgramTestRpcConnection,
    nullifier_queue_pubkey: &Pubkey,
    nullifier: [u8; 32],
) {
    let queue = unsafe {
        get_hash_set::<QueueAccount, ProgramTestRpcConnection>(rpc, *nullifier_queue_pubkey).await
    };
    let needle = BigUint::from_bytes_be(nullifier.as_slice());
    let (element, _) = queue.find_element(&needle, None).unwrap().unwrap();
    assert_eq!(element.value_bytes(), nullifier);
    assert_eq!(element.sequence_number(), None);
}
/// Inserts `nullifiers` into two different nullifier queues in a single call
/// and verifies that queue 0 received nullifier 0 and queue 1 nullifier 1.
async fn functional_6_test_insert_into_two_nullifier_queues(
    rpc: &mut ProgramTestRpcConnection,
    nullifiers: &[[u8; 32]],
    queue_tree_pairs: &[(Pubkey, Pubkey)],
) {
    let signer = rpc.get_payer().insecure_clone();
    insert_into_nullifier_queues(nullifiers, &signer, &signer, queue_tree_pairs, rpc)
        .await
        .unwrap();
    // Nullifier i is expected to land in queue i.
    for i in 0..2 {
        assert_element_inserted_in_nullifier_queue(rpc, &queue_tree_pairs[i].0, nullifiers[i])
            .await;
    }
}
/// Inserts `nullifiers` into two queues where the queue/tree accounts are not
/// passed in insertion order, and verifies each nullifier reached the queue
/// it was destined for (even indices -> queue 0, odd indices -> queue 1).
async fn functional_7_test_insert_into_two_nullifier_queues_not_ordered(
    rpc: &mut ProgramTestRpcConnection,
    nullifiers: &[[u8; 32]],
    queue_tree_pairs: &[(Pubkey, Pubkey)],
) {
    let signer = rpc.get_payer().insecure_clone();
    insert_into_nullifier_queues(nullifiers, &signer, &signer, queue_tree_pairs, rpc)
        .await
        .unwrap();
    // (queue index, nullifier index) pairs expected after the unordered insert.
    for (queue_idx, nullifier_idx) in [(0usize, 0usize), (0, 2), (1, 1), (1, 3)] {
        assert_element_inserted_in_nullifier_queue(
            rpc,
            &queue_tree_pairs[queue_idx].0,
            nullifiers[nullifier_idx],
        )
        .await;
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/account-compression-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/account-compression-test/tests/address_merkle_tree_tests.rs
|
#![cfg(feature = "test-sbf")]
use std::mem;
use account_compression::{
errors::AccountCompressionErrorCode,
state::QueueAccount,
utils::constants::{ADDRESS_MERKLE_TREE_CANOPY_DEPTH, ADDRESS_MERKLE_TREE_HEIGHT},
AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, ID, SAFETY_MARGIN,
};
use anchor_lang::error::ErrorCode;
use ark_bn254::Fr;
use ark_ff::{BigInteger, PrimeField, UniformRand};
use light_bounded_vec::BoundedVecError;
use light_concurrent_merkle_tree::errors::ConcurrentMerkleTreeError;
use light_hash_set::{HashSet, HashSetError};
use light_hasher::Poseidon;
use light_indexed_merkle_tree::{array::IndexedArray, errors::IndexedMerkleTreeError, reference};
use light_program_test::test_env::NOOP_PROGRAM_ID;
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_test_utils::{
address_tree_rollover::perform_address_merkle_tree_roll_over,
create_address_merkle_tree_and_queue_account_with_assert, test_forester::update_merkle_tree,
};
use light_test_utils::{
address_tree_rollover::{
assert_rolled_over_address_merkle_tree_and_queue, set_address_merkle_tree_next_index,
},
test_forester::{empty_address_queue_test, insert_addresses},
};
use light_test_utils::{
airdrop_lamports, assert_rpc_error, create_account_instruction, get_hash_set,
get_indexed_merkle_tree, AddressMerkleTreeAccounts, AddressMerkleTreeBundle, FeeConfig,
RpcConnection, RpcError,
};
use light_utils::bigint::bigint_to_be_bytes_array;
use num_bigint::ToBigUint;
use rand::thread_rng;
use solana_program_test::ProgramTest;
use solana_sdk::{
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
transaction::Transaction,
};
/// Tests insertion of addresses to the queue, dequeuing and Merkle tree update.
/// 1. create address Merkle tree and queue accounts
/// 2. inserts two addresses to the queue
/// 3. inserts two addresses into the address Merkle tree
/// 4. insert third address (which sorts between the first two)
async fn address_queue_and_tree_functional(
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
) {
    // CHECK: 1 create address Merkle tree and queue accounts
    let (mut context, _, mut address_merkle_tree_bundle) =
        test_setup_with_address_merkle_tree(merkle_tree_config, queue_config).await;
    let payer = context.get_payer().insecure_clone();
    let address_queue_pubkey = address_merkle_tree_bundle.accounts.queue;
    let address_merkle_tree_pubkey = address_merkle_tree_bundle.accounts.merkle_tree;
    // Insert a pair of addresses.
    let address1 = 30_u32.to_biguint().unwrap();
    let address2 = 10_u32.to_biguint().unwrap();
    let addresses: Vec<[u8; 32]> = vec![
        bigint_to_be_bytes_array(&address1).unwrap(),
        bigint_to_be_bytes_array(&address2).unwrap(),
    ];
    // CHECK: 2 inserts two addresses to the queue
    insert_addresses(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        addresses.clone(),
    )
    .await
    .unwrap();
    // Both addresses must be findable in the on-chain queue account.
    let address_queue = unsafe {
        get_hash_set::<QueueAccount, ProgramTestRpcConnection>(&mut context, address_queue_pubkey)
            .await
    };
    assert!(address_queue.contains(&address1, None).unwrap());
    assert!(address_queue.contains(&address2, None).unwrap());
    // CHECK: 3 inserts two addresses into the address Merkle tree
    empty_address_queue_test(
        &payer,
        &mut context,
        &mut address_merkle_tree_bundle,
        true,
        0,
        false,
    )
    .await
    .unwrap();
    let address3 = 20_u32.to_biguint().unwrap();
    let addresses: Vec<[u8; 32]> = vec![bigint_to_be_bytes_array(&address3).unwrap()];
    insert_addresses(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        addresses,
    )
    .await
    .unwrap();
    let address_queue = unsafe {
        get_hash_set::<QueueAccount, ProgramTestRpcConnection>(&mut context, address_queue_pubkey)
            .await
    };
    address_queue
        .find_element(&address3, None)
        .unwrap()
        .unwrap();
    // CHECK: 4 insert third address which is inbetween the first two addresses
    empty_address_queue_test(
        &payer,
        &mut context,
        &mut address_merkle_tree_bundle,
        true,
        0,
        false,
    )
    .await
    .unwrap();
}
/// Runs the functional address queue/tree flow with default configurations.
#[tokio::test]
async fn test_address_queue_and_tree_functional_default() {
    let merkle_tree_config = AddressMerkleTreeConfig::default();
    let queue_config = AddressQueueConfig::default();
    address_queue_and_tree_functional(&merkle_tree_config, &queue_config).await
}
/// Runs the functional address queue/tree flow across a grid of non-default
/// configurations, skipping combinations where the root history is smaller
/// than the changelog.
#[tokio::test]
async fn test_address_queue_and_tree_functional_custom() {
    for changelog_size in [1, 1000, 2000] {
        for roots_size in [1, 1000, 2000] {
            // Only exercise configurations with enough root history.
            if roots_size >= changelog_size {
                for queue_capacity in [5003, 6857, 7901] {
                    for address_changelog_size in (250..1000).step_by(250) {
                        let merkle_tree_config = AddressMerkleTreeConfig {
                            height: ADDRESS_MERKLE_TREE_HEIGHT as u32,
                            changelog_size,
                            roots_size,
                            canopy_depth: ADDRESS_MERKLE_TREE_CANOPY_DEPTH,
                            address_changelog_size,
                            network_fee: Some(5000),
                            rollover_threshold: Some(95),
                            close_threshold: None,
                        };
                        let queue_config = AddressQueueConfig {
                            capacity: queue_capacity,
                            sequence_threshold: roots_size + SAFETY_MARGIN,
                            network_fee: None,
                        };
                        address_queue_and_tree_functional(&merkle_tree_config, &queue_config)
                            .await;
                    }
                }
            }
        }
    }
}
/// Creates the queue and Merkle tree accounts with the given byte sizes and
/// initializes them via `initialize_address_merkle_tree_and_queue`, all in a
/// single transaction.
///
/// The account sizes are explicit parameters (instead of being derived from
/// the configs) so that tests can deliberately pass invalid sizes.
///
/// Returns the transaction signature, or the RPC error if the transaction
/// fails (which several negative tests rely on).
#[allow(clippy::too_many_arguments)]
async fn initialize_address_merkle_tree_and_queue<R: RpcConnection>(
    context: &mut R,
    payer: &Keypair,
    merkle_tree_keypair: &Keypair,
    queue_keypair: &Keypair,
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
    merkle_tree_size: usize,
    queue_size: usize,
) -> Result<Signature, RpcError> {
    let queue_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        queue_size,
        context
            .get_minimum_balance_for_rent_exemption(queue_size)
            .await
            .unwrap(),
        &ID,
        Some(queue_keypair),
    );
    let mt_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        merkle_tree_size,
        context
            .get_minimum_balance_for_rent_exemption(merkle_tree_size)
            .await
            .unwrap(),
        &ID,
        Some(merkle_tree_keypair),
    );
    let instruction =
        account_compression::sdk::create_initialize_address_merkle_tree_and_queue_instruction(
            0,
            payer.pubkey(),
            None,
            None,
            Some(Pubkey::new_unique()),
            merkle_tree_keypair.pubkey(),
            queue_keypair.pubkey(),
            merkle_tree_config.clone(),
            queue_config.clone(),
        );
    let transaction = Transaction::new_signed_with_payer(
        &[queue_account_create_ix, mt_account_create_ix, instruction],
        Some(&payer.pubkey()),
        // A signer slice is sufficient; no need to allocate a Vec here.
        &[payer, queue_keypair, merkle_tree_keypair],
        context.get_latest_blockhash().await.unwrap(),
    );
    // The transaction is not used after this call, so the previous
    // `transaction.clone()` was a redundant allocation.
    context.process_transaction(transaction).await
}
/// Tries to initialize the address Merkle tree and queue with account sizes
/// smaller than required and expects `InvalidAccountSize` in every case:
/// invalid tree + invalid queue, invalid tree + valid queue, and valid tree +
/// invalid queue.
#[tokio::test]
async fn test_address_queue_and_tree_invalid_sizes() {
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    program_test.add_program("spl_noop", NOOP_PROGRAM_ID, None);
    let context = program_test.start_with_context().await;
    let mut context = ProgramTestRpcConnection { context };
    let payer = context.get_payer().insecure_clone();
    let address_merkle_tree_keypair = Keypair::new();
    let address_queue_keypair = Keypair::new();
    let queue_config = AddressQueueConfig::default();
    let merkle_tree_config = AddressMerkleTreeConfig::default();
    // Expected (valid) sizes derived from the default configurations.
    let valid_queue_size =
        QueueAccount::size(account_compression::utils::constants::ADDRESS_QUEUE_VALUES as usize)
            .unwrap();
    let valid_tree_size = AddressMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
        merkle_tree_config.address_changelog_size as usize,
    );
    // NOTE: Starting from 0 to the account struct size triggers a panic in Anchor
    // macros (sadly, not assertable...), which happens earlier than our
    // serialization error.
    // Our recoverable error is thrown for ranges from the struct size
    // (+ discriminator) up to the expected account size.
    // Invalid MT size + invalid queue size.
    for tree_size in
        (8 + mem::size_of::<AddressMerkleTreeAccount>()..=valid_tree_size).step_by(200_000)
    {
        for queue_size in (8 + mem::size_of::<QueueAccount>()..=valid_queue_size).step_by(50_000) {
            let result = initialize_address_merkle_tree_and_queue(
                &mut context,
                &payer,
                &address_merkle_tree_keypair,
                &address_queue_keypair,
                &merkle_tree_config,
                &queue_config,
                tree_size,
                queue_size,
            )
            .await;
            // Instruction index 2 is the initialize instruction (after the
            // two account-creation instructions).
            assert_rpc_error(
                result,
                2,
                AccountCompressionErrorCode::InvalidAccountSize.into(),
            )
            .unwrap()
        }
    }
    // Invalid MT size + valid queue size.
    for tree_size in
        (8 + mem::size_of::<AddressMerkleTreeAccount>()..=valid_tree_size).step_by(200_000)
    {
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            tree_size,
            valid_queue_size,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::InvalidAccountSize.into(),
        )
        .unwrap()
    }
    // Valid MT size + invalid queue size.
    for queue_size in (8 + mem::size_of::<QueueAccount>()..=valid_queue_size).step_by(50_000) {
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            valid_tree_size,
            queue_size,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::InvalidAccountSize.into(),
        )
        .unwrap()
    }
}
/// Tries to initialize Merkle tree and queue with unsupported configuration
/// parameters:
///
/// 1. Merkle tree height (different than 26).
/// 2. Merkle tree canopy depth (different than 10).
/// 3. Merkle tree changelog size (zero).
/// 4. Merkle tree roots size (zero).
/// 5. Merkle tree close threshold (any).
/// 6. Queue sequence threshold (lower than roots + safety margin).
#[tokio::test]
async fn test_address_queue_and_tree_invalid_config() {
    let mut program_test = ProgramTest::default();
    program_test.add_program("account_compression", ID, None);
    program_test.add_program("spl_noop", NOOP_PROGRAM_ID, None);
    let context = program_test.start_with_context().await;
    let mut context = ProgramTestRpcConnection { context };
    let payer = context.get_payer().insecure_clone();
    let address_merkle_tree_keypair = Keypair::new();
    let address_queue_keypair = Keypair::new();
    let queue_config = AddressQueueConfig::default();
    let merkle_tree_config = AddressMerkleTreeConfig::default();
    // Valid account sizes — only the configs are made invalid below.
    let queue_size =
        QueueAccount::size(account_compression::utils::constants::ADDRESS_QUEUE_VALUES as usize)
            .unwrap();
    let tree_size = AddressMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
        merkle_tree_config.address_changelog_size as usize,
    );
    // CHECK: 1 heights below the supported 26.
    for invalid_height in (0..26).step_by(5) {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.height = invalid_height;
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            tree_size,
            queue_size,
        )
        .await;
        println!("Invalid result: {}", result.as_ref().unwrap_err());
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::UnsupportedHeight.into(),
        )
        .unwrap();
    }
    // CHECK: 1 heights above the supported 26.
    for invalid_height in (27..50).step_by(5) {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.height = invalid_height;
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            tree_size,
            queue_size,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::UnsupportedHeight.into(),
        )
        .unwrap();
    }
    // CHECK: 2 canopy depths below the supported 10.
    for invalid_canopy_depth in (0..10).step_by(3) {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.canopy_depth = invalid_canopy_depth;
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            tree_size,
            queue_size,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::UnsupportedCanopyDepth.into(),
        )
        .unwrap();
    }
    // CHECK: 3 zero changelog size (account size recomputed to match).
    {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.changelog_size = 0;
        let tree_size = AddressMerkleTreeAccount::size(
            merkle_tree_config.height as usize,
            merkle_tree_config.changelog_size as usize,
            merkle_tree_config.roots_size as usize,
            merkle_tree_config.canopy_depth as usize,
            merkle_tree_config.address_changelog_size as usize,
        );
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            tree_size,
            queue_size,
        )
        .await;
        assert_rpc_error(result, 2, ConcurrentMerkleTreeError::ChangelogZero.into()).unwrap();
    }
    // CHECK: 4 zero roots size (account size recomputed to match).
    {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.roots_size = 0;
        let tree_size = AddressMerkleTreeAccount::size(
            merkle_tree_config.height as usize,
            merkle_tree_config.changelog_size as usize,
            merkle_tree_config.roots_size as usize,
            merkle_tree_config.canopy_depth as usize,
            merkle_tree_config.address_changelog_size as usize,
        );
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            tree_size,
            queue_size,
        )
        .await;
        assert_rpc_error(result, 2, ConcurrentMerkleTreeError::RootsZero.into()).unwrap();
    }
    // CHECK: 5 any close threshold is rejected.
    for invalid_close_threshold in (0..100).step_by(20) {
        let mut merkle_tree_config = merkle_tree_config.clone();
        merkle_tree_config.close_threshold = Some(invalid_close_threshold);
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            tree_size,
            queue_size,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::UnsupportedCloseThreshold.into(),
        )
        .unwrap();
    }
    // CHECK: 6 sequence thresholds below roots_size + SAFETY_MARGIN.
    for invalid_sequence_threshold in
        (0..merkle_tree_config.roots_size + SAFETY_MARGIN).step_by(200)
    {
        let mut queue_config = queue_config.clone();
        queue_config.sequence_threshold = invalid_sequence_threshold;
        let result = initialize_address_merkle_tree_and_queue(
            &mut context,
            &payer,
            &address_merkle_tree_keypair,
            &address_queue_keypair,
            &merkle_tree_config,
            &queue_config,
            tree_size,
            queue_size,
        )
        .await;
        assert_rpc_error(
            result,
            2,
            AccountCompressionErrorCode::InvalidSequenceThreshold.into(),
        )
        .unwrap();
    }
}
/// Try to insert an address to the tree while providing invalid parameters.
///
/// Such invalid insertion needs to be performed manually, without relayer's
/// help (which would always insert that nullifier correctly).
/// Tests:
/// 1. cannot insert the same address twice
/// 2. cannot insert an address with an invalid low address
/// 2.1 cannot insert an address with an invalid low address (NewElementGreaterOrEqualToNextElement)
/// 2.2 cannot insert an address with an invalid low address (LowElementGreaterOrEqualToNewElement)
/// 3.1 invalid value index (element does not exist)
/// 3.2 invalid value index (element has a sequence number)
/// 4. invalid low element index
/// 5. invalid low element value
/// 6. invalid low element next index
/// 7. invalid low element next value
/// 8. invalid low element proof
/// 9. invalid changelog index (lower)
/// 10. invalid changelog index (higher)
/// 11. invalid indexed changelog index (higher)
/// 12. invalid queue account
/// 13. invalid Merkle tree account
/// 14. non-associated Merkle tree
async fn update_address_merkle_tree_failing_tests(
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
) {
    let (mut context, payer, mut address_merkle_tree_bundle) =
        test_setup_with_address_merkle_tree(merkle_tree_config, queue_config).await;
    let address_queue_pubkey = address_merkle_tree_bundle.accounts.queue;
    let address_merkle_tree_pubkey = address_merkle_tree_bundle.accounts.merkle_tree;
    // Insert a pair of addresses, correctly. Just do it with relayer.
    let address1 = 30_u32.to_biguint().unwrap();
    let address2 = 10_u32.to_biguint().unwrap();
    let addresses: Vec<[u8; 32]> = vec![
        bigint_to_be_bytes_array(&address1).unwrap(),
        bigint_to_be_bytes_array(&address2).unwrap(),
    ];
    insert_addresses(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        addresses.clone(),
    )
    .await
    .unwrap();
    // Move both addresses from the queue into the Merkle tree so re-insertion
    // below is guaranteed to be a duplicate.
    empty_address_queue_test(
        &payer,
        &mut context,
        &mut address_merkle_tree_bundle,
        true,
        0,
        false,
    )
    .await
    .unwrap();
    // CHECK: 1 cannot insert the same address twice
    let result = insert_addresses(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        vec![bigint_to_be_bytes_array::<32>(&address1).unwrap()],
    )
    .await;
    assert_rpc_error(result, 0, HashSetError::ElementAlreadyExists.into()).unwrap();
    let result = insert_addresses(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        vec![bigint_to_be_bytes_array::<32>(&address2).unwrap()],
    )
    .await;
    assert_rpc_error(result, 0, HashSetError::ElementAlreadyExists.into()).unwrap();
    // Insert address3=20 for subsequent failing tests.
    let address3 = 20_u32.to_biguint().unwrap();
    let address3_bytes = bigint_to_be_bytes_array::<32>(&address3).unwrap();
    insert_addresses(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        vec![address3_bytes],
    )
    .await
    .unwrap();
    let address4 = 21_u32.to_biguint().unwrap();
    let address4_bytes = bigint_to_be_bytes_array::<32>(&address4).unwrap();
    insert_addresses(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        vec![address4_bytes],
    )
    .await
    .unwrap();
    let address_queue = unsafe {
        get_hash_set::<QueueAccount, ProgramTestRpcConnection>(&mut context, address_queue_pubkey)
            .await
    };
    // CHECK: 2.1 cannot insert an address with an invalid low address
    test_with_invalid_low_element(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        &address_queue,
        &address_merkle_tree_bundle,
        0,
        IndexedMerkleTreeError::NewElementGreaterOrEqualToNextElement.into(),
    )
    .await;
    // CHECK: 2.2 cannot insert an address with an invalid low address
    test_with_invalid_low_element(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        &address_queue,
        &address_merkle_tree_bundle,
        1,
        IndexedMerkleTreeError::LowElementGreaterOrEqualToNewElement.into(),
    )
    .await;
    // Take the first queued element without a sequence number and derive all
    // the (valid) update parameters once; the checks below corrupt one
    // parameter at a time.
    let (address, address_hashset_index) = address_queue.first_no_seq().unwrap().unwrap();
    let (low_element, low_element_next_value) = address_merkle_tree_bundle
        .indexed_array
        .find_low_element_for_nonexistent(&address.value_biguint())
        .unwrap();
    // Get the Merkle proof for updating low element.
    let low_element_proof = address_merkle_tree_bundle
        .merkle_tree
        .get_proof_of_leaf(low_element.index, false)
        .unwrap();
    let value_index = address_hashset_index;
    // CHECK: 3.1 invalid value index (value doesn't exist)
    let invalid_value_index = 10;
    // unwraps on a None value onchain.
    update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        invalid_value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await
    .unwrap_err();
    // CHECK: 3.2 invalid value index (value has a sequence number)
    let invalid_value_index = 0;
    // unwraps on a None value onchain.
    update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        invalid_value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await
    .unwrap_err();
    // CHECK: 4 invalid low element index
    let invalid_lower_element_index = low_element.index - 1;
    let error_invalid_low_element_index = update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        value_index,
        invalid_lower_element_index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error_invalid_low_element_index,
        0,
        ConcurrentMerkleTreeError::InvalidProof([0; 32], [0; 32]).into(),
    )
    .unwrap();
    // CHECK: 5 invalid low element value
    let invalid_low_element_value = [0u8; 32];
    let error_invalid_low_element_value = update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        value_index,
        low_element.index as u64,
        invalid_low_element_value,
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error_invalid_low_element_value,
        0,
        ConcurrentMerkleTreeError::InvalidProof([0; 32], [0; 32]).into(),
    )
    .unwrap();
    // CHECK: 6 invalid low element next index
    let invalid_low_element_next_index = 1;
    let error_invalid_low_element_next_index = update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        invalid_low_element_next_index,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error_invalid_low_element_next_index,
        0,
        ConcurrentMerkleTreeError::InvalidProof([0; 32], [0; 32]).into(),
    )
    .unwrap();
    // CHECK: 7 invalid low element next value
    let invalid_low_element_next_value = [9u8; 32];
    let error_invalid_low_element_next_value = update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        invalid_low_element_next_value,
        low_element_proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error_invalid_low_element_next_value,
        0,
        ConcurrentMerkleTreeError::InvalidProof([0; 32], [0; 32]).into(),
    )
    .unwrap();
    // CHECK: 8 invalid low element proof
    let mut invalid_low_element_proof = low_element_proof.to_array().unwrap();
    invalid_low_element_proof.get_mut(0).unwrap()[0] = 0;
    let error_invalid_low_element_proof = update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        invalid_low_element_proof,
        None,
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error_invalid_low_element_proof,
        0,
        ConcurrentMerkleTreeError::InvalidProof([0; 32], [0; 32]).into(),
    )
    .unwrap();
    let address_merkle_tree = get_indexed_merkle_tree::<
        AddressMerkleTreeAccount,
        ProgramTestRpcConnection,
        Poseidon,
        usize,
        26,
        16,
    >(&mut context, address_merkle_tree_pubkey)
    .await;
    let changelog_index = address_merkle_tree.changelog_index();
    // The changelog-index checks need room to go 2 below/above the current index.
    if merkle_tree_config.changelog_size >= 2 {
        // CHECK: 9 invalid changelog index (lower)
        let invalid_changelog_index_low = changelog_index - 2;
        let error_invalid_changelog_index_low = update_merkle_tree(
            &mut context,
            &payer,
            address_queue_pubkey,
            address_merkle_tree_pubkey,
            value_index,
            low_element.index as u64,
            bigint_to_be_bytes_array(&low_element.value).unwrap(),
            low_element.next_index as u64,
            bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
            low_element_proof.to_array().unwrap(),
            Some(invalid_changelog_index_low as u16),
            None,
            true,
            0,
            false,
        )
        .await;
        assert_rpc_error(
            error_invalid_changelog_index_low,
            0,
            ConcurrentMerkleTreeError::CannotUpdateLeaf.into(),
        )
        .unwrap();
        // CHECK: 10 invalid changelog index (higher)
        let invalid_changelog_index_high = changelog_index + 2;
        let error_invalid_changelog_index_high = update_merkle_tree(
            &mut context,
            &payer,
            address_queue_pubkey,
            address_merkle_tree_pubkey,
            value_index,
            low_element.index as u64,
            bigint_to_be_bytes_array(&low_element.value).unwrap(),
            low_element.next_index as u64,
            bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
            low_element_proof.to_array().unwrap(),
            Some(invalid_changelog_index_high as u16),
            None,
            true,
            0,
            false,
        )
        .await;
        assert_rpc_error(
            error_invalid_changelog_index_high,
            0,
            BoundedVecError::IterFromOutOfBounds.into(),
        )
        .unwrap();
    }
    let indexed_changelog_index = address_merkle_tree.indexed_changelog_index();
    if merkle_tree_config.address_changelog_size >= 2 {
        // CHECK: 11 invalid indexed changelog index (higher)
        let invalid_indexed_changelog_index_high = indexed_changelog_index + 1;
        let error_invalid_indexed_changelog_index_high = update_merkle_tree(
            &mut context,
            &payer,
            address_queue_pubkey,
            address_merkle_tree_pubkey,
            value_index,
            low_element.index as u64,
            bigint_to_be_bytes_array(&low_element.value).unwrap(),
            low_element.next_index as u64,
            bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
            low_element_proof.to_array().unwrap(),
            None,
            Some(invalid_indexed_changelog_index_high as u16),
            true,
            0,
            false,
        )
        .await;
        assert_rpc_error(
            error_invalid_indexed_changelog_index_high,
            0,
            BoundedVecError::IterFromOutOfBounds.into(),
        )
        .unwrap();
    }
    // CHECK: 12 invalid queue account (pass the Merkle tree account instead)
    let invalid_queue = address_merkle_tree_pubkey;
    let error_invalid_queue = update_merkle_tree(
        &mut context,
        &payer,
        invalid_queue,
        address_merkle_tree_pubkey,
        value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error_invalid_queue,
        0,
        ErrorCode::AccountDiscriminatorMismatch.into(),
    )
    .unwrap();
    // CHECK: 13 invalid Merkle tree account (pass the queue account instead)
    let indexed_changelog_index = address_merkle_tree.indexed_changelog_index();
    let invalid_merkle_tree = address_queue_pubkey;
    let error_invalid_merkle_tree = update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        invalid_merkle_tree,
        value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        Some(changelog_index as u16),
        Some(indexed_changelog_index as u16),
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error_invalid_merkle_tree,
        0,
        ErrorCode::AccountDiscriminatorMismatch.into(),
    )
    .unwrap();
    // Create a second, valid tree/queue pair that is NOT associated with the
    // original queue for the final check.
    let invalid_address_merkle_tree_keypair = Keypair::new();
    let invalid_address_queue_keypair = Keypair::new();
    create_address_merkle_tree_and_queue_account_with_assert(
        &payer,
        false,
        &mut context,
        &invalid_address_merkle_tree_keypair,
        &invalid_address_queue_keypair,
        None,
        None,
        merkle_tree_config,
        queue_config,
        2,
    )
    .await
    .unwrap();
    // CHECK: 14 non-associated Merkle tree
    let invalid_merkle_tree = invalid_address_merkle_tree_keypair.pubkey();
    let error_non_associated_merkle_tree = update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        invalid_merkle_tree,
        value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        Some(changelog_index as u16),
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error_non_associated_merkle_tree,
        0,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
}
/// Failing-path coverage with the default tree and queue configuration.
#[tokio::test]
async fn update_address_merkle_tree_failing_tests_default() {
    let merkle_tree_config = AddressMerkleTreeConfig::default();
    let queue_config = AddressQueueConfig::default();
    update_address_merkle_tree_failing_tests(&merkle_tree_config, &queue_config).await
}
/// Wraps the indexed changelog around by inserting/processing random
/// addresses, then attempts a Merkle tree update using a low-element proof
/// that was captured before the wrap-around and expects the update to fail
/// with `InvalidProof`.
async fn update_address_merkle_tree_wrap_around(
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
) {
    let (mut context, payer, mut address_merkle_tree_bundle) =
        test_setup_with_address_merkle_tree(merkle_tree_config, queue_config).await;
    let address_queue_pubkey = address_merkle_tree_bundle.accounts.queue;
    let address_merkle_tree_pubkey = address_merkle_tree_bundle.accounts.merkle_tree;
    // Insert a pair of addresses, correctly. Just do it with relayer.
    let address1 = 30_u32.to_biguint().unwrap();
    let address2 = 10_u32.to_biguint().unwrap();
    let addresses: Vec<[u8; 32]> = vec![
        bigint_to_be_bytes_array(&address1).unwrap(),
        bigint_to_be_bytes_array(&address2).unwrap(),
    ];
    // Capture the low element and its proof NOW — they become stale after the
    // changelog wraps around below.
    let (low_element, low_element_next_value) = address_merkle_tree_bundle
        .indexed_array
        .find_low_element_for_nonexistent(&address1)
        .unwrap();
    // Get the Merkle proof for updating low element.
    let low_element_proof = address_merkle_tree_bundle
        .merkle_tree
        .get_proof_of_leaf(low_element.index, false)
        .unwrap();
    // Wrap around the indexed changelog with conflicting elements.
    let mut rng = thread_rng();
    for _ in (0..merkle_tree_config.address_changelog_size).step_by(10) {
        // Batches of 10 random field elements per round trip.
        let addresses: Vec<[u8; 32]> = (0..10)
            .map(|_| {
                Fr::rand(&mut rng)
                    .into_bigint()
                    .to_bytes_be()
                    .try_into()
                    .unwrap()
            })
            .collect::<Vec<_>>();
        insert_addresses(
            &mut context,
            address_queue_pubkey,
            address_merkle_tree_pubkey,
            addresses,
        )
        .await
        .unwrap();
        empty_address_queue_test(
            &payer,
            &mut context,
            &mut address_merkle_tree_bundle,
            true,
            0,
            false,
        )
        .await
        .unwrap();
    }
    insert_addresses(
        &mut context,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        addresses.clone(),
    )
    .await
    .unwrap();
    let address_queue = unsafe {
        get_hash_set::<QueueAccount, ProgramTestRpcConnection>(&mut context, address_queue_pubkey)
            .await
    };
    let value_index = address_queue
        .find_element_index(&address1, None)
        .unwrap()
        .unwrap();
    // Update attempt with the pre-wrap-around proof must be rejected.
    let error = update_merkle_tree(
        &mut context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        value_index as u16,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&low_element_next_value).unwrap(),
        low_element_proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(
        error,
        0,
        ConcurrentMerkleTreeError::InvalidProof([0; 32], [0; 32]).into(),
    )
    .unwrap();
}
/// Runs the changelog wrap-around test against the default address tree and
/// queue configurations.
#[tokio::test]
async fn update_address_merkle_tree_wrap_around_default() {
    let merkle_tree_config = AddressMerkleTreeConfig::default();
    let queue_config = AddressQueueConfig::default();
    update_address_merkle_tree_wrap_around(&merkle_tree_config, &queue_config).await
}
/// Runs the changelog wrap-around test with fixed tree/queue sizing while
/// varying only the indexed (address) changelog size.
#[tokio::test]
async fn update_address_merkle_tree_wrap_around_custom() {
    let changelog_size = 250;
    let queue_capacity = 5003;
    let roots_size = changelog_size * 2;
    // Same values as (250..1000).step_by(250), spelled out explicitly.
    for address_changelog_size in [250, 500, 750] {
        println!(
            "changelog_size {} queue_capacity {} address_changelog_size {}",
            changelog_size, queue_capacity, address_changelog_size
        );
        let merkle_tree_config = AddressMerkleTreeConfig {
            height: ADDRESS_MERKLE_TREE_HEIGHT as u32,
            changelog_size,
            roots_size,
            canopy_depth: ADDRESS_MERKLE_TREE_CANOPY_DEPTH,
            address_changelog_size,
            network_fee: Some(5000),
            rollover_threshold: Some(95),
            close_threshold: None,
        };
        let queue_config = AddressQueueConfig {
            capacity: queue_capacity,
            sequence_threshold: roots_size + SAFETY_MARGIN,
            network_fee: None,
        };
        update_address_merkle_tree_wrap_around(&merkle_tree_config, &queue_config).await;
    }
}
/// Tests address Merkle tree and queue rollover.
/// 1. Not ready for rollover after init.
/// 2. Not ready for rollover after setting next index to required value - 1.
/// 3. Merkle tree and queue not associated (Invalid queue).
/// 4. Merkle tree and queue not associated (Invalid Merkle tree).
/// 5. Successful rollover.
/// 6. Attempt to rollover already rolled over Queue and Merkle tree.
async fn address_merkle_tree_and_queue_rollover(
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
) {
    let (mut context, payer, bundle) =
        test_setup_with_address_merkle_tree(merkle_tree_config, queue_config).await;
    let address_merkle_tree_pubkey = bundle.accounts.merkle_tree;
    let address_queue_pubkey = bundle.accounts.queue;
    // Second, unrelated tree/queue pair, used to trigger the
    // MerkleTreeAndQueueNotAssociated checks (CHECK 3 and 4) below.
    let address_merkle_tree_keypair_2 = Keypair::new();
    let address_queue_keypair_2 = Keypair::new();
    create_address_merkle_tree_and_queue_account_with_assert(
        &payer,
        false,
        &mut context,
        &address_merkle_tree_keypair_2,
        &address_queue_keypair_2,
        None,
        None,
        merkle_tree_config,
        queue_config,
        2,
    )
    .await
    .unwrap();
    // Next-index value at which rollover becomes permitted: the height-26
    // tree capacity scaled by the configured rollover threshold percentage.
    let required_next_index = 2u64.pow(26) * merkle_tree_config.rollover_threshold.unwrap() / 100;
    let failing_next_index = required_next_index - 1;
    let new_queue_keypair = Keypair::new();
    let new_address_merkle_tree_keypair = Keypair::new();
    // CHECK 1: Not ready for rollover after init.
    let result = perform_address_merkle_tree_roll_over(
        &mut context,
        &new_queue_keypair,
        &new_address_merkle_tree_keypair,
        &address_merkle_tree_pubkey,
        &address_queue_pubkey,
        merkle_tree_config,
        queue_config,
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::NotReadyForRollover.into(),
    )
    .unwrap();
    // Combined rent of the queue and tree accounts, which a rollover pays
    // back to the payer.
    let rollover_costs = context
        .get_account(address_queue_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports
        + context
            .get_account(address_merkle_tree_pubkey)
            .await
            .unwrap()
            .unwrap()
            .lamports;
    // Airdrop sufficient funds to address queue to reimburse the rollover costs.
    airdrop_lamports(&mut context, &address_queue_pubkey, rollover_costs)
        .await
        .unwrap();
    let address_merkle_tree_lamports = context
        .get_account(address_merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports;
    // Force the on-chain next index to just below the rollover threshold.
    set_address_merkle_tree_next_index(
        &mut context,
        &address_merkle_tree_pubkey,
        failing_next_index,
        address_merkle_tree_lamports,
    )
    .await;
    // CHECK 2: Not ready for rollover after setting next index to required value - 1.
    let result = perform_address_merkle_tree_roll_over(
        &mut context,
        &new_queue_keypair,
        &new_address_merkle_tree_keypair,
        &address_merkle_tree_pubkey,
        &address_queue_pubkey,
        merkle_tree_config,
        queue_config,
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::NotReadyForRollover.into(),
    )
    .unwrap();
    // Now set the next index exactly to the threshold so rollover is allowed.
    set_address_merkle_tree_next_index(
        &mut context,
        &address_merkle_tree_pubkey,
        required_next_index,
        address_merkle_tree_lamports,
    )
    .await;
    // CHECK 3: Merkle tree and queue not associated invalid queue.
    let result = perform_address_merkle_tree_roll_over(
        &mut context,
        &new_queue_keypair,
        &new_address_merkle_tree_keypair,
        &address_merkle_tree_pubkey,
        &address_queue_keypair_2.pubkey(),
        merkle_tree_config,
        queue_config,
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
    // CHECK 4: Merkle tree and queue not associated invalid Merkle tree.
    let result = perform_address_merkle_tree_roll_over(
        &mut context,
        &new_queue_keypair,
        &new_address_merkle_tree_keypair,
        &address_merkle_tree_keypair_2.pubkey(),
        &address_queue_pubkey,
        merkle_tree_config,
        queue_config,
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(),
    )
    .unwrap();
    // Snapshot the payer balance to verify rent reimbursement afterwards.
    let signer_prior_balance = context
        .get_account(payer.pubkey())
        .await
        .unwrap()
        .unwrap()
        .lamports;
    // CHECK 5: Successful rollover.
    perform_address_merkle_tree_roll_over(
        &mut context,
        &new_queue_keypair,
        &new_address_merkle_tree_keypair,
        &address_merkle_tree_pubkey,
        &address_queue_pubkey,
        merkle_tree_config,
        queue_config,
    )
    .await
    .unwrap();
    let payer: Keypair = context.get_payer().insecure_clone();
    assert_rolled_over_address_merkle_tree_and_queue(
        &payer.pubkey(),
        &mut context,
        &signer_prior_balance,
        &address_merkle_tree_pubkey,
        &address_queue_pubkey,
        &new_address_merkle_tree_keypair.pubkey(),
        &new_queue_keypair.pubkey(),
    )
    .await;
    let failing_new_nullifier_queue_keypair = Keypair::new();
    let failing_new_state_merkle_tree_keypair = Keypair::new();
    // CHECK 6: Attempt to rollover already rolled over Queue and Merkle tree.
    let result = perform_address_merkle_tree_roll_over(
        &mut context,
        &failing_new_nullifier_queue_keypair,
        &failing_new_state_merkle_tree_keypair,
        &address_merkle_tree_pubkey,
        &address_queue_pubkey,
        merkle_tree_config,
        queue_config,
    )
    .await;
    assert_rpc_error(
        result,
        2,
        AccountCompressionErrorCode::MerkleTreeAlreadyRolledOver.into(),
    )
    .unwrap();
}
/// Runs the rollover test against the default address tree and queue
/// configurations.
#[tokio::test]
async fn test_address_merkle_tree_and_queue_rollover_default() {
    let merkle_tree_config = AddressMerkleTreeConfig::default();
    let queue_config = AddressQueueConfig::default();
    address_merkle_tree_and_queue_rollover(&merkle_tree_config, &queue_config).await
}
/// Runs the rollover test across a grid of tree/queue sizing parameters.
/// Combinations where the roots buffer is smaller than the changelog are
/// invalid and therefore skipped.
#[tokio::test]
async fn test_address_merkle_tree_and_queue_rollover_custom() {
    for changelog_size in [1, 1000, 2000] {
        for roots_size in [1, 1000, 2000] {
            if roots_size >= changelog_size {
                for queue_capacity in [5003, 6857, 7901] {
                    // Same values as (250..1000).step_by(250).
                    for address_changelog_size in [250, 500, 750] {
                        let merkle_tree_config = AddressMerkleTreeConfig {
                            height: ADDRESS_MERKLE_TREE_HEIGHT as u32,
                            changelog_size,
                            roots_size,
                            canopy_depth: ADDRESS_MERKLE_TREE_CANOPY_DEPTH,
                            address_changelog_size,
                            network_fee: Some(5000),
                            rollover_threshold: Some(95),
                            close_threshold: None,
                        };
                        let queue_config = AddressQueueConfig {
                            capacity: queue_capacity,
                            sequence_threshold: roots_size + SAFETY_MARGIN,
                            network_fee: None,
                        };
                        address_merkle_tree_and_queue_rollover(&merkle_tree_config, &queue_config)
                            .await;
                    }
                }
            }
        }
    }
}
/// Spins up a local validator with the account-compression and noop programs,
/// creates an on-chain address Merkle tree + queue pair, and builds the local
/// reference state mirroring it.
pub async fn test_setup_with_address_merkle_tree(
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
) -> (
    ProgramTestRpcConnection, // rpc
    Keypair,                  // payer
    AddressMerkleTreeBundle,
) {
    let mut test_validator = ProgramTest::default();
    test_validator.add_program("account_compression", ID, None);
    test_validator.add_program("spl_noop", NOOP_PROGRAM_ID, None);
    let mut rpc = ProgramTestRpcConnection {
        context: test_validator.start_with_context().await,
    };
    let payer = rpc.get_payer().insecure_clone();

    // Create the on-chain address Merkle tree and its associated queue.
    let merkle_tree_keypair = Keypair::new();
    let queue_keypair = Keypair::new();
    create_address_merkle_tree_and_queue_account_with_assert(
        &payer,
        false,
        &mut rpc,
        &merkle_tree_keypair,
        &queue_keypair,
        None,
        None,
        merkle_tree_config,
        queue_config,
        1,
    )
    .await
    .unwrap();

    // Local indexing array and queue. We will use them to get the correct
    // elements and Merkle proofs, which we will modify later, to pass invalid
    // values. 😈
    let mut indexed_array = Box::<IndexedArray<Poseidon, usize>>::default();
    indexed_array.init().unwrap();
    let mut merkle_tree = Box::new(
        reference::IndexedMerkleTree::<Poseidon, usize>::new(
            ADDRESS_MERKLE_TREE_HEIGHT as usize,
            ADDRESS_MERKLE_TREE_CANOPY_DEPTH as usize,
        )
        .unwrap(),
    );
    merkle_tree.init().unwrap();

    let bundle = AddressMerkleTreeBundle {
        merkle_tree,
        indexed_array,
        accounts: AddressMerkleTreeAccounts {
            merkle_tree: merkle_tree_keypair.pubkey(),
            queue: queue_keypair.pubkey(),
        },
        rollover_fee: FeeConfig::default().address_queue_rollover as i64,
    };
    (rpc, payer, bundle)
}
/// Attempts a Merkle tree update using the (deliberately wrong) element at
/// `index` of the local indexed array as the low element and asserts that the
/// transaction fails with `expected_error`.
pub async fn test_with_invalid_low_element(
    context: &mut ProgramTestRpcConnection,
    address_queue_pubkey: Pubkey,
    address_merkle_tree_pubkey: Pubkey,
    address_queue: &HashSet,
    address_merkle_tree_bundle: &AddressMerkleTreeBundle,
    index: usize,
    expected_error: u32,
) {
    let payer = context.get_payer().insecure_clone();
    // Hash set index of the first queued address awaiting insertion.
    let (_, value_index) = address_queue.first_no_seq().unwrap().unwrap();
    // Take the element at `index` as the low element, regardless of whether
    // it is the correct one for the queued address.
    let low_element = address_merkle_tree_bundle.indexed_array.get(index).unwrap();
    let next_value = address_merkle_tree_bundle
        .indexed_array
        .get(low_element.next_index())
        .unwrap()
        .value
        .clone();
    // Merkle proof for the chosen low element.
    let proof = address_merkle_tree_bundle
        .merkle_tree
        .get_proof_of_leaf(low_element.index, false)
        .unwrap();
    // unwraps on a None value onchain.
    let result = update_merkle_tree(
        context,
        &payer,
        address_queue_pubkey,
        address_merkle_tree_pubkey,
        value_index,
        low_element.index as u64,
        bigint_to_be_bytes_array(&low_element.value).unwrap(),
        low_element.next_index as u64,
        bigint_to_be_bytes_array(&next_value).unwrap(),
        proof.to_array().unwrap(),
        None,
        None,
        true,
        0,
        false,
    )
    .await;
    assert_rpc_error(result, 0, expected_error).unwrap();
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/account-compression-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/account-compression-test/src/lib.rs
|
// placeholder
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/registry-test/Cargo.toml
|
[package]
name = "registry-test"
version = "1.1.0"
description = "Created with Anchor"
edition = "2021"
[lib]
crate-type = ["cdylib", "lib"]
name = "registry_test"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
test-sbf = []
custom-heap = []
default = ["custom-heap"]
[dependencies]
[dev-dependencies]
solana-program-test = { workspace = true }
light-test-utils = { version = "1.2.0", path = "../../test-utils", features=["devenv"] }
light-program-test = { workspace = true, features = ["devenv"] }
reqwest = "0.11.26"
tokio = { workspace = true }
light-prover-client = {path = "../../circuit-lib/light-prover-client", features = ["devenv"] }
num-bigint = "0.4.6"
num-traits = "0.2.19"
spl-token = { workspace = true }
anchor-spl = { workspace = true }
anchor-lang = { workspace = true }
forester-utils = { workspace = true }
light-registry = { workspace = true }
light-compressed-token = { workspace = true }
light-system-program = { workspace = true }
account-compression = { workspace = true }
light-hasher = {path = "../../merkle-tree/hasher"}
light-concurrent-merkle-tree = {path = "../../merkle-tree/concurrent"}
light-indexed-merkle-tree = {path = "../../merkle-tree/indexed"}
light-utils = {path = "../../utils"}
light-verifier = {path = "../../circuit-lib/verifier"}
solana-cli-output = { workspace = true }
serde_json = "1.0.133"
solana-sdk = { workspace = true }
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/registry-test/Xargo.toml
|
[target.bpfel-unknown-unknown.dependencies.std]
features = []
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/registry-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/registry-test/tests/tests.rs
|
#![cfg(feature = "test-sbf")]
use account_compression::{
AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig,
};
use anchor_lang::{InstructionData, ToAccountMetas};
use forester_utils::forester_epoch::get_epoch_phases;
use light_program_test::test_env::{
create_address_merkle_tree_and_queue_account, create_state_merkle_tree_and_queue_account,
deregister_program_with_registry_program, get_test_env_accounts, initialize_new_group,
register_program_with_registry_program, setup_accounts, setup_test_programs,
setup_test_programs_with_accounts, setup_test_programs_with_accounts_with_protocol_config,
EnvAccountKeypairs, GROUP_PDA_SEED_TEST_KEYPAIR, OLD_REGISTRY_ID_TEST_KEYPAIR,
};
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_registry::account_compression_cpi::sdk::{
create_nullify_instruction, create_update_address_merkle_tree_instruction,
CreateNullifyInstructionInputs, UpdateAddressMerkleTreeInstructionInputs,
};
use light_registry::errors::RegistryError;
use light_registry::protocol_config::state::{ProtocolConfig, ProtocolConfigPda};
use light_registry::sdk::{
create_finalize_registration_instruction, create_report_work_instruction,
create_update_forester_pda_weight_instruction,
};
use light_registry::utils::{
get_cpi_authority_pda, get_forester_epoch_pda_from_authority, get_forester_pda,
get_protocol_config_pda_address,
};
use light_registry::{ForesterConfig, ForesterEpochPda, ForesterPda};
use light_test_utils::assert_epoch::{
assert_epoch_pda, assert_finalized_epoch_registration, assert_registered_forester_pda,
assert_report_work, fetch_epoch_and_forester_pdas,
};
use light_test_utils::e2e_test_env::init_program_test_env;
use light_test_utils::test_forester::{empty_address_queue_test, nullify_compressed_accounts};
use light_test_utils::{
assert_rpc_error, create_address_merkle_tree_and_queue_account_with_assert,
create_rollover_address_merkle_tree_instructions,
create_rollover_state_merkle_tree_instructions, register_test_forester, update_test_forester,
Epoch, RpcConnection, SolanaRpcConnection, SolanaRpcUrl, TreeAccounts, TreeType,
};
use solana_sdk::{
instruction::Instruction,
native_token::LAMPORTS_PER_SOL,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair},
signer::Signer,
};
use std::collections::HashSet;
/// Verifies for devnet- and mainnet-like protocol configs that every slot is
/// active in exactly one epoch.
#[test]
fn test_protocol_config_active_phase_continuity() {
    // Devnet-like sizing: short phases, genesis at slot 0.
    let devnet_config = ProtocolConfig {
        genesis_slot: 0,
        min_weight: 1,
        slot_length: 10,
        registration_phase_length: 100,
        active_phase_length: 1000,
        report_work_phase_length: 100,
        network_fee: 5000,
        cpi_context_size: 20488,
        finalize_counter_limit: 100,
        place_holder: Pubkey::default(),
        place_holder_a: 0,
        place_holder_b: 0,
        place_holder_c: 0,
        place_holder_d: 0,
        place_holder_e: 0,
        place_holder_f: 0,
    };
    // Mainnet sizing with the real genesis slot.
    let mainnet_config = ProtocolConfig {
        genesis_slot: 286142505,
        min_weight: 1,
        slot_length: 50,
        registration_phase_length: 216000,
        active_phase_length: 432000,
        report_work_phase_length: 216000,
        network_fee: 5000,
        cpi_context_size: 20488,
        finalize_counter_limit: 100,
        place_holder: Pubkey::default(),
        place_holder_a: 0,
        place_holder_b: 0,
        place_holder_c: 0,
        place_holder_d: 0,
        place_holder_e: 0,
        place_holder_f: 0,
    };
    for config in [devnet_config, mainnet_config] {
        test_protocol_config_active_phase_continuity_for_config(config);
    }
}
// Test that each slot is active in exactly one epoch.
fn test_protocol_config_active_phase_continuity_for_config(config: ProtocolConfig) {
    // Walk the slots of the first 10 epochs.
    let epochs = 10;
    let total_slots_to_test = config.active_phase_length * epochs;
    let first_slot = config.genesis_slot;
    for slot in first_slot..(first_slot + total_slots_to_test) {
        // Slots inside the initial registration phase belong to epoch 0.
        if slot < first_slot + config.registration_phase_length {
            assert_eq!(config.get_latest_register_epoch(slot).unwrap(), 0);
            continue;
        }
        // Collect every candidate epoch (previous and current) whose active
        // phase contains this slot.
        let current_epoch = config.get_current_epoch(slot) as i64;
        let mut active_epochs = HashSet::new();
        for candidate in [current_epoch - 1, current_epoch] {
            if candidate < 0 {
                continue;
            }
            let phases = get_epoch_phases(&config, candidate as u64);
            if (phases.active.start..=phases.active.end).contains(&slot) {
                active_epochs.insert(candidate);
            }
        }
        assert_eq!(
            active_epochs.len(),
            1,
            "Slot {} should be active in exactly one epoch, but was active in {} epochs. Protocol config: {:?}",
            slot,
            active_epochs.len(),
            config
        );
    }
}
/// Tests protocol config initialization and governance flows:
/// 1. Initialize the protocol config with the hardcoded program authority.
/// 2. Update the protocol config and transfer authority to a new keypair;
///    fails with an invalid authority.
/// 3. Initialize a group PDA; (de)register programs with it; fails with an
///    invalid authority.
/// 4. Create state and address Merkle trees with network fee None or 5000;
///    any other fee is rejected, and fee None without a designated forester
///    fails for state trees.
#[tokio::test]
async fn test_initialize_protocol_config() {
    let rpc = setup_test_programs(None).await;
    let mut rpc = ProgramTestRpcConnection { context: rpc };
    let payer = rpc.get_payer().insecure_clone();
    // Hardcoded keypair that is the expected protocol config authority.
    let program_account_keypair = Keypair::from_bytes(&OLD_REGISTRY_ID_TEST_KEYPAIR).unwrap();
    let protocol_config = ProtocolConfig::default();
    let (protocol_config_pda, bump) = get_protocol_config_pda_address();
    let ix_data = light_registry::instruction::InitializeProtocolConfig {
        protocol_config,
        bump,
    };
    // // init with invalid authority
    // {
    //     let accounts = light_registry::accounts::InitializeProtocolConfig {
    //         protocol_config_pda,
    //         authority: payer.pubkey(),
    //         fee_payer: payer.pubkey(),
    //         system_program: solana_sdk::system_program::id(),
    //         self_program: light_registry::ID,
    //     };
    //     let ix = Instruction {
    //         program_id: light_registry::ID,
    //         accounts: accounts.to_account_metas(Some(true)),
    //         data: ix_data.data(),
    //     };
    //     let result = rpc
    //         .create_and_send_transaction(&[ix], &payer.pubkey(), &[&payer])
    //         .await;
    //     assert_rpc_error(
    //         result,
    //         0,
    //         anchor_lang::error::ErrorCode::ConstraintRaw as u32,
    //     )
    //     .unwrap();
    // }
    // init with valid authority
    {
        let accounts = light_registry::accounts::InitializeProtocolConfig {
            protocol_config_pda,
            authority: program_account_keypair.pubkey(),
            fee_payer: payer.pubkey(),
            system_program: solana_sdk::system_program::id(),
            self_program: light_registry::ID,
        };
        let ix = Instruction {
            program_id: light_registry::ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: ix_data.data(),
        };
        rpc.create_and_send_transaction(
            &[ix],
            &payer.pubkey(),
            &[&payer, &program_account_keypair],
        )
        .await
        .unwrap();
        let protocol_config_pda: ProtocolConfigPda = rpc
            .get_anchor_account::<ProtocolConfigPda>(&protocol_config_pda)
            .await
            .unwrap()
            .unwrap();
        println!("protocol_config_pda: {:?}", protocol_config_pda);
        assert_eq!(
            protocol_config_pda.authority,
            program_account_keypair.pubkey()
        );
        assert_eq!(protocol_config_pda.config, protocol_config);
        assert_eq!(protocol_config_pda.bump, bump);
    }
    // Test: update protocol config
    let updated_keypair = Keypair::new();
    rpc.airdrop_lamports(&updated_keypair.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    // fail to update protocol config with invalid authority
    {
        let instruction = light_registry::instruction::UpdateProtocolConfig {
            protocol_config: None,
        };
        let accounts = light_registry::accounts::UpdateProtocolConfig {
            protocol_config_pda,
            authority: payer.pubkey(),
            new_authority: Some(updated_keypair.pubkey()),
            fee_payer: payer.pubkey(),
        };
        let ix = Instruction {
            program_id: light_registry::ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: instruction.data(),
        };
        let result = rpc
            .create_and_send_transaction(&[ix], &payer.pubkey(), &[&payer, &updated_keypair])
            .await;
        assert_rpc_error(
            result,
            0,
            anchor_lang::error::ErrorCode::ConstraintHasOne as u32,
        )
        .unwrap();
    }
    // Update the protocol config and hand authority to `updated_keypair`.
    {
        let updated_protocol_config = ProtocolConfig {
            registration_phase_length: 123,
            report_work_phase_length: 123,
            ..Default::default()
        };
        let instruction = light_registry::instruction::UpdateProtocolConfig {
            protocol_config: Some(updated_protocol_config),
        };
        let accounts = light_registry::accounts::UpdateProtocolConfig {
            protocol_config_pda,
            authority: program_account_keypair.pubkey(),
            new_authority: Some(updated_keypair.pubkey()),
            fee_payer: payer.pubkey(),
        };
        let ix = Instruction {
            program_id: light_registry::ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: instruction.data(),
        };
        rpc.create_and_send_transaction(
            &[ix],
            &payer.pubkey(),
            &[&payer, &updated_keypair, &program_account_keypair],
        )
        .await
        .unwrap();
        let governance_authority = rpc
            .get_anchor_account::<ProtocolConfigPda>(&protocol_config_pda)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(governance_authority.authority, updated_keypair.pubkey());
        assert_eq!(governance_authority.config, updated_protocol_config);
    }
    let cpi_authority_pda = get_cpi_authority_pda();
    let group_seed_keypair = Keypair::from_bytes(&GROUP_PDA_SEED_TEST_KEYPAIR).unwrap();
    let group_pda =
        initialize_new_group(&group_seed_keypair, &payer, &mut rpc, cpi_authority_pda.0).await;
    let random_program_keypair = Keypair::new();
    // register program with invalid authority
    {
        let result = register_program_with_registry_program(
            &mut rpc,
            &payer,
            &group_pda,
            &random_program_keypair,
        )
        .await;
        let expected_error_code = anchor_lang::error::ErrorCode::ConstraintHasOne as u32;
        assert_rpc_error(result, 1, expected_error_code).unwrap();
    }
    // deregister program functional and with invalid signer
    {
        let random_program_keypair = Keypair::new();
        register_program_with_registry_program(
            &mut rpc,
            &updated_keypair,
            &group_pda,
            &random_program_keypair,
        )
        .await
        .unwrap();
        // Deregistration signed by `payer` (no longer the authority) fails.
        let result = deregister_program_with_registry_program(
            &mut rpc,
            &payer,
            &group_pda,
            &random_program_keypair,
        )
        .await;
        let expected_error_code = anchor_lang::error::ErrorCode::ConstraintHasOne as u32;
        assert_rpc_error(result, 1, expected_error_code).unwrap();
        deregister_program_with_registry_program(
            &mut rpc,
            &updated_keypair,
            &group_pda,
            &random_program_keypair,
        )
        .await
        .unwrap();
    }
    // initialize a state Merkle tree with network fee = None and a
    // designated forester
    {
        let merkle_tree_keypair = Keypair::new();
        let nullifier_queue_keypair = Keypair::new();
        let cpi_context_keypair = Keypair::new();
        create_state_merkle_tree_and_queue_account(
            &payer,
            true,
            &mut rpc,
            &merkle_tree_keypair,
            &nullifier_queue_keypair,
            Some(&cpi_context_keypair),
            None,
            Some(Pubkey::new_unique()),
            1,
            &StateMerkleTreeConfig {
                network_fee: None,
                ..Default::default()
            },
            &NullifierQueueConfig::default(),
        )
        .await
        .unwrap();
    }
    // FAIL: initialize a state Merkle tree with network fee = None but no
    // designated forester -> ForesterUndefined
    {
        let merkle_tree_keypair = Keypair::new();
        let nullifier_queue_keypair = Keypair::new();
        let cpi_context_keypair = Keypair::new();
        let result = create_state_merkle_tree_and_queue_account(
            &payer,
            true,
            &mut rpc,
            &merkle_tree_keypair,
            &nullifier_queue_keypair,
            Some(&cpi_context_keypair),
            None,
            None,
            1,
            &StateMerkleTreeConfig {
                network_fee: None,
                ..Default::default()
            },
            &NullifierQueueConfig::default(),
        )
        .await;
        assert_rpc_error(result, 3, RegistryError::ForesterUndefined.into()).unwrap();
    }
    // initialize a Merkle tree with network fee = 5000 (default)
    {
        let merkle_tree_keypair = Keypair::new();
        let nullifier_queue_keypair = Keypair::new();
        let cpi_context_keypair = Keypair::new();
        create_state_merkle_tree_and_queue_account(
            &payer,
            true,
            &mut rpc,
            &merkle_tree_keypair,
            &nullifier_queue_keypair,
            Some(&cpi_context_keypair),
            None,
            None,
            1,
            &StateMerkleTreeConfig {
                network_fee: Some(5000),
                ..Default::default()
            },
            &NullifierQueueConfig::default(),
        )
        .await
        .unwrap();
    }
    // FAIL: initialize a Merkle tree with network fee != 0 || 5000
    {
        let merkle_tree_keypair = Keypair::new();
        let nullifier_queue_keypair = Keypair::new();
        let cpi_context_keypair = Keypair::new();
        let result = create_state_merkle_tree_and_queue_account(
            &payer,
            true,
            &mut rpc,
            &merkle_tree_keypair,
            &nullifier_queue_keypair,
            Some(&cpi_context_keypair),
            None,
            None,
            1,
            &StateMerkleTreeConfig {
                network_fee: Some(5001),
                ..Default::default()
            },
            &NullifierQueueConfig::default(),
        )
        .await;
        // Anchor custom error codes start at 6000.
        let expected_error_code = RegistryError::InvalidNetworkFee as u32 + 6000;
        assert_rpc_error(result, 3, expected_error_code).unwrap();
    }
    // initialize an address Merkle tree with network fee = None and a
    // designated forester
    {
        let merkle_tree_keypair = Keypair::new();
        let queue_keypair = Keypair::new();
        create_address_merkle_tree_and_queue_account_with_assert(
            &payer,
            true,
            &mut rpc,
            &merkle_tree_keypair,
            &queue_keypair,
            None,
            Some(Pubkey::new_unique()),
            &AddressMerkleTreeConfig {
                network_fee: None,
                ..Default::default()
            },
            &AddressQueueConfig::default(),
            0,
        )
        .await
        .unwrap();
    }
    // initialize a Merkle tree with network fee = 5000 (default)
    {
        let merkle_tree_keypair = Keypair::new();
        let queue_keypair = Keypair::new();
        create_address_merkle_tree_and_queue_account_with_assert(
            &payer,
            true,
            &mut rpc,
            &merkle_tree_keypair,
            &queue_keypair,
            None,
            None,
            &AddressMerkleTreeConfig {
                network_fee: Some(5000),
                ..Default::default()
            },
            &AddressQueueConfig::default(),
            0,
        )
        .await
        .unwrap();
    }
    // FAIL: initialize a Merkle tree with network fee != 0 || 5000
    {
        let merkle_tree_keypair = Keypair::new();
        let queue_keypair = Keypair::new();
        let result = create_address_merkle_tree_and_queue_account(
            &payer,
            true,
            &mut rpc,
            &merkle_tree_keypair,
            &queue_keypair,
            None,
            None,
            &AddressMerkleTreeConfig {
                network_fee: Some(5001),
                ..Default::default()
            },
            &AddressQueueConfig::default(),
            0,
        )
        .await;
        // Anchor custom error codes start at 6000.
        let expected_error_code = RegistryError::InvalidNetworkFee as u32 + 6000;
        assert_rpc_error(result, 2, expected_error_code).unwrap();
    }
}
/// Tests a custom (tree-designated, unregistered) forester:
/// 1. FAIL: nullify with a signer that is not the tree's designated forester.
/// 2. SUCCESS: nullify with the designated tree forester.
#[tokio::test]
async fn test_custom_forester() {
    let (mut rpc, env) = setup_test_programs_with_accounts_with_protocol_config(
        None,
        ProtocolConfig::default(),
        false,
    )
    .await;
    let payer = rpc.get_payer().insecure_clone();
    {
        // Forester that is never registered with the registry program but is
        // set as the tree's designated forester below.
        let unregistered_forester_keypair = Keypair::new();
        rpc.airdrop_lamports(&unregistered_forester_keypair.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let merkle_tree_keypair = Keypair::new();
        let nullifier_queue_keypair = Keypair::new();
        let cpi_context_keypair = Keypair::new();
        // create work 1 item in address and nullifier queue each
        let (mut state_merkle_tree_bundle, _, mut rpc) = {
            let mut e2e_env = init_program_test_env(rpc, &env).await;
            e2e_env.indexer.state_merkle_trees.clear();
            // add state merkle tree to the indexer
            e2e_env
                .indexer
                .add_state_merkle_tree(
                    &mut e2e_env.rpc,
                    &merkle_tree_keypair,
                    &nullifier_queue_keypair,
                    &cpi_context_keypair,
                    None,
                    Some(unregistered_forester_keypair.pubkey()),
                )
                .await;
            // e2e_env.create_address(None).await;
            e2e_env
                .compress_sol_deterministic(&unregistered_forester_keypair, 1_000_000, None)
                .await;
            e2e_env
                .transfer_sol_deterministic(
                    &unregistered_forester_keypair,
                    &Pubkey::new_unique(),
                    None,
                )
                .await
                .unwrap();
            (
                e2e_env.indexer.state_merkle_trees[0].clone(),
                e2e_env.indexer.address_merkle_trees[0].clone(),
                e2e_env.rpc,
            )
        };
        // FAIL: nullifying with `payer`, which is not the designated forester.
        {
            let result = nullify_compressed_accounts(
                &mut rpc,
                &payer,
                &mut state_merkle_tree_bundle,
                0,
                true,
            )
            .await;
            assert_rpc_error(result, 0, RegistryError::InvalidSigner.into()).unwrap();
        }
        // nullify with tree forester
        nullify_compressed_accounts(
            &mut rpc,
            &unregistered_forester_keypair,
            &mut state_merkle_tree_bundle,
            0,
            true,
        )
        .await
        .unwrap();
    }
}
/// Test:
/// 1. SUCCESS: Register a forester
/// 2. SUCCESS: Update forester authority
/// 3. SUCCESS: Register forester for epoch
/// Additionally covers: updating the forester weight, finalizing epoch
/// registration, performing queue work (nullify + address update),
/// registering for the next epoch, and reporting work.
#[tokio::test]
async fn test_register_and_update_forester_pda() {
    let (mut rpc, env) = setup_test_programs_with_accounts_with_protocol_config(
        None,
        ProtocolConfig::default(),
        false,
    )
    .await;
    let forester_keypair = Keypair::new();
    rpc.airdrop_lamports(&forester_keypair.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let config = ForesterConfig { fee: 1 };
    // 1. SUCCESS: Register a forester
    register_test_forester(
        &mut rpc,
        &env.governance_authority,
        &forester_keypair.pubkey(),
        config,
    )
    .await
    .unwrap();
    // 2. SUCCESS: Update forester authority
    let new_forester_keypair = Keypair::new();
    rpc.airdrop_lamports(&new_forester_keypair.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let config = ForesterConfig { fee: 2 };
    update_test_forester(
        &mut rpc,
        &forester_keypair,
        &forester_keypair.pubkey(),
        Some(&new_forester_keypair),
        config,
    )
    .await
    .unwrap();
    // change the forester authority back
    update_test_forester(
        &mut rpc,
        &new_forester_keypair,
        &forester_keypair.pubkey(),
        Some(&forester_keypair),
        config,
    )
    .await
    .unwrap();
    let protocol_config = rpc
        .get_anchor_account::<ProtocolConfigPda>(&env.governance_authority_pda)
        .await
        .unwrap()
        .unwrap()
        .config;
    // SUCCESS: update forester weight
    {
        let ix = create_update_forester_pda_weight_instruction(
            &forester_keypair.pubkey(),
            &env.governance_authority.pubkey(),
            11,
        );
        rpc.create_and_send_transaction(
            &[ix],
            &env.governance_authority.pubkey(),
            &[&env.governance_authority],
        )
        .await
        .unwrap();
        let forester_pda: ForesterPda = rpc
            .get_anchor_account::<ForesterPda>(&get_forester_pda(&forester_keypair.pubkey()).0)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(forester_pda.active_weight, 11);
        // change it back because other asserts expect weight 1
        let ix = create_update_forester_pda_weight_instruction(
            &forester_keypair.pubkey(),
            &env.governance_authority.pubkey(),
            1,
        );
        rpc.create_and_send_transaction(
            &[ix],
            &env.governance_authority.pubkey(),
            &[&env.governance_authority],
        )
        .await
        .unwrap();
    }
    // 3. SUCCESS: register forester for epoch
    // Trees the forester will service during the active phase.
    let tree_accounts = vec![
        TreeAccounts {
            tree_type: TreeType::State,
            merkle_tree: env.merkle_tree_pubkey,
            queue: env.nullifier_queue_pubkey,
            is_rolledover: false,
        },
        TreeAccounts {
            tree_type: TreeType::Address,
            merkle_tree: env.address_merkle_tree_pubkey,
            queue: env.address_merkle_tree_queue_pubkey,
            is_rolledover: false,
        },
    ];
    let registered_epoch = Epoch::register(
        &mut rpc,
        &protocol_config,
        &forester_keypair,
        &forester_keypair.pubkey(),
    )
    .await
    .unwrap();
    assert!(registered_epoch.is_some());
    let mut registered_epoch = registered_epoch.unwrap();
    let forester_epoch_pda = rpc
        .get_anchor_account::<ForesterEpochPda>(&registered_epoch.forester_epoch_pda)
        .await
        .unwrap()
        .unwrap();
    // Total epoch weight is only set once registration is finalized.
    assert!(forester_epoch_pda.total_epoch_weight.is_none());
    assert_eq!(forester_epoch_pda.epoch, 0);
    let epoch = 0;
    let expected_stake = 1;
    assert_epoch_pda(&mut rpc, epoch, expected_stake).await;
    assert_registered_forester_pda(
        &mut rpc,
        &registered_epoch.forester_epoch_pda,
        &forester_keypair.pubkey(),
        epoch,
    )
    .await;
    // advance epoch to active phase
    rpc.warp_to_slot(registered_epoch.phases.active.start)
        .await
        .unwrap();
    // finalize registration
    {
        registered_epoch
            .fetch_account_and_add_trees_with_schedule(&mut rpc, &tree_accounts)
            .await
            .unwrap();
        let ix = create_finalize_registration_instruction(
            &forester_keypair.pubkey(),
            &forester_keypair.pubkey(),
            registered_epoch.epoch,
        );
        rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair])
            .await
            .unwrap();
        assert_finalized_epoch_registration(
            &mut rpc,
            &registered_epoch.forester_epoch_pda,
            &registered_epoch.epoch_pda,
        )
        .await;
    }
    // create work 1 item in address and nullifier queue each
    let (mut state_merkle_tree_bundle, mut address_merkle_tree, mut rpc) = {
        let mut e2e_env = init_program_test_env(rpc, &env).await;
        e2e_env.create_address(None, None).await;
        e2e_env
            .compress_sol_deterministic(&forester_keypair, 1_000_000, None)
            .await;
        e2e_env
            .transfer_sol_deterministic(&forester_keypair, &Pubkey::new_unique(), None)
            .await
            .unwrap();
        (
            e2e_env.indexer.state_merkle_trees[0].clone(),
            e2e_env.indexer.address_merkle_trees[0].clone(),
            e2e_env.rpc,
        )
    };
    // perform 1 work
    nullify_compressed_accounts(
        &mut rpc,
        &forester_keypair,
        &mut state_merkle_tree_bundle,
        epoch,
        false,
    )
    .await
    .unwrap();
    empty_address_queue_test(
        &forester_keypair,
        &mut rpc,
        &mut address_merkle_tree,
        false,
        epoch,
        false,
    )
    .await
    .unwrap();
    // advance epoch to report work and next registration phase
    rpc.warp_to_slot(
        registered_epoch.phases.report_work.start - protocol_config.registration_phase_length,
    )
    .await
    .unwrap();
    // register for next epoch
    let next_registered_epoch = Epoch::register(
        &mut rpc,
        &protocol_config,
        &forester_keypair,
        &forester_keypair.pubkey(),
    )
    .await
    .unwrap();
    assert!(next_registered_epoch.is_some());
    let next_registered_epoch = next_registered_epoch.unwrap();
    assert_eq!(next_registered_epoch.epoch, 1);
    assert_epoch_pda(&mut rpc, next_registered_epoch.epoch, expected_stake).await;
    assert_registered_forester_pda(
        &mut rpc,
        &next_registered_epoch.forester_epoch_pda,
        &forester_keypair.pubkey(),
        next_registered_epoch.epoch,
    )
    .await;
    rpc.warp_to_slot(registered_epoch.phases.report_work.start)
        .await
        .unwrap();
    // report work
    {
        let (pre_forester_epoch_pda, pre_epoch_pda) = fetch_epoch_and_forester_pdas(
            &mut rpc,
            &registered_epoch.forester_epoch_pda,
            &registered_epoch.epoch_pda,
        )
        .await;
        let ix = create_report_work_instruction(
            &forester_keypair.pubkey(),
            &forester_keypair.pubkey(),
            registered_epoch.epoch,
        );
        rpc.create_and_send_transaction(&[ix], &forester_keypair.pubkey(), &[&forester_keypair])
            .await
            .unwrap();
        assert_report_work(
            &mut rpc,
            &registered_epoch.forester_epoch_pda,
            &registered_epoch.epoch_pda,
            pre_forester_epoch_pda,
            pre_epoch_pda,
        )
        .await;
    }
}
// TODO: fix numbering
/// Test:
/// 1. FAIL: Register a forester with invalid authority
/// 2. FAIL: Update forester pda authority with invalid authority
/// 2. FAIL: Update forester epoch pda authority with invalid authority
/// 3. FAIL: Nullify with invalid authority
/// 4. FAIL: Update address tree with invalid authority
/// 5. FAIL: Rollover address tree with invalid authority
/// 6. FAIL: Rollover state tree with invalid authority
#[tokio::test]
async fn failing_test_forester() {
let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
let payer = rpc.get_payer().insecure_clone();
// 1. FAIL: Register a forester pda with invalid authority
{
let result = register_test_forester(
&mut rpc,
&payer,
&Keypair::new().pubkey(),
ForesterConfig::default(),
)
.await;
let expected_error_code = anchor_lang::error::ErrorCode::ConstraintHasOne as u32;
assert_rpc_error(result, 0, expected_error_code).unwrap();
}
// 2. FAIL: Update forester pda with invalid authority
{
let forester_pda = get_forester_pda(&env.forester.pubkey()).0;
let instruction_data = light_registry::instruction::UpdateForesterPda { config: None };
let accounts = light_registry::accounts::UpdateForesterPda {
forester_pda,
authority: payer.pubkey(),
new_authority: Some(payer.pubkey()),
};
let ix = Instruction {
program_id: light_registry::ID,
accounts: accounts.to_account_metas(Some(true)),
data: instruction_data.data(),
};
let result = rpc
.create_and_send_transaction(&[ix], &payer.pubkey(), &[&payer])
.await;
let expected_error_code = anchor_lang::error::ErrorCode::ConstraintHasOne as u32;
assert_rpc_error(result, 0, expected_error_code).unwrap();
println!("here1");
}
// 3. FAIL: Update forester pda weight with invalid authority
{
let ix = light_registry::instruction::UpdateForesterPdaWeight { new_weight: 11 };
let accounts = light_registry::accounts::UpdateForesterPdaWeight {
forester_pda: get_forester_pda(&env.forester.pubkey()).0,
authority: payer.pubkey(),
protocol_config_pda: env.governance_authority_pda,
};
let ix = Instruction {
program_id: light_registry::ID,
accounts: accounts.to_account_metas(Some(true)),
data: ix.data(),
};
let result = rpc
.create_and_send_transaction(&[ix], &payer.pubkey(), &[&payer])
.await;
let expected_error_code = anchor_lang::error::ErrorCode::ConstraintHasOne as u32;
assert_rpc_error(result, 0, expected_error_code).unwrap();
println!("here1");
}
// 4. FAIL: Nullify with invalid authority
{
let expected_error_code = RegistryError::InvalidForester as u32 + 6000;
let inputs = CreateNullifyInstructionInputs {
authority: payer.pubkey(),
nullifier_queue: env.nullifier_queue_pubkey,
merkle_tree: env.merkle_tree_pubkey,
change_log_indices: vec![1],
leaves_queue_indices: vec![1u16],
indices: vec![0u64],
proofs: vec![vec![[0u8; 32]; 26]],
derivation: payer.pubkey(),
is_metadata_forester: false,
};
let mut ix = create_nullify_instruction(inputs, 0);
// Swap the derived forester pda with an initialized but invalid one.
ix.accounts[0].pubkey = get_forester_epoch_pda_from_authority(&env.forester.pubkey(), 0).0;
let result = rpc
.create_and_send_transaction(&[ix], &payer.pubkey(), &[&payer])
.await;
assert_rpc_error(result, 0, expected_error_code).unwrap();
println!("here1");
}
// 4 FAIL: update address Merkle tree failed
{
let expected_error_code = RegistryError::InvalidForester as u32 + 6000;
let authority = rpc.get_payer().insecure_clone();
let mut instruction = create_update_address_merkle_tree_instruction(
UpdateAddressMerkleTreeInstructionInputs {
authority: authority.pubkey(),
derivation: authority.pubkey(),
address_merkle_tree: env.address_merkle_tree_pubkey,
address_queue: env.address_merkle_tree_queue_pubkey,
changelog_index: 0,
indexed_changelog_index: 0,
value: 1,
low_address_index: 1,
low_address_value: [0u8; 32],
low_address_next_index: 1,
low_address_next_value: [0u8; 32],
low_address_proof: [[0u8; 32]; 16],
is_metadata_forester: false,
},
0,
);
// Swap the derived forester pda with an initialized but invalid one.
instruction.accounts[0].pubkey =
get_forester_epoch_pda_from_authority(&env.forester.pubkey(), 0).0;
println!("here1");
let result = rpc
.create_and_send_transaction(&[instruction], &authority.pubkey(), &[&authority])
.await;
assert_rpc_error(result, 0, expected_error_code).unwrap();
}
// 5. FAIL: rollover address tree with invalid authority
{
let new_queue_keypair = Keypair::new();
let new_merkle_tree_keypair = Keypair::new();
let expected_error_code = RegistryError::InvalidForester as u32 + 6000;
let authority = rpc.get_payer().insecure_clone();
let mut instructions = create_rollover_address_merkle_tree_instructions(
&mut rpc,
&authority.pubkey(),
&authority.pubkey(),
&new_queue_keypair,
&new_merkle_tree_keypair,
&env.address_merkle_tree_pubkey,
&env.address_merkle_tree_queue_pubkey,
0, // TODO: adapt epoch
false,
)
.await;
// Swap the derived forester pda with an initialized but invalid one.
instructions[2].accounts[0].pubkey =
get_forester_epoch_pda_from_authority(&env.forester.pubkey(), 0).0;
let result = rpc
.create_and_send_transaction(
&instructions,
&authority.pubkey(),
&[&authority, &new_queue_keypair, &new_merkle_tree_keypair],
)
.await;
assert_rpc_error(result, 2, expected_error_code).unwrap();
println!("here1");
}
// 6. FAIL: rollover state tree with invalid authority
{
let new_nullifier_queue_keypair = Keypair::new();
let new_state_merkle_tree_keypair = Keypair::new();
let new_cpi_context = Keypair::new();
let expected_error_code = RegistryError::InvalidForester as u32 + 6000;
let authority = rpc.get_payer().insecure_clone();
let mut instructions = create_rollover_state_merkle_tree_instructions(
&mut rpc,
&authority.pubkey(),
&authority.pubkey(),
&new_nullifier_queue_keypair,
&new_state_merkle_tree_keypair,
&new_cpi_context,
&env.merkle_tree_pubkey,
&env.nullifier_queue_pubkey,
0, // TODO: adapt epoch
false,
)
.await;
// Swap the derived forester pda with an initialized but invalid one.
instructions[3].accounts[0].pubkey =
get_forester_epoch_pda_from_authority(&env.forester.pubkey(), 0).0;
let result = rpc
.create_and_send_transaction(
&instructions,
&authority.pubkey(),
&[
&authority,
&new_nullifier_queue_keypair,
&new_state_merkle_tree_keypair,
&new_cpi_context,
],
)
.await;
assert_rpc_error(result, 3, expected_error_code).unwrap();
}
}
// cargo test-sbf -p registry-test -- --test update_registry_governance_on_testnet update_forester_on_testnet --ignored --nocapture
#[ignore]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn update_forester_on_testnet() {
    let env_accounts = get_test_env_accounts();
    let mut rpc = SolanaRpcConnection::new(SolanaRpcUrl::ZKTestnet, None);
    // Fund the current forester authority so it can pay for the update.
    rpc.airdrop_lamports(&env_accounts.forester.pubkey(), LAMPORTS_PER_SOL * 100)
        .await
        .unwrap();
    // Sanity check: the on-chain forester pda still references the old authority.
    let forester_pda = rpc
        .get_anchor_account::<ForesterPda>(&env_accounts.registered_forester_pda)
        .await
        .unwrap()
        .unwrap();
    println!("ForesterEpoch: {:?}", forester_pda);
    assert_eq!(forester_pda.authority, env_accounts.forester.pubkey());
    // Rotate the forester authority to the keypair stored in the target folder.
    let new_authority = read_keypair_file("../../target/forester-keypair.json").unwrap();
    println!("updated keypair: {:?}", new_authority.pubkey());
    update_test_forester(
        &mut rpc,
        &env_accounts.forester,
        &env_accounts.forester.pubkey(),
        Some(&new_authority),
        ForesterConfig::default(),
    )
    .await
    .unwrap();
    // Re-fetch the pda and confirm the authority was rotated.
    let forester_pda = rpc
        .get_anchor_account::<ForesterPda>(&env_accounts.registered_forester_pda)
        .await
        .unwrap()
        .unwrap();
    println!("ForesterEpoch: {:?}", forester_pda);
    assert_eq!(forester_pda.authority, new_authority.pubkey());
}
/// Manual testnet maintenance task: rotates the protocol governance authority
/// stored in the `ProtocolConfigPda` to the keypair read from the target folder.
#[ignore]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn update_registry_governance_on_testnet() {
    let env_accounts = get_test_env_accounts();
    let mut rpc = SolanaRpcConnection::new(SolanaRpcUrl::ZKTestnet, None);
    // Fund the current governance authority so it can pay for the update.
    rpc.airdrop_lamports(
        &env_accounts.governance_authority.pubkey(),
        LAMPORTS_PER_SOL * 100,
    )
    .await
    .unwrap();
    // Sanity check: the protocol config pda still references the old authority.
    let governance_authority = rpc
        .get_anchor_account::<ProtocolConfigPda>(&env_accounts.governance_authority_pda)
        .await
        .unwrap()
        .unwrap();
    // Fixed log label: the fetched account is a ProtocolConfigPda, not a
    // GroupAuthority as the previous message claimed.
    println!("ProtocolConfigPda: {:?}", governance_authority);
    assert_eq!(
        governance_authority.authority,
        env_accounts.governance_authority.pubkey()
    );
    let updated_keypair =
        read_keypair_file("../../target/governance-authority-keypair.json").unwrap();
    println!("updated keypair: {:?}", updated_keypair.pubkey());
    // `protocol_config: None` keeps the existing config; only the authority changes.
    let instruction = light_registry::instruction::UpdateProtocolConfig {
        protocol_config: None,
    };
    let accounts = light_registry::accounts::UpdateProtocolConfig {
        protocol_config_pda: env_accounts.governance_authority_pda,
        authority: env_accounts.governance_authority.pubkey(),
        new_authority: Some(updated_keypair.pubkey()),
        fee_payer: env_accounts.governance_authority.pubkey(),
    };
    let ix = Instruction {
        program_id: light_registry::ID,
        accounts: accounts.to_account_metas(Some(true)),
        data: instruction.data(),
    };
    let signature = rpc
        .create_and_send_transaction(
            &[ix],
            &env_accounts.governance_authority.pubkey(),
            &[&env_accounts.governance_authority],
        )
        .await
        .unwrap();
    println!("signature: {:?}", signature);
    // Confirm the rotation took effect on chain.
    let governance_authority = rpc
        .get_anchor_account::<ProtocolConfigPda>(&env_accounts.governance_authority_pda)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(governance_authority.authority, updated_keypair.pubkey());
}
// cargo test-sbf -p registry-test -- --test init_accounts --ignored --nocapture
// TODO: refactor into xtask
#[ignore]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn init_accounts() {
    // Loads governance-authority and forester keypairs from the repo's target
    // folder (fixed paths inside `EnvAccountKeypairs::from_target_folder`).
    let keypairs = EnvAccountKeypairs::from_target_folder();
    println!(
        "authority pubkey: {:?}",
        keypairs.governance_authority.pubkey()
    );
    println!("forester pubkey: {:?}", keypairs.forester.pubkey());
    // Initializes the protocol accounts against a locally running validator;
    // intended to be run manually (see the cargo command above).
    setup_accounts(keypairs, SolanaRpcUrl::Localnet).await;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/registry-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/registry-test/src/lib.rs
|
// placeholder
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/e2e-test/Cargo.toml
|
[package]
name = "e2e-test"
version = "1.1.0"
description = "Created with Anchor"
edition = "2021"
[lib]
crate-type = ["cdylib", "lib"]
name = "e2e_test"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
test-sbf = []
custom-heap = []
default = ["custom-heap"]
[dependencies]
anchor-lang = { workspace = true }
light-compressed-token = { workspace = true }
light-registry = { workspace = true }
light-system-program = { workspace = true }
account-compression = { workspace = true }
light-hasher = {path = "../../merkle-tree/hasher"}
light-concurrent-merkle-tree = {path = "../../merkle-tree/concurrent"}
light-indexed-merkle-tree = {path = "../../merkle-tree/indexed"}
light-merkle-tree-reference = {path = "../../merkle-tree/reference"}
light-utils = {path = "../../utils"}
light-verifier = {path = "../../circuit-lib/verifier"}
[target.'cfg(not(target_os = "solana"))'.dependencies]
solana-sdk = { workspace = true }
solana-client = { workspace = true }
[dev-dependencies]
solana-program-test = { workspace = true }
light-test-utils = { version = "1.2.0", path = "../../test-utils", features=["devenv"] }
light-program-test = { workspace = true, features = ["devenv"] }
reqwest = "0.11.26"
tokio = { workspace = true }
light-prover-client = {path = "../../circuit-lib/light-prover-client" }
num-bigint = "0.4.6"
num-traits = "0.2.19"
spl-token = { workspace = true }
anchor-spl = { workspace = true }
rand = "0.8"
spl-concurrent-merkle-tree = { version = "0.2.0", default-features = false}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/e2e-test/Xargo.toml
|
[target.bpfel-unknown-unknown.dependencies.std]
features = []
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/e2e-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/e2e-test/tests/test.rs
|
#![cfg(feature = "test-sbf")]
use light_program_test::test_env::setup_test_programs_with_accounts_with_protocol_config;
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_registry::protocol_config::state::ProtocolConfig;
use light_test_utils::e2e_test_env::{E2ETestEnv, GeneralActionConfig, KeypairActionConfig};
use light_test_utils::indexer::TestIndexer;
#[tokio::test]
async fn test_10_all() {
    // Short epoch phases so that ten rounds cover multiple full
    // registration/active/report-work cycles.
    let protocol_config = ProtocolConfig {
        genesis_slot: 0,
        slot_length: 100,
        registration_phase_length: 100,
        active_phase_length: 200,
        report_work_phase_length: 100,
        ..ProtocolConfig::default()
    };
    let (rpc, env_accounts) =
        setup_test_programs_with_accounts_with_protocol_config(None, protocol_config, true).await;
    // The test indexer mirrors on-chain tree state and supplies proofs.
    let indexer: TestIndexer<ProgramTestRpcConnection> = TestIndexer::init_from_env(
        &env_accounts.forester.insecure_clone(),
        &env_accounts,
        Some(KeypairActionConfig::all_default().prover_config()),
    )
    .await;
    let mut e2e_env =
        E2ETestEnv::<ProgramTestRpcConnection, TestIndexer<ProgramTestRpcConnection>>::new(
            rpc,
            indexer,
            &env_accounts,
            KeypairActionConfig::all_default(),
            GeneralActionConfig::default(),
            10,
            None,
        )
        .await;
    // Execute ten randomized action rounds and dump the aggregate stats.
    e2e_env.execute_rounds().await;
    println!("stats {:?}", e2e_env.stats);
}
// cargo test-sbf -p e2e-test -- --nocapture --ignored --test test_10000_all > output.txt 2>&1
// Long-running stress variant of `test_10_all`: 10000 randomized rounds with
// rollovers enabled and fee assertions disabled. Run manually (see command above).
#[ignore]
#[tokio::test]
async fn test_10000_all() {
    // Very short slot_length relative to the phase lengths so epochs advance
    // quickly during the long run.
    let protocol_config = ProtocolConfig {
        genesis_slot: 0,
        slot_length: 10,
        registration_phase_length: 100,
        active_phase_length: 200,
        report_work_phase_length: 100,
        ..ProtocolConfig::default()
    };
    let (rpc, env_accounts) =
        setup_test_programs_with_accounts_with_protocol_config(None, protocol_config, true).await;
    let indexer: TestIndexer<ProgramTestRpcConnection> = TestIndexer::init_from_env(
        &env_accounts.forester.insecure_clone(),
        &env_accounts,
        Some(KeypairActionConfig::all_default().prover_config()),
    )
    .await;
    let mut env =
        E2ETestEnv::<ProgramTestRpcConnection, TestIndexer<ProgramTestRpcConnection>>::new(
            rpc,
            indexer,
            &env_accounts,
            // No fee asserts: balances drift over thousands of rounds.
            KeypairActionConfig::all_default_no_fee_assert(),
            // Includes tree rollover actions in the random mix.
            GeneralActionConfig::test_with_rollover(),
            10000,
            None,
        )
        .await;
    env.execute_rounds().await;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/e2e-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/e2e-test/src/lib.rs
|
// placeholder
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/.prettierignore
|
.anchor
.DS_Store
target
node_modules
dist
build
test-ledger
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/README.md
|
# SDK test program
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/Anchor.toml
|
[toolchain]
[features]
seeds = false
skip-lint = false
[programs.localnet]
sdk_test = "2tzfijPBGbrR5PboyFUFKzfEoLTwdDSHUjANCw929wyt"
[registry]
url = "https://api.apr.dev"
[provider]
cluster = "Localnet"
wallet = "~/.config/solana/id.json"
[scripts]
test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/**/*.ts"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/package.json
|
{
"scripts": {
"lint:fix": "prettier \"*/**/*{.js,.ts}\" -w",
"lint": "prettier \"*/**/*{.js,.ts}\" --check",
"test": "cargo test-sbf -p sdk-test -- --test-threads 1"
},
"dependencies": {
"@coral-xyz/anchor": "^0.29.0"
},
"devDependencies": {
"@lightprotocol/zk-compression-cli": "workspace:*",
"chai": "^4.3.4",
"mocha": "^10.7.3",
"ts-mocha": "^10.0.0",
"@types/bn.js": "^5.1.0",
"@types/chai": "^4.3.0",
"@types/mocha": "^10.0.7",
"typescript": "^5.5.4",
"prettier": "^2.6.2"
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/tsconfig.json
|
{
"compilerOptions": {
"types": ["mocha", "chai"],
"typeRoots": ["./node_modules/@types"],
"lib": ["es2015"],
"module": "commonjs",
"target": "es6",
"esModuleInterop": true
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/programs/sdk-test/Cargo.toml
|
[package]
name = "sdk-test"
version = "0.7.0"
description = "Test program for Light SDK and Light Macros"
edition = "2021"
rust-version = "1.75.0"
license = "Apache-2.0"
[lib]
crate-type = ["cdylib", "lib"]
name = "sdk_test"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
default = ["idl-build"]
test-sbf = []
bench-sbf = []
idl-build = ["anchor-lang/idl-build", "light-sdk/idl-build"]
[dependencies]
anchor-lang = { workspace=true}
borsh = { workspace = true }
light-hasher = { workspace = true, features = ["solana"] }
light-macros = { workspace = true }
light-sdk = { workspace = true }
light-sdk-macros = { workspace = true }
light-utils = { workspace = true }
light-verifier = { workspace = true }
[target.'cfg(not(target_os = "solana"))'.dependencies]
solana-sdk = { workspace = true }
[dev-dependencies]
light-client = { workspace = true , features = ["devenv"]}
light-program-test = { workspace = true, features = ["devenv"] }
light-test-utils = { path = "../../../../test-utils", version = "1.2.0", features = ["devenv"] }
solana-program-test = { workspace = true }
tokio = { workspace = true }
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/programs/sdk-test/Xargo.toml
|
[target.bpfel-unknown-unknown.dependencies.std]
features = []
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/programs/sdk-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/programs/sdk-test/tests/test.rs
|
#![cfg(feature = "test-sbf")]
use anchor_lang::{AnchorDeserialize, InstructionData, ToAccountMetas};
use light_client::indexer::{AddressMerkleTreeAccounts, Indexer, StateMerkleTreeAccounts};
use light_client::rpc::merkle_tree::MerkleTreeExt;
use light_program_test::test_env::{setup_test_programs_with_accounts_v2, EnvAccounts};
use light_program_test::test_indexer::TestIndexer;
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_sdk::account_meta::LightAccountMeta;
use light_sdk::address::derive_address;
use light_sdk::compressed_account::CompressedAccountWithMerkleContext;
use light_sdk::instruction_data::LightInstructionData;
use light_sdk::merkle_context::{AddressMerkleContext, RemainingAccounts};
use light_sdk::utils::get_cpi_authority_pda;
use light_sdk::verify::find_cpi_signer;
use light_sdk::{PROGRAM_ID_ACCOUNT_COMPRESSION, PROGRAM_ID_LIGHT_SYSTEM, PROGRAM_ID_NOOP};
use light_test_utils::{RpcConnection, RpcError};
use sdk_test::{MyCompressedAccount, NestedData};
use solana_sdk::instruction::Instruction;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signer};
// End-to-end test for the light-sdk example program: creates a compressed
// account with nested data at a derived address, then updates the nested data
// and verifies both states through the test indexer.
#[tokio::test]
async fn test_sdk_test() {
    let (mut rpc, env) =
        setup_test_programs_with_accounts_v2(Some(vec![(String::from("sdk_test"), sdk_test::ID)]))
            .await;
    let payer = rpc.get_payer().insecure_clone();
    // Indexer tracks one state tree and one address tree from the env setup.
    let mut test_indexer: TestIndexer<ProgramTestRpcConnection> = TestIndexer::new(
        &[StateMerkleTreeAccounts {
            merkle_tree: env.merkle_tree_pubkey,
            nullifier_queue: env.nullifier_queue_pubkey,
            cpi_context: env.cpi_context_account_pubkey,
        }],
        &[AddressMerkleTreeAccounts {
            merkle_tree: env.address_merkle_tree_pubkey,
            queue: env.address_merkle_tree_queue_pubkey,
        }],
        true,
        true,
    )
    .await;
    let mut remaining_accounts = RemainingAccounts::default();
    let address_merkle_context = AddressMerkleContext {
        address_merkle_tree_pubkey: env.address_merkle_tree_pubkey,
        address_queue_pubkey: env.address_merkle_tree_queue_pubkey,
    };
    // NOTE(review): seeds here are [b"compressed", b"test"], while the program
    // derives from [b"compressed", name.as_bytes()] with name = "test" — the
    // two derivations must stay in sync.
    let (address, _) = derive_address(
        &[b"compressed", b"test"],
        &address_merkle_context,
        &sdk_test::ID,
    );
    let account_compression_authority = get_cpi_authority_pda(&PROGRAM_ID_LIGHT_SYSTEM);
    // The registered-program pda is derived from the light system program id
    // under the account-compression program.
    let registered_program_pda = Pubkey::find_program_address(
        &[PROGRAM_ID_LIGHT_SYSTEM.to_bytes().as_slice()],
        &PROGRAM_ID_ACCOUNT_COMPRESSION,
    )
    .0;
    // Create the compressed account via the program's `with_nested_data` ix.
    with_nested_data(
        "test".to_string(),
        &mut rpc,
        &mut test_indexer,
        &env,
        &mut remaining_accounts,
        &payer,
        &address,
        &account_compression_authority,
        &registered_program_pda,
        &PROGRAM_ID_LIGHT_SYSTEM,
    )
    .await
    .unwrap();
    // Check that it was created correctly.
    let compressed_accounts = test_indexer.get_compressed_accounts_by_owner(&sdk_test::ID);
    assert_eq!(compressed_accounts.len(), 1);
    let compressed_account = &compressed_accounts[0];
    let record = &compressed_account
        .compressed_account
        .data
        .as_ref()
        .unwrap()
        .data;
    let record = MyCompressedAccount::deserialize(&mut &record[..]).unwrap();
    // NestedData::default() sets `one` to 1 (see the program's lib.rs).
    assert_eq!(record.nested.one, 1);
    // Overwrite the nested data via the `update_nested_data` ix.
    update_nested_data(
        &mut rpc,
        &mut test_indexer,
        &mut remaining_accounts,
        NestedData {
            one: 2,
            two: 3,
            three: 3,
            four: 4,
            five: 5,
            six: 6,
            seven: 7,
            eight: 8,
            nine: 9,
            ten: 10,
            eleven: 11,
            twelve: 12,
        },
        &payer,
        compressed_account,
        &account_compression_authority,
        &registered_program_pda,
        &PROGRAM_ID_LIGHT_SYSTEM,
    )
    .await
    .unwrap();
    // Check that it was updated correctly.
    let compressed_accounts = test_indexer.get_compressed_accounts_by_owner(&sdk_test::ID);
    assert_eq!(compressed_accounts.len(), 1);
    let compressed_account = &compressed_accounts[0];
    let record = &compressed_account
        .compressed_account
        .data
        .as_ref()
        .unwrap()
        .data;
    let record = MyCompressedAccount::deserialize(&mut &record[..]).unwrap();
    assert_eq!(record.nested.one, 2);
}
/// Builds and sends the `with_nested_data` instruction, creating a compressed
/// account at `address`, and feeds the resulting event back into the indexer.
async fn with_nested_data<R>(
    name: String,
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    env: &EnvAccounts,
    remaining_accounts: &mut RemainingAccounts,
    payer: &Keypair,
    address: &[u8; 32],
    account_compression_authority: &Pubkey,
    registered_program_pda: &Pubkey,
    light_system_program: &Pubkey,
) -> Result<(), RpcError>
where
    R: RpcConnection + MerkleTreeExt,
{
    // Non-inclusion proof for the address that is about to be created.
    let proof_result = test_indexer
        .create_proof_for_compressed_accounts(
            None,
            None,
            Some(std::slice::from_ref(address)),
            Some(vec![env.address_merkle_tree_pubkey]),
            rpc,
        )
        .await;
    let address_merkle_context = AddressMerkleContext {
        address_merkle_tree_pubkey: env.address_merkle_tree_pubkey,
        address_queue_pubkey: env.address_merkle_tree_queue_pubkey,
    };
    // Meta for the account being initialized; registers its trees in
    // `remaining_accounts` as a side effect.
    let account_meta = LightAccountMeta::new_init(
        &env.merkle_tree_pubkey,
        Some(&address_merkle_context),
        Some(proof_result.address_root_indices[0]),
        remaining_accounts,
    )
    .unwrap();
    let serialized_inputs = LightInstructionData {
        proof: Some(proof_result),
        accounts: Some(vec![account_meta]),
    }
    .serialize()
    .unwrap();
    let instruction_data = sdk_test::instruction::WithNestedData {
        inputs: serialized_inputs,
        name,
    };
    let cpi_signer = find_cpi_signer(&sdk_test::ID);
    let accounts = sdk_test::accounts::WithNestedData {
        signer: payer.pubkey(),
        light_system_program: *light_system_program,
        account_compression_program: PROGRAM_ID_ACCOUNT_COMPRESSION,
        account_compression_authority: *account_compression_authority,
        registered_program_pda: *registered_program_pda,
        noop_program: PROGRAM_ID_NOOP,
        self_program: sdk_test::ID,
        cpi_signer,
        system_program: solana_sdk::system_program::id(),
    };
    // Static accounts first, then the dynamically collected tree accounts.
    let mut account_metas = accounts.to_account_metas(Some(true));
    account_metas.extend(remaining_accounts.to_account_metas());
    let instruction = Instruction {
        program_id: sdk_test::ID,
        accounts: account_metas,
        data: instruction_data.data(),
    };
    let event = rpc
        .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None)
        .await?;
    // Keep the indexer in sync with the newly emitted state.
    test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0);
    Ok(())
}
// Builds and sends the `update_nested_data` instruction for an existing
// compressed account, then feeds the emitted event back into the indexer.
async fn update_nested_data<R>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    remaining_accounts: &mut RemainingAccounts,
    nested_data: NestedData,
    payer: &Keypair,
    compressed_account: &CompressedAccountWithMerkleContext,
    account_compression_authority: &Pubkey,
    registered_program_pda: &Pubkey,
    light_system_program: &Pubkey,
) -> Result<(), RpcError>
where
    R: RpcConnection + MerkleTreeExt,
{
    // Inclusion proof for the existing account's hash in its state tree.
    let hash = compressed_account.hash().unwrap();
    let merkle_tree_pubkey = compressed_account.merkle_context.merkle_tree_pubkey;
    let rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&[hash]),
            Some(&[merkle_tree_pubkey]),
            None,
            None,
            rpc,
        )
        .await;
    // Meta marking the account as mutable input; registers its tree in
    // `remaining_accounts` as a side effect.
    let compressed_account = LightAccountMeta::new_mut(
        compressed_account,
        rpc_result.root_indices[0],
        &merkle_tree_pubkey,
        remaining_accounts,
    );
    let inputs = LightInstructionData {
        proof: Some(rpc_result),
        accounts: Some(vec![compressed_account]),
    };
    let inputs = inputs.serialize().unwrap();
    let instruction_data = sdk_test::instruction::UpdateNestedData {
        inputs,
        nested_data,
    };
    let cpi_signer = find_cpi_signer(&sdk_test::ID);
    let accounts = sdk_test::accounts::UpdateNestedData {
        signer: payer.pubkey(),
        light_system_program: *light_system_program,
        account_compression_program: PROGRAM_ID_ACCOUNT_COMPRESSION,
        account_compression_authority: *account_compression_authority,
        registered_program_pda: *registered_program_pda,
        noop_program: PROGRAM_ID_NOOP,
        self_program: sdk_test::ID,
        cpi_signer,
        system_program: solana_sdk::system_program::id(),
    };
    // Static accounts first, then the dynamically collected tree accounts.
    let remaining_accounts = remaining_accounts.to_account_metas()
;
    let instruction = Instruction {
        program_id: sdk_test::ID,
        accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
        data: instruction_data.data(),
    };
    let event = rpc
        .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None)
        .await?;
    // Keep the indexer in sync with the updated state.
    test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0);
    Ok(())
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/programs/sdk-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/sdk-test-program/programs/sdk-test/src/lib.rs
|
use anchor_lang::prelude::*;
use light_hasher::Discriminator;
use light_sdk::{
account::LightAccount, address::derive_address, error::LightSdkError,
instruction_data::LightInstructionData, light_account, light_system_accounts,
program_merkle_context::unpack_address_merkle_context, verify::verify_light_accounts,
LightHasher, LightTraits,
};
declare_id!("2tzfijPBGbrR5PboyFUFKzfEoLTwdDSHUjANCw929wyt");
#[program]
pub mod sdk_test {
    use super::*;

    /// Creates a `MyCompressedAccount` at the address derived from
    /// `[b"compressed", name]` and stores `name` in it. `nested` is left at
    /// whatever `from_meta_init` produces — presumably the type's default;
    /// TODO confirm against the SDK.
    pub fn with_compressed_account<'info>(
        ctx: Context<'_, '_, '_, 'info, WithCompressedAccount<'info>>,
        inputs: Vec<u8>,
        name: String,
    ) -> Result<()> {
        let inputs = LightInstructionData::deserialize(&inputs)?;
        let accounts = inputs
            .accounts
            .as_ref()
            .ok_or(LightSdkError::ExpectedAccounts)?;
        // Resolve the address tree/queue pubkeys from the remaining accounts.
        let address_merkle_context = accounts[0]
            .address_merkle_context
            .ok_or(LightSdkError::ExpectedAddressMerkleContext)?;
        let address_merkle_context =
            unpack_address_merkle_context(address_merkle_context, ctx.remaining_accounts);
        // Derivation must match the client side: [b"compressed", name].
        let (address, address_seed) = derive_address(
            &[b"compressed", name.as_bytes()],
            &address_merkle_context,
            &crate::ID,
        );
        let mut my_compressed_account: LightAccount<'_, MyCompressedAccount> =
            LightAccount::from_meta_init(
                &accounts[0],
                MyCompressedAccount::discriminator(),
                address,
                address_seed,
                &crate::ID,
            )?;
        my_compressed_account.name = name;
        // CPI into the light system program to verify the proof and commit state.
        verify_light_accounts(
            &ctx,
            inputs.proof,
            &[my_compressed_account],
            None,
            false,
            None,
        )?;
        Ok(())
    }

    /// Like `with_compressed_account`, but also initializes `nested` with
    /// `NestedData::default()` to exercise nested-struct hashing.
    pub fn with_nested_data<'info>(
        ctx: Context<'_, '_, '_, 'info, WithNestedData<'info>>,
        inputs: Vec<u8>,
        name: String,
    ) -> Result<()> {
        let inputs = LightInstructionData::deserialize(&inputs)?;
        let accounts = inputs
            .accounts
            .as_ref()
            .ok_or(LightSdkError::ExpectedAccounts)?;
        let address_merkle_context = accounts[0]
            .address_merkle_context
            .ok_or(LightSdkError::ExpectedAddressMerkleContext)?;
        let address_merkle_context =
            unpack_address_merkle_context(address_merkle_context, ctx.remaining_accounts);
        let (address, address_seed) = derive_address(
            &[b"compressed", name.as_bytes()],
            &address_merkle_context,
            &crate::ID,
        );
        let mut my_compressed_account: LightAccount<'_, MyCompressedAccount> =
            LightAccount::from_meta_init(
                &accounts[0],
                MyCompressedAccount::discriminator(),
                address,
                address_seed,
                &crate::ID,
            )?;
        my_compressed_account.name = name;
        my_compressed_account.nested = NestedData::default();
        verify_light_accounts(
            &ctx,
            inputs.proof,
            &[my_compressed_account],
            None,
            false,
            None,
        )?;
        Ok(())
    }

    /// Replaces the `nested` payload of an existing `MyCompressedAccount`.
    /// The account's `name` is untouched.
    pub fn update_nested_data<'info>(
        ctx: Context<'_, '_, '_, 'info, UpdateNestedData<'info>>,
        inputs: Vec<u8>,
        nested_data: NestedData,
    ) -> Result<()> {
        let inputs = LightInstructionData::deserialize(&inputs)?;
        let accounts = inputs
            .accounts
            .as_ref()
            .ok_or(LightSdkError::ExpectedAccounts)?;
        // `from_meta_mut` loads the existing account as a mutable input.
        let mut my_compressed_account: LightAccount<'_, MyCompressedAccount> =
            LightAccount::from_meta_mut(
                &accounts[0],
                MyCompressedAccount::discriminator(),
                &crate::ID,
            )?;
        my_compressed_account.nested = nested_data;
        verify_light_accounts(
            &ctx,
            inputs.proof,
            &[my_compressed_account],
            None,
            false,
            None,
        )?;
        Ok(())
    }

    /// Control case: writes to a regular (non-compressed) Anchor PDA.
    pub fn without_compressed_account<'info>(
        ctx: Context<'_, '_, '_, 'info, WithoutCompressedAccount<'info>>,
        name: String,
    ) -> Result<()> {
        ctx.accounts.my_regular_account.name = name;
        Ok(())
    }
}
// Example compressed-account state: a name plus a nested struct that is
// hashed recursively (see the #[nested] marker).
#[light_account]
#[derive(Clone, Debug, Default)]
pub struct MyCompressedAccount {
    name: String,
    // Hashed as a nested sub-structure rather than field-by-field.
    #[nested]
    pub nested: NestedData,
}
// Illustrates nested hashing feature.
// Twelve u16 fields — enough to exercise multi-field hashing of a nested
// struct inside a compressed account.
#[derive(LightHasher, Clone, Debug, AnchorSerialize, AnchorDeserialize)]
pub struct NestedData {
    pub one: u16,
    pub two: u16,
    pub three: u16,
    pub four: u16,
    pub five: u16,
    pub six: u16,
    pub seven: u16,
    pub eight: u16,
    pub nine: u16,
    pub ten: u16,
    pub eleven: u16,
    pub twelve: u16,
}
impl Default for NestedData {
    /// Every field defaults to its ordinal (`one` = 1 … `twelve` = 12), which
    /// makes serialization/hashing regressions easy to spot in tests.
    fn default() -> Self {
        Self {
            one: 1,
            two: 2,
            three: 3,
            four: 4,
            five: 5,
            six: 6,
            seven: 7,
            eight: 8,
            nine: 9,
            ten: 10,
            eleven: 11,
            twelve: 12,
        }
    }
}
// Regular (non-compressed) Anchor account used as a control case by
// `without_compressed_account`.
#[account]
pub struct MyRegularAccount {
    name: String,
}
// Accounts for `with_compressed_account`. The light-system accounts
// (system program, noop, account-compression, etc.) are injected by the
// #[light_system_accounts] macro.
#[light_system_accounts]
#[derive(Accounts, LightTraits)]
#[instruction(name: String)]
pub struct WithCompressedAccount<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::SdkTest>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
    // #[light_account(
    //     init,
    //     seeds = [b"compressed".as_slice()],
    // )]
    // pub my_compressed_account: LightAccount<MyCompressedAccount>,
}
// Accounts for `with_nested_data`; identical shape to `WithCompressedAccount`
// (the compressed account itself travels in the instruction data, not here).
#[light_system_accounts]
#[derive(Accounts, LightTraits)]
pub struct WithNestedData<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::SdkTest>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
    // #[light_account(
    //     init,
    //     seeds = [b"compressed".as_slice()],
    // )]
    // pub my_compressed_account: LightAccount<MyCompressedAccount>,
}
/// Accounts for `update_nested_data`; mirrors `WithNestedData` but the
/// commented-out attribute shows the intended `mut` (update) semantics.
#[light_system_accounts]
#[derive(Accounts, LightTraits)]
pub struct UpdateNestedData<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::SdkTest>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
    // #[light_account(
    //     mut,
    //     seeds = [b"compressed".as_slice()],
    // )]
    // pub my_compressed_account: LightAccount<MyCompressedAccount>,
}
/// Accounts for `without_compressed_account`: a classic Anchor `init` PDA
/// keyed by the instruction's `name` argument.
#[derive(Accounts)]
#[instruction(name: String)]
pub struct WithoutCompressedAccount<'info> {
    #[account(mut)]
    pub signer: Signer<'info>,
    // NOTE(review): space = 8 (discriminator) + 8 leaves only 4 bytes of
    // string payload after the 4-byte Borsh length prefix, so `name` longer
    // than 4 bytes will fail to serialize. Fine for a test fixture, but
    // confirm this limit is intended.
    #[account(
        init,
        seeds = [b"compressed".as_slice(), name.as_bytes()],
        bump,
        payer = signer,
        space = 8 + 8,
    )]
    pub my_regular_account: Account<'info, MyRegularAccount>,
    pub system_program: Program<'info, System>,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/compressed-token-test/Cargo.toml
|
[package]
name = "compressed-token-test"
version = "1.1.0"
description = "Created with Anchor"
edition = "2021"
[lib]
crate-type = ["cdylib", "lib"]
name = "compressed_token_test"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
test-sbf = []
custom-heap = []
default = ["custom-heap"]
[dependencies]
anchor-lang = { workspace = true }
light-compressed-token = { workspace = true }
light-system-program = { workspace = true }
account-compression = { workspace = true }
light-hasher = {path = "../../merkle-tree/hasher"}
light-concurrent-merkle-tree = {path = "../../merkle-tree/concurrent"}
light-utils = {path = "../../utils"}
light-verifier = {path = "../../circuit-lib/verifier"}
[target.'cfg(not(target_os = "solana"))'.dependencies]
solana-sdk = { workspace = true }
[dev-dependencies]
solana-program-test = { workspace = true }
light-test-utils = { version = "1.2.0", path = "../../test-utils", features=["devenv"] }
light-program-test = { workspace = true, features = ["devenv"] }
reqwest = "0.11.26"
tokio = { workspace = true }
light-prover-client = {path = "../../circuit-lib/light-prover-client" }
num-bigint = "0.4.6"
num-traits = "0.2.19"
spl-token = { workspace = true }
anchor-spl = { workspace = true }
rand = "0.8"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/compressed-token-test/Xargo.toml
|
[target.bpfel-unknown-unknown.dependencies.std]
features = []
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/compressed-token-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/compressed-token-test/tests/test.rs
|
#![cfg(feature = "test-sbf")]
use anchor_lang::{
system_program, AnchorDeserialize, AnchorSerialize, InstructionData, ToAccountMetas,
};
use anchor_spl::token::{Mint, TokenAccount};
use anchor_spl::token_2022::spl_token_2022;
use anchor_spl::token_2022::spl_token_2022::extension::ExtensionType;
use light_compressed_token::delegation::sdk::{
create_approve_instruction, create_revoke_instruction, CreateApproveInstructionInputs,
CreateRevokeInstructionInputs,
};
use light_compressed_token::freeze::sdk::{create_instruction, CreateInstructionInputs};
use light_compressed_token::get_token_pool_pda;
use light_compressed_token::mint_sdk::create_create_token_pool_instruction;
use light_compressed_token::mint_sdk::create_mint_to_instruction;
use light_compressed_token::process_transfer::transfer_sdk::create_transfer_instruction;
use light_compressed_token::process_transfer::{get_cpi_authority_pda, TokenTransferOutputData};
use light_compressed_token::spl_compression::spl_token_pool_derivation;
use light_compressed_token::token_data::AccountState;
use light_compressed_token::{token_data::TokenData, ErrorCode};
use light_program_test::test_env::setup_test_programs_with_accounts;
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_prover_client::gnark::helpers::{kill_prover, spawn_prover, ProofType, ProverConfig};
use light_system_program::{
invoke::processor::CompressedProof,
sdk::compressed_account::{CompressedAccountWithMerkleContext, MerkleContext},
};
use light_test_utils::spl::mint_tokens_helper_with_lamports;
use light_test_utils::spl::revoke_test;
use light_test_utils::spl::thaw_test;
use light_test_utils::spl::BurnInstructionMode;
use light_test_utils::spl::{approve_test, create_mint_22_helper};
use light_test_utils::spl::{burn_test, mint_tokens_22_helper_with_lamports};
use light_test_utils::spl::{
compress_test, compressed_transfer_test, create_mint_helper, decompress_test,
mint_tokens_helper,
};
use light_test_utils::spl::{
compressed_transfer_22_test, create_burn_test_instruction, perform_compress_spl_token_account,
};
use light_test_utils::spl::{create_token_2022_account, freeze_test};
use light_test_utils::spl::{mint_spl_tokens, mint_wrapped_sol};
use light_test_utils::{
airdrop_lamports, assert_rpc_error, create_account_instruction, Indexer, RpcConnection,
RpcError, TokenDataWithContext,
};
use light_test_utils::{assert_custom_error_or_program_error, indexer::TestIndexer};
use light_verifier::VerifierError;
use rand::Rng;
use solana_sdk::system_instruction;
use solana_sdk::{
instruction::{Instruction, InstructionError},
pubkey::Pubkey,
signature::Keypair,
signer::Signer,
transaction::{Transaction, TransactionError},
};
use spl_token::{error::TokenError, instruction::initialize_mint};
/// Smoke test: creating an SPL mint plus its compressed-token pool succeeds.
#[tokio::test]
async fn test_create_mint() {
    let (mut rpc, _) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    create_mint_helper(&mut rpc, &payer).await;
}
/// `create_token_pool` failure and success cases:
/// 1. Pool PDA derived from the wrong mint (mint_1 with mint_2's pool).
/// 2. Invalid token program id.
/// 3. Pool PDA derived from the wrong mint (mint_2 with mint_1's pool).
/// 4. Mint with a non-whitelisted token-2022 extension is rejected.
/// 5. Mint with the whitelisted metadata-pointer extension succeeds.
#[tokio::test]
async fn test_failing_create_token_pool() {
    let (mut rpc, _) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    let rent = rpc
        .get_minimum_balance_for_rent_exemption(Mint::LEN)
        .await
        .unwrap();
    // Two independent SPL mints; each pool PDA is derived from its mint key.
    let mint_1_keypair = Keypair::new();
    let mint_1_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        Mint::LEN,
        rent,
        &spl_token::ID,
        Some(&mint_1_keypair),
    );
    let create_mint_1_ix = initialize_mint(
        &spl_token::ID,
        &mint_1_keypair.pubkey(),
        &payer.pubkey(),
        Some(&payer.pubkey()),
        2,
    )
    .unwrap();
    rpc.create_and_send_transaction(
        &[mint_1_account_create_ix, create_mint_1_ix],
        &payer.pubkey(),
        &[&payer, &mint_1_keypair],
    )
    .await
    .unwrap();
    let mint_1_pool_pda = get_token_pool_pda(&mint_1_keypair.pubkey());
    let mint_2_keypair = Keypair::new();
    let mint_2_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        Mint::LEN,
        rent,
        &spl_token::ID,
        Some(&mint_2_keypair),
    );
    let create_mint_2_ix = initialize_mint(
        &spl_token::ID,
        &mint_2_keypair.pubkey(),
        &payer.pubkey(),
        Some(&payer.pubkey()),
        2,
    )
    .unwrap();
    rpc.create_and_send_transaction(
        &[mint_2_account_create_ix, create_mint_2_ix],
        &payer.pubkey(),
        &[&payer, &mint_2_keypair],
    )
    .await
    .unwrap();
    let mint_2_pool_pda = get_token_pool_pda(&mint_2_keypair.pubkey());
    // Try to create pool for `mint_1` while using seeds of `mint_2` for PDAs.
    {
        let instruction_data = light_compressed_token::instruction::CreateTokenPool {};
        let accounts = light_compressed_token::accounts::CreateTokenPoolInstruction {
            fee_payer: payer.pubkey(),
            token_pool_pda: mint_2_pool_pda,
            system_program: system_program::ID,
            mint: mint_1_keypair.pubkey(),
            token_program: anchor_spl::token::ID,
            cpi_authority_pda: get_cpi_authority_pda().0,
        };
        let instruction = Instruction {
            program_id: light_compressed_token::ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: instruction_data.data(),
        };
        let result = rpc
            .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer])
            .await;
        // Anchor's seeds constraint rejects the mismatched pool PDA.
        assert_rpc_error(
            result,
            0,
            anchor_lang::error::ErrorCode::ConstraintSeeds.into(),
        )
        .unwrap();
    }
    // Invalid program id.
    {
        let instruction_data = light_compressed_token::instruction::CreateTokenPool {};
        let accounts = light_compressed_token::accounts::CreateTokenPoolInstruction {
            fee_payer: payer.pubkey(),
            token_pool_pda: mint_1_pool_pda,
            system_program: system_program::ID,
            mint: mint_1_keypair.pubkey(),
            token_program: light_system_program::ID, // invalid program id should be spl token program or token 2022 program
            cpi_authority_pda: get_cpi_authority_pda().0,
        };
        let instruction = Instruction {
            program_id: light_compressed_token::ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: instruction_data.data(),
        };
        let result = rpc
            .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer])
            .await;
        assert_rpc_error(
            result,
            0,
            anchor_lang::error::ErrorCode::InvalidProgramId.into(),
        )
        .unwrap();
    }
    // Try to create pool for `mint_2` while using seeds of `mint_1` for PDAs.
    {
        let instruction_data = light_compressed_token::instruction::CreateTokenPool {};
        let accounts = light_compressed_token::accounts::CreateTokenPoolInstruction {
            fee_payer: payer.pubkey(),
            token_pool_pda: mint_1_pool_pda,
            system_program: system_program::ID,
            mint: mint_2_keypair.pubkey(),
            token_program: anchor_spl::token::ID,
            cpi_authority_pda: get_cpi_authority_pda().0,
        };
        let instruction = Instruction {
            program_id: light_compressed_token::ID,
            accounts: accounts.to_account_metas(Some(true)),
            data: instruction_data.data(),
        };
        let result = rpc
            .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer])
            .await;
        assert_rpc_error(
            result,
            0,
            anchor_lang::error::ErrorCode::ConstraintSeeds.into(),
        )
        .unwrap();
    }
    // failing test try to create a token pool with mint with non-whitelisted token extension
    {
        let payer = rpc.get_payer().insecure_clone();
        let payer_pubkey = payer.pubkey();
        let mint = Keypair::new();
        let token_authority = payer.insecure_clone();
        // Account size must include the extension's TLV entry.
        let space = ExtensionType::try_calculate_account_len::<spl_token_2022::state::Mint>(&[
            ExtensionType::MintCloseAuthority,
        ])
        .unwrap();
        let mut instructions = vec![system_instruction::create_account(
            &payer.pubkey(),
            &mint.pubkey(),
            rpc.get_minimum_balance_for_rent_exemption(space)
                .await
                .unwrap(),
            space as u64,
            &spl_token_2022::ID,
        )];
        // MintCloseAuthority is not on the whitelist of allowed extensions.
        let invalid_token_extension_ix =
            spl_token_2022::instruction::initialize_mint_close_authority(
                &spl_token_2022::ID,
                &mint.pubkey(),
                Some(&token_authority.pubkey()),
            )
            .unwrap();
        instructions.push(invalid_token_extension_ix);
        instructions.push(
            spl_token_2022::instruction::initialize_mint(
                &spl_token_2022::ID,
                &mint.pubkey(),
                &token_authority.pubkey(),
                None,
                2,
            )
            .unwrap(),
        );
        instructions.push(create_create_token_pool_instruction(
            &payer_pubkey,
            &mint.pubkey(),
            true,
        ));
        let result = rpc
            .create_and_send_transaction(&instructions, &payer_pubkey, &[&payer, &mint])
            .await;
        // Index 3: the create_token_pool instruction is the one that fails.
        assert_rpc_error(
            result,
            3,
            light_compressed_token::ErrorCode::MintWithInvalidExtension.into(),
        )
        .unwrap();
    }
    // functional create token pool account with token 2022 mint with allowed metadata pointer extension
    {
        let payer = rpc.get_payer().insecure_clone();
        // create_mint_helper(&mut rpc, &payer).await;
        let payer_pubkey = payer.pubkey();
        let mint = Keypair::new();
        let token_authority = payer.insecure_clone();
        let space = ExtensionType::try_calculate_account_len::<spl_token_2022::state::Mint>(&[
            ExtensionType::MetadataPointer,
        ])
        .unwrap();
        let mut instructions = vec![system_instruction::create_account(
            &payer.pubkey(),
            &mint.pubkey(),
            rpc.get_minimum_balance_for_rent_exemption(space)
                .await
                .unwrap(),
            space as u64,
            &spl_token_2022::ID,
        )];
        let token_extension_ix =
            spl_token_2022::extension::metadata_pointer::instruction::initialize(
                &spl_token_2022::ID,
                &mint.pubkey(),
                Some(token_authority.pubkey()),
                None,
            )
            .unwrap();
        instructions.push(token_extension_ix);
        instructions.push(
            spl_token_2022::instruction::initialize_mint(
                &spl_token_2022::ID,
                &mint.pubkey(),
                &token_authority.pubkey(),
                None,
                2,
            )
            .unwrap(),
        );
        instructions.push(create_create_token_pool_instruction(
            &payer_pubkey,
            &mint.pubkey(),
            true,
        ));
        rpc.create_and_send_transaction(&instructions, &payer_pubkey, &[&payer, &mint])
            .await
            .unwrap();
        // Verify the pool account exists, matches the expected derivation, and
        // has the plain token-account size.
        let token_pool_pubkey = get_token_pool_pda(&mint.pubkey());
        let token_pool_account = rpc.get_account(token_pool_pubkey).await.unwrap().unwrap();
        spl_token_pool_derivation(
            &mint.pubkey(),
            &light_compressed_token::ID,
            &token_pool_pubkey,
        )
        .unwrap();
        assert_eq!(token_pool_account.data.len(), TokenAccount::LEN);
    }
}
/// Wrapped-SOL round trip: mint wSOL into an SPL token account, create a
/// compressed-token pool for the native mint, compress the full balance, then
/// decompress it back into the token account.
///
/// Spawns the inclusion-proof prover for the decompress step and kills it at
/// the end.
#[tokio::test]
async fn test_wrapped_sol() {
    spawn_prover(
        false,
        ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        },
    )
    .await;
    // is token 22 fails with Instruction: InitializeAccount, Program log: Error: Invalid Mint line 216
    // Iterate over a plain array — no need to allocate a Vec for a fixed set
    // of cases (clippy::useless_vec).
    for is_token_22 in [false] {
        let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
        let payer = rpc.get_payer().insecure_clone();
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let native_mint = if is_token_22 {
            spl_token_2022::native_mint::ID
        } else {
            spl_token::native_mint::ID
        };
        let token_account_keypair = Keypair::new();
        create_token_2022_account(
            &mut rpc,
            &native_mint,
            &token_account_keypair,
            &payer,
            is_token_22,
        )
        .await
        .unwrap();
        let amount = 1_000_000_000u64;
        mint_wrapped_sol(
            &mut rpc,
            &payer,
            &token_account_keypair.pubkey(),
            amount,
            is_token_22,
        )
        .await
        .unwrap();
        // Sanity-check the SPL account state before compressing.
        let fetched_token_account = rpc
            .get_account(token_account_keypair.pubkey())
            .await
            .unwrap()
            .unwrap();
        use anchor_lang::solana_program::program_pack::Pack;
        let unpacked_token_account: spl_token::state::Account =
            spl_token::state::Account::unpack(&fetched_token_account.data).unwrap();
        assert_eq!(unpacked_token_account.amount, amount);
        assert_eq!(unpacked_token_account.owner, payer.pubkey());
        assert_eq!(unpacked_token_account.mint, native_mint);
        assert!(unpacked_token_account.is_native.is_some());
        let instruction =
            create_create_token_pool_instruction(&payer.pubkey(), &native_mint, is_token_22);
        rpc.create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer])
            .await
            .unwrap();
        compress_test(
            &payer,
            &mut rpc,
            &mut test_indexer,
            amount,
            &native_mint,
            &env.merkle_tree_pubkey,
            &token_account_keypair.pubkey(),
            None,
            is_token_22,
        )
        .await;
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&payer.pubkey());
        decompress_test(
            &payer,
            &mut rpc,
            &mut test_indexer,
            input_compressed_accounts,
            amount,
            &env.merkle_tree_pubkey,
            &token_account_keypair.pubkey(),
            None,
            is_token_22,
        )
        .await;
    }
    kill_prover();
}
/// Shared driver for the `test_*_mint_to` cases: creates one mint and mints
/// `amounts` (one compressed account per fresh recipient) `iterations` times,
/// optionally attaching `lamports` to each output.
async fn test_mint_to(amounts: Vec<u64>, iterations: usize, lamports: Option<u64>) {
    let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    let merkle_tree_pubkey = env.merkle_tree_pubkey;
    let mut test_indexer =
        TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
    // One fresh recipient per amount.
    let recipients = amounts
        .iter()
        .map(|_| Keypair::new().pubkey())
        .collect::<Vec<_>>();
    let mint = create_mint_helper(&mut rpc, &payer).await;
    for _ in 0..iterations {
        mint_tokens_helper_with_lamports(
            &mut rpc,
            &mut test_indexer,
            &merkle_tree_pubkey,
            &payer,
            &mint,
            amounts.clone(),
            recipients.clone(),
            lamports,
        )
        .await;
    }
    // NOTE(review): no prover is spawned in this helper; presumably
    // `kill_prover` is a no-op then, or cleans up a prover started elsewhere —
    // confirm.
    kill_prover();
}
/// Functional tests:
/// - Mint 10 tokens to spl token account
/// - Compress spl token account
/// - Mint 20 more tokens to spl token account
/// - failing to compress spl token account with 21 remaining balance
/// - Compress spl token account with 1 remaining token
#[tokio::test]
async fn compress_spl_account() {
    // Run the whole scenario for both SPL Token and Token-2022.
    for is_token_22 in [false, true] {
        let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
        let payer = rpc.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let token_account_keypair = Keypair::new();
        let token_owner = payer.insecure_clone();
        airdrop_lamports(&mut rpc, &token_owner.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let mint = if is_token_22 {
            create_mint_22_helper(&mut rpc, &payer).await
        } else {
            create_mint_helper(&mut rpc, &payer).await
        };
        create_token_2022_account(
            &mut rpc,
            &mint,
            &token_account_keypair,
            &token_owner,
            is_token_22,
        )
        .await
        .unwrap();
        let first_token_account_balance = 10;
        mint_spl_tokens(
            &mut rpc,
            &mint,
            &token_account_keypair.pubkey(),
            &token_owner.pubkey(),
            &token_owner,
            first_token_account_balance,
            is_token_22,
        )
        .await
        .unwrap();
        // Compress the full balance (no remaining amount requested).
        perform_compress_spl_token_account(
            &mut rpc,
            &mut test_indexer,
            &payer,
            &token_owner,
            &mint,
            &token_account_keypair.pubkey(),
            &merkle_tree_pubkey,
            None,
            is_token_22,
        )
        .await
        .unwrap();
        let first_token_account_balance = 20;
        mint_spl_tokens(
            &mut rpc,
            &mint,
            &token_account_keypair.pubkey(),
            &token_owner.pubkey(),
            &token_owner,
            first_token_account_balance,
            is_token_22,
        )
        .await
        .unwrap();
        // Requesting to keep more than the account holds must fail.
        {
            let result = perform_compress_spl_token_account(
                &mut rpc,
                &mut test_indexer,
                &payer,
                &token_owner,
                &mint,
                &token_account_keypair.pubkey(),
                &merkle_tree_pubkey,
                Some(first_token_account_balance + 1), // invalid remaining amount
                is_token_22,
            )
            .await;
            assert_rpc_error(result, 0, ErrorCode::InsufficientTokenAccountBalance.into()).unwrap();
        }
        // Compress all but 1 token.
        perform_compress_spl_token_account(
            &mut rpc,
            &mut test_indexer,
            &payer,
            &token_owner,
            &mint,
            &token_account_keypair.pubkey(),
            &merkle_tree_pubkey,
            Some(1),
            is_token_22,
        )
        .await
        .unwrap();
    }
}
/// Token-2022 variant of mint-to: mints 25 outputs of 1 token each to the
/// payer through a Token-2022 mint.
#[tokio::test]
async fn test_22_mint_to() {
    let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    let merkle_tree_pubkey = env.merkle_tree_pubkey;
    let mut test_indexer =
        TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
    let mint = create_mint_22_helper(&mut rpc, &payer).await;
    // Pass the freshly built vectors directly; cloning a temporary `vec![...]`
    // was a redundant allocation (clippy::redundant_clone).
    mint_tokens_22_helper_with_lamports(
        &mut rpc,
        &mut test_indexer,
        &merkle_tree_pubkey,
        &payer,
        &mint,
        vec![1u64; 25],
        vec![payer.pubkey(); 25],
        None,
        true,
    )
    .await;
}
/// Token-2022 transfer: 1 input, 1 output, arbitrary amount.
#[tokio::test]
async fn test_22_transfer() {
    perform_transfer_22_test(1, 1, 12412, true).await;
}
/// Single mint-to of 10_000 tokens with attached lamports.
#[tokio::test]
async fn test_1_mint_to() {
    test_mint_to(vec![10000], 1, Some(1_000_000)).await
}
/// Single mint-to of the maximum representable amount (`u64::MAX`).
#[tokio::test]
async fn test_1_max_mint_to() {
    test_mint_to(vec![u64::MAX], 1, Some(1_000_000)).await
}
/// Five recipients, including a zero amount as an edge case.
#[tokio::test]
async fn test_5_mint_to() {
    test_mint_to(vec![0, 10000, 10000, 10000, 10000], 1, Some(1_000_000)).await
}
/// Ten random amounts, each bounded so the sum stays within `u64`.
#[tokio::test]
async fn test_10_mint_to() {
    let mut rng = rand::thread_rng();
    // Make sure that the total token supply does not exceed `u64::MAX`.
    let amounts: Vec<u64> = (0..10).map(|_| rng.gen_range(0..(u64::MAX / 10))).collect();
    test_mint_to(amounts, 1, Some(1_000_000)).await
}
/// Twenty random amounts, each bounded so the sum stays within `u64`.
#[tokio::test]
async fn test_20_mint_to() {
    let mut rng = rand::thread_rng();
    // Make sure that the total token supply does not exceed `u64::MAX`.
    let amounts: Vec<u64> = (0..20).map(|_| rng.gen_range(0..(u64::MAX / 20))).collect();
    test_mint_to(amounts, 1, Some(1_000_000)).await
}
/// Twenty-five random amounts, repeated for 10 iterations; bounds account for
/// the 10x repetition so cumulative supply stays within `u64`.
#[tokio::test]
async fn test_25_mint_to() {
    let mut rng = rand::thread_rng();
    // Make sure that the total token supply does not exceed `u64::MAX`.
    let amounts: Vec<u64> = (0..25)
        .map(|_| rng.gen_range(0..(u64::MAX / (25 * 10))))
        .collect();
    test_mint_to(amounts, 10, Some(1_000_000)).await
}
/// Edge case: all 25 amounts are zero.
#[tokio::test]
async fn test_25_mint_to_zeros() {
    let amounts = vec![0; 25];
    test_mint_to(amounts, 1, Some(1_000_000)).await
}
/// Failing tests:
/// 1. Try to mint token from `mint_1` and sign the transaction with `mint_2`
///    authority.
/// 2. Try to mint token from `mint_2` and sign the transaction with `mint_1`
///    authority.
/// 3. Try to mint token from `mint_1` while using `mint_2` pool.
/// 4. Try to mint token from `mint_2` while using `mint_1` pool.
/// 5. Invalid CPI authority.
/// 6. Invalid registered program.
/// 7. Invalid noop program.
/// 8. Invalid account compression authority.
/// 9. Invalid Merkle tree.
/// 10. Mint more than `u64::MAX` tokens.
/// 11. Multiple mints which overflow the token supply over `u64::MAX`.
#[tokio::test]
async fn test_mint_to_failing() {
    // Run every case for both SPL Token and Token-2022.
    for is_token_22 in vec![false, true] {
        const MINTS: usize = 10;
        let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
        let payer_1 = rpc.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut rng = rand::thread_rng();
        let payer_2 = Keypair::new();
        airdrop_lamports(&mut rpc, &payer_2.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        // `payer_1` is mint authority of `mint_1`, `payer_2` of `mint_2`.
        let mint_1 = if is_token_22 {
            create_mint_22_helper(&mut rpc, &payer_1).await
        } else {
            create_mint_helper(&mut rpc, &payer_1).await
        };
        let mint_pool_1 = get_token_pool_pda(&mint_1);
        let mint_2 = if is_token_22 {
            create_mint_22_helper(&mut rpc, &payer_2).await
        } else {
            create_mint_helper(&mut rpc, &payer_2).await
        };
        // Make sure that the total token supply does not exceed `u64::MAX`.
        let amounts: Vec<u64> = (0..MINTS)
            .map(|_| rng.gen_range(0..(u64::MAX / MINTS as u64)))
            .collect();
        let recipients = amounts
            .iter()
            .map(|_| Keypair::new().pubkey())
            .collect::<Vec<_>>();
        let instruction_data = light_compressed_token::instruction::MintTo {
            amounts: amounts.clone(),
            public_keys: recipients.clone(),
            lamports: None,
        };
        let token_program = if is_token_22 {
            anchor_spl::token_2022::ID
        } else {
            anchor_spl::token::ID
        };
        // 1. Try to mint token from `mint_1` and sign the transaction with `mint_2`
        //    authority.
        {
            let instruction = create_mint_to_instruction(
                &payer_2.pubkey(),
                &payer_2.pubkey(),
                &mint_1,
                &merkle_tree_pubkey,
                amounts.clone(),
                recipients.clone(),
                None,
                is_token_22,
            );
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_2.pubkey(), &[&payer_2])
                .await;
            // Owner doesn't match the mint authority.
            assert_rpc_error(result, 0, ErrorCode::InvalidAuthorityMint.into()).unwrap();
        }
        // 2. Try to mint token from `mint_2` and sign the transaction with `mint_1`
        //    authority.
        {
            let instruction = create_mint_to_instruction(
                &payer_1.pubkey(),
                &payer_1.pubkey(),
                &mint_2,
                &merkle_tree_pubkey,
                amounts.clone(),
                recipients.clone(),
                None,
                is_token_22,
            );
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_1.pubkey(), &[&payer_1])
                .await;
            // Owner doesn't match the mint authority.
            assert_rpc_error(result, 0, ErrorCode::InvalidAuthorityMint.into()).unwrap();
        }
        // 3. Try to mint token to random token account.
        {
            let token_account_keypair = Keypair::new();
            create_token_2022_account(
                &mut rpc,
                &mint_1,
                &token_account_keypair,
                &payer_1,
                is_token_22,
            )
            .await
            .unwrap();
            let accounts = light_compressed_token::accounts::MintToInstruction {
                fee_payer: payer_1.pubkey(),
                authority: payer_1.pubkey(),
                cpi_authority_pda: get_cpi_authority_pda().0,
                mint: mint_1,
                // A plain token account is not the derived pool PDA.
                token_pool_pda: token_account_keypair.pubkey(),
                token_program,
                light_system_program: light_system_program::ID,
                registered_program_pda: light_system_program::utils::get_registered_program_pda(
                    &light_system_program::ID,
                ),
                noop_program: Pubkey::new_from_array(
                    account_compression::utils::constants::NOOP_PUBKEY,
                ),
                account_compression_authority: light_system_program::utils::get_cpi_authority_pda(
                    &light_system_program::ID,
                ),
                account_compression_program: account_compression::ID,
                merkle_tree: merkle_tree_pubkey,
                self_program: light_compressed_token::ID,
                system_program: system_program::ID,
                sol_pool_pda: None,
            };
            let instruction = Instruction {
                program_id: light_compressed_token::ID,
                accounts: accounts.to_account_metas(Some(true)),
                data: instruction_data.data(),
            };
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_1.pubkey(), &[&payer_1])
                .await;
            assert_rpc_error(
                result,
                0,
                anchor_lang::error::ErrorCode::ConstraintSeeds.into(),
            )
            .unwrap();
        }
        // 4. Try to mint token from `mint_2` while using `mint_1` pool.
        {
            let accounts = light_compressed_token::accounts::MintToInstruction {
                fee_payer: payer_2.pubkey(),
                authority: payer_2.pubkey(),
                cpi_authority_pda: get_cpi_authority_pda().0,
                mint: mint_2,
                token_pool_pda: mint_pool_1,
                token_program,
                light_system_program: light_system_program::ID,
                registered_program_pda: light_system_program::utils::get_registered_program_pda(
                    &light_system_program::ID,
                ),
                noop_program: Pubkey::new_from_array(
                    account_compression::utils::constants::NOOP_PUBKEY,
                ),
                account_compression_authority: light_system_program::utils::get_cpi_authority_pda(
                    &light_system_program::ID,
                ),
                account_compression_program: account_compression::ID,
                merkle_tree: merkle_tree_pubkey,
                self_program: light_compressed_token::ID,
                system_program: system_program::ID,
                sol_pool_pda: None,
            };
            let instruction = Instruction {
                program_id: light_compressed_token::ID,
                accounts: accounts.to_account_metas(Some(true)),
                data: instruction_data.data(),
            };
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_2.pubkey(), &[&payer_2])
                .await;
            assert_rpc_error(
                result,
                0,
                anchor_lang::error::ErrorCode::ConstraintSeeds.into(),
            )
            .unwrap();
        }
        // 5. Invalid CPI authority.
        {
            let invalid_cpi_authority_pda = Keypair::new();
            let accounts = light_compressed_token::accounts::MintToInstruction {
                fee_payer: payer_2.pubkey(),
                authority: payer_2.pubkey(),
                cpi_authority_pda: invalid_cpi_authority_pda.pubkey(),
                mint: mint_1,
                token_pool_pda: mint_pool_1,
                token_program,
                light_system_program: light_system_program::ID,
                registered_program_pda: light_system_program::utils::get_registered_program_pda(
                    &light_system_program::ID,
                ),
                noop_program: Pubkey::new_from_array(
                    account_compression::utils::constants::NOOP_PUBKEY,
                ),
                account_compression_authority: light_system_program::utils::get_cpi_authority_pda(
                    &light_system_program::ID,
                ),
                account_compression_program: account_compression::ID,
                merkle_tree: merkle_tree_pubkey,
                self_program: light_compressed_token::ID,
                system_program: system_program::ID,
                sol_pool_pda: None,
            };
            let instruction = Instruction {
                program_id: light_compressed_token::ID,
                accounts: accounts.to_account_metas(Some(true)),
                data: instruction_data.data(),
            };
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_2.pubkey(), &[&payer_2])
                .await;
            assert_rpc_error(
                result,
                0,
                anchor_lang::error::ErrorCode::ConstraintSeeds.into(),
            )
            .unwrap();
        }
        // 6. Invalid registered program.
        {
            let invalid_registered_program = Keypair::new();
            let accounts = light_compressed_token::accounts::MintToInstruction {
                fee_payer: payer_1.pubkey(),
                authority: payer_1.pubkey(),
                cpi_authority_pda: get_cpi_authority_pda().0,
                mint: mint_1,
                token_pool_pda: mint_pool_1,
                token_program,
                light_system_program: light_system_program::ID,
                registered_program_pda: invalid_registered_program.pubkey(),
                noop_program: Pubkey::new_from_array(
                    account_compression::utils::constants::NOOP_PUBKEY,
                ),
                account_compression_authority: light_system_program::utils::get_cpi_authority_pda(
                    &light_system_program::ID,
                ),
                account_compression_program: account_compression::ID,
                merkle_tree: merkle_tree_pubkey,
                self_program: light_compressed_token::ID,
                system_program: system_program::ID,
                sol_pool_pda: None,
            };
            let instruction = Instruction {
                program_id: light_compressed_token::ID,
                accounts: accounts.to_account_metas(Some(true)),
                data: instruction_data.data(),
            };
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_1.pubkey(), &[&payer_1])
                .await;
            assert_rpc_error(
                result,
                0,
                anchor_lang::error::ErrorCode::ConstraintSeeds.into(),
            )
            .unwrap();
        }
        // 7. Invalid noop program.
        {
            let invalid_noop_program = Keypair::new();
            let accounts = light_compressed_token::accounts::MintToInstruction {
                fee_payer: payer_1.pubkey(),
                authority: payer_1.pubkey(),
                cpi_authority_pda: get_cpi_authority_pda().0,
                mint: mint_1,
                token_pool_pda: mint_pool_1,
                token_program,
                light_system_program: light_system_program::ID,
                registered_program_pda: light_system_program::utils::get_registered_program_pda(
                    &light_system_program::ID,
                ),
                noop_program: invalid_noop_program.pubkey(),
                account_compression_authority: light_system_program::utils::get_cpi_authority_pda(
                    &light_system_program::ID,
                ),
                account_compression_program: account_compression::ID,
                merkle_tree: merkle_tree_pubkey,
                self_program: light_compressed_token::ID,
                system_program: system_program::ID,
                sol_pool_pda: None,
            };
            let instruction = Instruction {
                program_id: light_compressed_token::ID,
                accounts: accounts.to_account_metas(Some(true)),
                data: instruction_data.data(),
            };
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_1.pubkey(), &[&payer_1])
                .await;
            assert_rpc_error(
                result,
                0,
                account_compression::errors::AccountCompressionErrorCode::InvalidNoopPubkey.into(),
            )
            .unwrap();
        }
        // 8. Invalid account compression authority.
        {
            let invalid_account_compression_authority = Keypair::new();
            let accounts = light_compressed_token::accounts::MintToInstruction {
                fee_payer: payer_1.pubkey(),
                authority: payer_1.pubkey(),
                cpi_authority_pda: get_cpi_authority_pda().0,
                mint: mint_1,
                token_pool_pda: mint_pool_1,
                token_program,
                light_system_program: light_system_program::ID,
                registered_program_pda: light_system_program::utils::get_registered_program_pda(
                    &light_system_program::ID,
                ),
                noop_program: Pubkey::new_from_array(
                    account_compression::utils::constants::NOOP_PUBKEY,
                ),
                account_compression_authority: invalid_account_compression_authority.pubkey(),
                account_compression_program: account_compression::ID,
                merkle_tree: merkle_tree_pubkey,
                self_program: light_compressed_token::ID,
                system_program: system_program::ID,
                sol_pool_pda: None,
            };
            let instruction = Instruction {
                program_id: light_compressed_token::ID,
                accounts: accounts.to_account_metas(Some(true)),
                data: instruction_data.data(),
            };
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_1.pubkey(), &[&payer_1])
                .await;
            assert_rpc_error(
                result,
                0,
                anchor_lang::error::ErrorCode::ConstraintSeeds.into(),
            )
            .unwrap();
        }
        // 9. Invalid Merkle tree.
        {
            let invalid_merkle_tree = Keypair::new();
            let instruction = create_mint_to_instruction(
                &payer_1.pubkey(),
                &payer_1.pubkey(),
                &mint_1,
                &invalid_merkle_tree.pubkey(),
                amounts.clone(),
                recipients.clone(),
                None,
                is_token_22,
            );
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_1.pubkey(), &[&payer_1])
                .await;
            // The program aborts without a custom error code here.
            assert!(matches!(
                result,
                Err(RpcError::TransactionError(
                    TransactionError::InstructionError(
                        0,
                        InstructionError::ProgramFailedToComplete
                    )
                ))
            ));
        }
        // 10. Mint more than `u64::MAX` tokens.
        {
            // Overall sum greater than `u64::MAX`
            let amounts = vec![u64::MAX / 5; MINTS];
            let instruction = create_mint_to_instruction(
                &payer_1.pubkey(),
                &payer_1.pubkey(),
                &mint_1,
                &merkle_tree_pubkey,
                amounts,
                recipients.clone(),
                None,
                is_token_22,
            );
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_1.pubkey(), &[&payer_1])
                .await;
            assert_rpc_error(result, 0, ErrorCode::MintTooLarge.into()).unwrap();
        }
        // 11. Multiple mints which overflow the token supply over `u64::MAX`.
        {
            let amounts = vec![u64::MAX / 10; MINTS];
            let instruction = create_mint_to_instruction(
                &payer_1.pubkey(),
                &payer_1.pubkey(),
                &mint_1,
                &merkle_tree_pubkey,
                amounts,
                recipients.clone(),
                None,
                is_token_22,
            );
            // The first mint is still below `u64::MAX`.
            rpc.create_and_send_transaction(&[instruction.clone()], &payer_1.pubkey(), &[&payer_1])
                .await
                .unwrap();
            // The second mint should overflow.
            let result = rpc
                .create_and_send_transaction(&[instruction], &payer_1.pubkey(), &[&payer_1])
                .await;
            assert_rpc_error(result, 0, TokenError::Overflow as u32).unwrap();
        }
    }
}
/// Sweeps transfer shapes: input counts {1,2,3,4,8} crossed with output
/// counts 1..8, capped at the protocol maximum of 8 inputs / 5 outputs.
#[tokio::test]
async fn test_transfers() {
    let possible_inputs = [1, 2, 3, 4, 8];
    for input_num in possible_inputs {
        for output_num in 1..8 {
            if input_num == 8 && output_num > 5 {
                // 8 inputs and 7 outputs is the max we can do
                break;
            }
            println!(
                "\n\ninput num: {}, output num: {}\n\n",
                input_num, output_num
            );
            perform_transfer_test(input_num, output_num, 10_000).await
        }
    }
}
/// Minimal transfer shape: 1 input, 1 output.
#[tokio::test]
async fn test_1_transfer() {
    let possible_inputs = [1];
    for input_num in possible_inputs {
        for output_num in 1..2 {
            if input_num == 8 && output_num > 5 {
                // 8 inputs and 7 outputs is the max we can do
                break;
            }
            println!(
                "\n\ninput num: {}, output num: {}\n\n",
                input_num, output_num
            );
            perform_transfer_test(input_num, output_num, 10_000).await
        }
    }
}
/// Transfer shape: 2 inputs, 2 outputs.
#[tokio::test]
async fn test_2_transfer() {
    let possible_inputs = [2];
    for input_num in possible_inputs {
        for output_num in 2..3 {
            if input_num == 8 && output_num > 5 {
                // 8 inputs and 7 outputs is the max we can do
                break;
            }
            println!(
                "\n\ninput num: {}, output num: {}\n\n",
                input_num, output_num
            );
            perform_transfer_test(input_num, output_num, 10_000).await
        }
    }
}
/// Largest combination that fits in a single transaction: 8 inputs, 5 outputs.
#[tokio::test]
async fn test_8_transfer() {
    let (input_num, output_num) = (8, 5);
    println!(
        "\n\ninput num: {}, output num: {}\n\n",
        input_num, output_num
    );
    perform_transfer_test(input_num, output_num, 10_000).await
}
/// Creates `inputs` compressed accounts with `amount` tokens each, then
/// transfers the whole balance, distributed as evenly as possible, to
/// `outputs` freshly generated recipient compressed accounts.
///
/// Thin wrapper over [`perform_transfer_22_test`] in classic SPL-token
/// (non-Token-2022) mode.
async fn perform_transfer_test(inputs: usize, outputs: usize, amount: u64) {
    perform_transfer_22_test(inputs, outputs, amount, false).await;
}
async fn perform_transfer_22_test(inputs: usize, outputs: usize, amount: u64, token_22: bool) {
let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
let payer = rpc.get_payer().insecure_clone();
let merkle_tree_pubkey = env.merkle_tree_pubkey;
let mut test_indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env(
&payer,
&env,
Some(ProverConfig {
run_mode: None,
circuits: vec![ProofType::Inclusion],
}),
)
.await;
let mint = if token_22 {
create_mint_22_helper(&mut rpc, &payer).await
} else {
create_mint_helper(&mut rpc, &payer).await
};
let sender = Keypair::new();
mint_tokens_22_helper_with_lamports(
&mut rpc,
&mut test_indexer,
&merkle_tree_pubkey,
&payer,
&mint,
vec![amount; inputs],
vec![sender.pubkey(); inputs],
Some(1_000_000),
token_22,
)
.await;
let mut recipients = Vec::new();
for _ in 0..outputs {
recipients.push(Pubkey::new_unique());
}
let input_compressed_accounts =
test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
let equal_amount = (amount * inputs as u64) / outputs as u64;
let rest_amount = (amount * inputs as u64) % outputs as u64;
let mut output_amounts = vec![equal_amount; outputs - 1];
output_amounts.push(equal_amount + rest_amount);
compressed_transfer_22_test(
&payer,
&mut rpc,
&mut test_indexer,
&mint,
&sender,
&recipients,
&output_amounts,
None,
input_compressed_accounts.as_slice(),
&vec![env.merkle_tree_pubkey; outputs],
None,
false,
None,
token_22,
)
.await;
}
/// Round-trip decompression test for both SPL Token and Token-2022:
/// mint compressed tokens, decompress the full amount into an on-chain
/// token account, then compress it back into the Merkle tree.
#[tokio::test]
async fn test_decompression() {
    // Only inclusion proofs are needed; no new addresses are created.
    spawn_prover(
        false,
        ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        },
    )
    .await;
    for is_token_22 in vec![false, true] {
        println!("is_token_22: {}", is_token_22);
        let (mut context, env) = setup_test_programs_with_accounts(None).await;
        let payer = context.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let sender = Keypair::new();
        airdrop_lamports(&mut context, &sender.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let mint = if is_token_22 {
            create_mint_22_helper(&mut context, &payer).await
        } else {
            create_mint_helper(&mut context, &payer).await
        };
        let amount = 10000u64;
        println!("2");
        mint_tokens_22_helper_with_lamports(
            &mut context,
            &mut test_indexer,
            &merkle_tree_pubkey,
            &payer,
            &mint,
            vec![amount],
            vec![sender.pubkey()],
            None,
            is_token_22,
        )
        .await;
        println!("3");
        // On-chain (uncompressed) token account used as the decompression target.
        let token_account_keypair = Keypair::new();
        create_token_2022_account(
            &mut context,
            &mint,
            &token_account_keypair,
            &sender,
            is_token_22,
        )
        .await
        .unwrap();
        println!("4");
        let input_compressed_account =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        // Decompress the full balance into the token account ...
        decompress_test(
            &sender,
            &mut context,
            &mut test_indexer,
            input_compressed_account,
            amount,
            &merkle_tree_pubkey,
            &token_account_keypair.pubkey(),
            None,
            is_token_22,
        )
        .await;
        println!("5");
        // ... then compress it back into the Merkle tree.
        compress_test(
            &sender,
            &mut context,
            &mut test_indexer,
            amount,
            &mint,
            &merkle_tree_pubkey,
            &token_account_keypair.pubkey(),
            None,
            is_token_22,
        )
        .await;
    }
    kill_prover();
}
/// Test delegation:
/// 1. Delegate tokens with approve
/// 2. Delegate transfers a part of the delegated tokens
/// 3. Delegate transfers all the remaining delegated tokens
///
/// Parameterized so callers can exercise zero amounts, multiple inputs and
/// the `u64::MAX` boundary. `output_amounts_1`/`output_amounts_2` are the
/// expected output splits of steps 2 and 3 respectively.
async fn test_delegation(
    mint_amount: u64,
    num_inputs: usize,
    delegated_amount: u64,
    output_amounts_1: Vec<u64>,
    output_amounts_2: Vec<u64>,
) {
    let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    let merkle_tree_pubkey = env.merkle_tree_pubkey;
    // Only inclusion proofs are required for approve/transfer flows.
    let mut test_indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env(
        &payer,
        &env,
        Some(ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        }),
    )
    .await;
    let sender = Keypair::new();
    airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let delegate = Keypair::new();
    airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let mint = create_mint_helper(&mut rpc, &payer).await;
    mint_tokens_helper_with_lamports(
        &mut rpc,
        &mut test_indexer,
        &merkle_tree_pubkey,
        &payer,
        &mint,
        vec![mint_amount; num_inputs],
        vec![sender.pubkey(); num_inputs],
        Some(1_000_000),
    )
    .await;
    // 1. Delegate tokens
    {
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
            .compressed_account
            .merkle_context
            .merkle_tree_pubkey;
        approve_test(
            &sender,
            &mut rpc,
            &mut test_indexer,
            input_compressed_accounts,
            delegated_amount,
            Some(100),
            &delegate.pubkey(),
            &delegated_compressed_account_merkle_tree,
            &delegated_compressed_account_merkle_tree,
            None,
        )
        .await;
    }
    let recipient = Pubkey::new_unique();
    // 2. Transfer partial delegated amount
    {
        // Only spend the accounts that actually carry a delegate.
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let input_compressed_accounts = input_compressed_accounts
            .iter()
            .filter(|x| x.token_data.delegate.is_some())
            .cloned()
            .collect::<Vec<TokenDataWithContext>>();
        compressed_transfer_test(
            &delegate,
            &mut rpc,
            &mut test_indexer,
            &mint,
            &sender,
            &[recipient, sender.pubkey()],
            &output_amounts_1,
            Some(vec![Some(90), Some(10)]),
            input_compressed_accounts.as_slice(),
            &[env.merkle_tree_pubkey; 2],
            Some(1),
            true,
            None,
        )
        .await;
    }
    // 3. Transfer full delegated amount
    {
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let input_compressed_accounts = input_compressed_accounts
            .iter()
            .filter(|x| x.token_data.delegate.is_some())
            .cloned()
            .collect::<Vec<TokenDataWithContext>>();
        compressed_transfer_test(
            &delegate,
            &mut rpc,
            &mut test_indexer,
            &mint,
            &sender,
            &[recipient],
            &output_amounts_2,
            None,
            input_compressed_accounts.as_slice(),
            &[env.merkle_tree_pubkey; 1],
            None,
            true,
            None,
        )
        .await;
    }
    kill_prover();
}
/// Test delegation with mixed inputs (delegated sender accounts combined with
/// the delegate's own accounts):
/// 1. Delegate tokens with approve
/// 2. Delegate transfers a part of the delegated tokens (with delegate change account)
/// 3. Delegate transfers a part of the delegated tokens (without delegate change account)
/// 4. Delegate transfers all the remaining delegated tokens
#[tokio::test]
async fn test_delegation_mixed() {
    let mint_amount: u64 = 10000;
    let num_inputs: usize = 2;
    let delegated_amount: u64 = 3000;
    let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    let merkle_tree_pubkey = env.merkle_tree_pubkey;
    // Only inclusion proofs are required for approve/transfer flows.
    let mut test_indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env(
        &payer,
        &env,
        Some(ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        }),
    )
    .await;
    let sender = Keypair::new();
    airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let delegate = Keypair::new();
    airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let mint = create_mint_helper(&mut rpc, &payer).await;
    mint_tokens_helper_with_lamports(
        &mut rpc,
        &mut test_indexer,
        &merkle_tree_pubkey,
        &payer,
        &mint,
        vec![mint_amount; num_inputs],
        vec![sender.pubkey(); num_inputs],
        Some(1_000_000),
    )
    .await;
    // Mint to the delegate as well so transfers can mix delegated inputs with
    // the delegate's own accounts.
    mint_tokens_helper_with_lamports(
        &mut rpc,
        &mut test_indexer,
        &merkle_tree_pubkey,
        &payer,
        &mint,
        vec![mint_amount; num_inputs],
        vec![delegate.pubkey(); num_inputs],
        Some(1_000_000),
    )
    .await;
    // 1. Delegate tokens
    {
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
            .compressed_account
            .merkle_context
            .merkle_tree_pubkey;
        approve_test(
            &sender,
            &mut rpc,
            &mut test_indexer,
            input_compressed_accounts,
            delegated_amount,
            Some(100),
            &delegate.pubkey(),
            &delegated_compressed_account_merkle_tree,
            &delegated_compressed_account_merkle_tree,
            None,
        )
        .await;
    }
    let recipient = Pubkey::new_unique();
    // 2. Transfer partial delegated amount with delegate change account
    {
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let mut input_compressed_accounts = input_compressed_accounts
            .iter()
            .filter(|x| x.token_data.delegate.is_some())
            .cloned()
            .collect::<Vec<TokenDataWithContext>>();
        // Mix in one of the delegate's own accounts.
        let delegate_input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&delegate.pubkey());
        input_compressed_accounts
            .extend_from_slice(&[delegate_input_compressed_accounts[0].clone()]);
        let delegate_lamports = delegate_input_compressed_accounts[0]
            .compressed_account
            .compressed_account
            .lamports;
        let delegate_input_amount = input_compressed_accounts
            .iter()
            .map(|x| x.token_data.amount)
            .sum::<u64>();
        compressed_transfer_test(
            &delegate,
            &mut rpc,
            &mut test_indexer,
            &mint,
            &sender,
            &[recipient, sender.pubkey(), delegate.pubkey()],
            &[100, 200, delegate_input_amount - 300],
            Some(vec![Some(90), Some(10), Some(delegate_lamports)]),
            input_compressed_accounts.as_slice(),
            &[env.merkle_tree_pubkey; 3],
            Some(1),
            true,
            None,
        )
        .await;
    }
    let recipient = Pubkey::new_unique();
    // 3. Transfer partial delegated amount without delegate change account
    {
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let mut input_compressed_accounts = input_compressed_accounts
            .iter()
            .filter(|x| x.token_data.delegate.is_some())
            .cloned()
            .collect::<Vec<TokenDataWithContext>>();
        let delegate_input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&delegate.pubkey());
        input_compressed_accounts
            .extend_from_slice(&[delegate_input_compressed_accounts[0].clone()]);
        let delegate_input_amount = input_compressed_accounts
            .iter()
            .map(|x| x.token_data.amount)
            .sum::<u64>();
        // All input lamports minus the 100 assigned to the first two outputs.
        let lamports_output_amount = input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.compressed_account.lamports)
            .sum::<u64>()
            - 100;
        compressed_transfer_test(
            &delegate,
            &mut rpc,
            &mut test_indexer,
            &mint,
            &sender,
            &[recipient, sender.pubkey(), delegate.pubkey()],
            &[100, 200, delegate_input_amount - 300],
            Some(vec![Some(90), Some(10), Some(lamports_output_amount)]),
            input_compressed_accounts.as_slice(),
            &[env.merkle_tree_pubkey; 3],
            None,
            true,
            None,
        )
        .await;
        println!("part 3");
    }
    // 4. Transfer full delegated amount
    {
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let mut input_compressed_accounts = input_compressed_accounts
            .iter()
            .filter(|x| x.token_data.delegate.is_some())
            .cloned()
            .collect::<Vec<TokenDataWithContext>>();
        let delegate_input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&delegate.pubkey());
        input_compressed_accounts.extend_from_slice(&delegate_input_compressed_accounts);
        let input_amount = input_compressed_accounts
            .iter()
            .map(|x| x.token_data.amount)
            .sum::<u64>();
        compressed_transfer_test(
            &delegate,
            &mut rpc,
            &mut test_indexer,
            &mint,
            &sender,
            &[recipient],
            &[input_amount],
            None,
            input_compressed_accounts.as_slice(),
            &[env.merkle_tree_pubkey; 1],
            None,
            true,
            None,
        )
        .await;
        println!("part 4");
    }
    kill_prover();
}
/// Zero-amount boundary: mint 0, delegate 0, all output amounts zero.
#[tokio::test]
async fn test_delegation_0() {
    test_delegation(0, 1, 0, vec![0, 0], vec![0]).await
}
/// Typical case: one input of 10000, delegate 1000, split 900/100 then 100.
#[tokio::test]
async fn test_delegation_10000() {
    test_delegation(10000, 1, 1000, vec![900, 100], vec![100]).await
}
/// Same as `test_delegation_10000`, but with eight input accounts.
#[tokio::test]
async fn test_delegation_8_inputs() {
    test_delegation(10000, 8, 1000, vec![900, 100], vec![100]).await
}
/// `u64::MAX` boundary: delegate the entire maximum supply and split it.
#[tokio::test]
async fn test_delegation_max() {
    test_delegation(
        u64::MAX,
        1,
        u64::MAX,
        vec![u64::MAX - 100, 100],
        vec![100],
    )
    .await
}
/// Failing tests:
/// 1. Invalid delegated compressed account Merkle tree.
/// 2. Invalid change compressed account Merkle tree.
/// 3. Invalid proof.
/// 4. Invalid mint.
/// 5. Invalid delegated amount (greater than the sum of the inputs).
#[tokio::test]
async fn test_approve_failing() {
    let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    let merkle_tree_pubkey = env.merkle_tree_pubkey;
    let mut test_indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env(
        &payer,
        &env,
        Some(ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        }),
    )
    .await;
    let sender = Keypair::new();
    airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let delegate = Keypair::new();
    airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let mint = create_mint_helper(&mut rpc, &payer).await;
    let amount = 10000u64;
    mint_tokens_helper(
        &mut rpc,
        &mut test_indexer,
        &merkle_tree_pubkey,
        &payer,
        &mint,
        vec![amount],
        vec![sender.pubkey()],
    )
    .await;
    let input_compressed_accounts =
        test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
    let delegated_amount = 1000u64;
    let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
        .compressed_account
        .merkle_context
        .merkle_tree_pubkey;
    let input_compressed_account_hashes = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.hash().unwrap())
        .collect::<Vec<_>>();
    let input_merkle_tree_pubkeys = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
        .collect::<Vec<_>>();
    // One valid proof is reused for all failing variants below; only the
    // instruction inputs are corrupted per case.
    let proof_rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&input_compressed_account_hashes),
            Some(&input_merkle_tree_pubkeys),
            None,
            None,
            &mut rpc,
        )
        .await;
    let mint = input_compressed_accounts[0].token_data.mint;
    // 1. Invalid delegated compressed account Merkle tree.
    {
        let invalid_delegated_merkle_tree = Keypair::new();
        let inputs = CreateApproveInstructionInputs {
            fee_payer: rpc.get_payer().pubkey(),
            authority: sender.pubkey(),
            input_merkle_contexts: input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context)
                .collect(),
            input_token_data: input_compressed_accounts
                .iter()
                .map(|x| x.token_data.clone())
                .collect(),
            input_compressed_accounts: input_compressed_accounts
                .iter()
                .map(|x| &x.compressed_account.compressed_account)
                .cloned()
                .collect::<Vec<_>>(),
            mint,
            delegated_amount,
            delegate_lamports: None,
            delegated_compressed_account_merkle_tree: invalid_delegated_merkle_tree.pubkey(),
            change_compressed_account_merkle_tree: delegated_compressed_account_merkle_tree,
            delegate: delegate.pubkey(),
            root_indices: proof_rpc_result.root_indices.clone(),
            proof: proof_rpc_result.proof.clone(),
        };
        let instruction = create_approve_instruction(inputs).unwrap();
        let context_payer = rpc.get_payer().insecure_clone();
        let result = rpc
            .create_and_send_transaction(
                &[instruction],
                &sender.pubkey(),
                &[&context_payer, &sender],
            )
            .await;
        // Anchor panics when trying to read the MT account. Unfortunately
        // there is no specific error code to assert.
        assert!(matches!(
            result,
            Err(RpcError::TransactionError(
                TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete)
            ))
        ));
    }
    // 2. Invalid change compressed account Merkle tree.
    {
        let invalid_change_merkle_tree = Keypair::new();
        let inputs = CreateApproveInstructionInputs {
            fee_payer: rpc.get_payer().pubkey(),
            authority: sender.pubkey(),
            input_merkle_contexts: input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context)
                .collect(),
            input_token_data: input_compressed_accounts
                .iter()
                .map(|x| x.token_data.clone())
                .collect(),
            input_compressed_accounts: input_compressed_accounts
                .iter()
                .map(|x| &x.compressed_account.compressed_account)
                .cloned()
                .collect::<Vec<_>>(),
            mint,
            delegated_amount,
            delegate_lamports: None,
            delegated_compressed_account_merkle_tree,
            change_compressed_account_merkle_tree: invalid_change_merkle_tree.pubkey(),
            delegate: delegate.pubkey(),
            root_indices: proof_rpc_result.root_indices.clone(),
            proof: proof_rpc_result.proof.clone(),
        };
        let instruction = create_approve_instruction(inputs).unwrap();
        let context_payer = rpc.get_payer().insecure_clone();
        let result = rpc
            .create_and_send_transaction(
                &[instruction],
                &sender.pubkey(),
                &[&context_payer, &sender],
            )
            .await;
        // Anchor panics when trying to read the MT account. Unfortunately
        // there is no specific error code to assert.
        assert!(matches!(
            result,
            Err(RpcError::TransactionError(
                TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete)
            ))
        ));
    }
    // 3. Invalid proof.
    {
        // An all-zero proof can never verify.
        let invalid_proof = CompressedProof {
            a: [0; 32],
            b: [0; 64],
            c: [0; 32],
        };
        let inputs = CreateApproveInstructionInputs {
            fee_payer: rpc.get_payer().pubkey(),
            authority: sender.pubkey(),
            input_merkle_contexts: input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context)
                .collect(),
            input_token_data: input_compressed_accounts
                .iter()
                .map(|x| x.token_data.clone())
                .collect(),
            input_compressed_accounts: input_compressed_accounts
                .iter()
                .map(|x| &x.compressed_account.compressed_account)
                .cloned()
                .collect::<Vec<_>>(),
            mint,
            delegated_amount,
            delegate_lamports: None,
            delegated_compressed_account_merkle_tree,
            change_compressed_account_merkle_tree: delegated_compressed_account_merkle_tree,
            delegate: delegate.pubkey(),
            root_indices: proof_rpc_result.root_indices.clone(),
            proof: invalid_proof,
        };
        let instruction = create_approve_instruction(inputs).unwrap();
        let context_payer = rpc.get_payer().insecure_clone();
        let result = rpc
            .create_and_send_transaction(
                &[instruction],
                &sender.pubkey(),
                &[&context_payer, &sender],
            )
            .await;
        assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
    }
    // 4. Invalid mint.
    {
        // A wrong mint changes the hashed token data, so proof verification fails.
        let invalid_mint = Keypair::new();
        let inputs = CreateApproveInstructionInputs {
            fee_payer: rpc.get_payer().pubkey(),
            authority: sender.pubkey(),
            input_merkle_contexts: input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context)
                .collect(),
            input_token_data: input_compressed_accounts
                .iter()
                .map(|x| x.token_data.clone())
                .collect(),
            input_compressed_accounts: input_compressed_accounts
                .iter()
                .map(|x| &x.compressed_account.compressed_account)
                .cloned()
                .collect::<Vec<_>>(),
            mint: invalid_mint.pubkey(),
            delegated_amount,
            delegate_lamports: None,
            delegated_compressed_account_merkle_tree,
            change_compressed_account_merkle_tree: delegated_compressed_account_merkle_tree,
            delegate: delegate.pubkey(),
            root_indices: proof_rpc_result.root_indices.clone(),
            proof: proof_rpc_result.proof.clone(),
        };
        let instruction = create_approve_instruction(inputs).unwrap();
        let context_payer = rpc.get_payer().insecure_clone();
        let result = rpc
            .create_and_send_transaction(
                &[instruction],
                &sender.pubkey(),
                &[&context_payer, &sender],
            )
            .await;
        assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
    }
    // 5. Invalid delegate amount (too high)
    {
        // Delegating more than the input sum must underflow the change amount.
        let sum_inputs = input_compressed_accounts
            .iter()
            .map(|x| x.token_data.amount)
            .sum::<u64>();
        let delegated_amount = sum_inputs + 1;
        let inputs = CreateApproveInstructionInputs {
            fee_payer: rpc.get_payer().pubkey(),
            authority: sender.pubkey(),
            input_merkle_contexts: input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context)
                .collect(),
            input_token_data: input_compressed_accounts
                .iter()
                .map(|x| x.token_data.clone())
                .collect(),
            input_compressed_accounts: input_compressed_accounts
                .iter()
                .map(|x| &x.compressed_account.compressed_account)
                .cloned()
                .collect::<Vec<_>>(),
            mint,
            delegated_amount,
            delegate_lamports: None,
            delegated_compressed_account_merkle_tree,
            change_compressed_account_merkle_tree: delegated_compressed_account_merkle_tree,
            delegate: delegate.pubkey(),
            root_indices: proof_rpc_result.root_indices.clone(),
            proof: proof_rpc_result.proof.clone(),
        };
        let instruction = create_approve_instruction(inputs).unwrap();
        let context_payer = rpc.get_payer().insecure_clone();
        let result = rpc
            .create_and_send_transaction(
                &[instruction],
                &sender.pubkey(),
                &[&context_payer, &sender],
            )
            .await;
        assert_rpc_error(result, 0, ErrorCode::ArithmeticUnderflow.into()).unwrap();
    }
}
/// Test revoke:
/// 1. Delegate tokens with approve (one approve per minted input account)
/// 2. Revoke all delegated accounts in a single instruction
async fn test_revoke(num_inputs: usize, mint_amount: u64, delegated_amount: u64) {
    let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    let merkle_tree_pubkey = env.merkle_tree_pubkey;
    // Only inclusion proofs are needed; revoke spends existing accounts only.
    let mut test_indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env(
        &payer,
        &env,
        Some(ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        }),
    )
    .await;
    let sender = Keypair::new();
    airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let delegate = Keypair::new();
    airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let mint = create_mint_helper(&mut rpc, &payer).await;
    mint_tokens_helper_with_lamports(
        &mut rpc,
        &mut test_indexer,
        &merkle_tree_pubkey,
        &payer,
        &mint,
        vec![mint_amount; num_inputs],
        vec![sender.pubkey(); num_inputs],
        Some(1_000_000),
    )
    .await;
    // 1. Delegate tokens, one approve per input account.
    {
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        for input in input_compressed_accounts.iter() {
            let input_compressed_accounts = vec![input.clone()];
            let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            approve_test(
                &sender,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                delegated_amount,
                Some(1000),
                &delegate.pubkey(),
                &delegated_compressed_account_merkle_tree,
                &delegated_compressed_account_merkle_tree,
                None,
            )
            .await;
        }
    }
    // 2. Revoke all delegated accounts.
    {
        // Select only the accounts that carry a delegate. (The original code
        // applied this same filter twice in a row; the redundant second pass
        // was removed.)
        let input_compressed_accounts = test_indexer
            .get_compressed_token_accounts_by_owner(&sender.pubkey())
            .iter()
            .filter(|x| x.token_data.delegate.is_some())
            .cloned()
            .collect::<Vec<TokenDataWithContext>>();
        let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
            .compressed_account
            .merkle_context
            .merkle_tree_pubkey;
        revoke_test(
            &sender,
            &mut rpc,
            &mut test_indexer,
            input_compressed_accounts,
            &delegated_compressed_account_merkle_tree,
            None,
        )
        .await;
    }
}
/// Zero-amount boundary: mint 0 and delegate 0, then revoke.
#[tokio::test]
async fn test_revoke_0() {
    test_revoke(1, 0, 0).await
}
/// Typical case: one input of 10000 with 1000 delegated, then revoke.
#[tokio::test]
async fn test_revoke_10000() {
    test_revoke(1, 10000, 1000).await
}
/// Same as `test_revoke_10000`, but with eight input accounts.
#[tokio::test]
async fn test_revoke_8_inputs() {
    test_revoke(8, 10000, 1000).await
}
/// `u64::MAX` boundary: delegate the entire maximum supply, then revoke.
#[tokio::test]
async fn test_revoke_max() {
    test_revoke(1, u64::MAX, u64::MAX).await
}
/// Failing tests:
/// 1. Invalid root indices.
/// 2. Invalid Merkle tree.
/// 3. Invalid mint.
#[tokio::test]
async fn test_revoke_failing() {
    let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
    let payer = rpc.get_payer().insecure_clone();
    let merkle_tree_pubkey = env.merkle_tree_pubkey;
    let mut test_indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env(
        &payer,
        &env,
        Some(ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        }),
    )
    .await;
    let sender = Keypair::new();
    airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let delegate = Keypair::new();
    airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
        .await
        .unwrap();
    let mint = create_mint_helper(&mut rpc, &payer).await;
    let amount = 10000u64;
    mint_tokens_helper(
        &mut rpc,
        &mut test_indexer,
        &merkle_tree_pubkey,
        &payer,
        &mint,
        vec![amount],
        vec![sender.pubkey()],
    )
    .await;
    // Delegate tokens
    {
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let delegated_amount = 1000u64;
        let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
            .compressed_account
            .merkle_context
            .merkle_tree_pubkey;
        approve_test(
            &sender,
            &mut rpc,
            &mut test_indexer,
            input_compressed_accounts,
            delegated_amount,
            None,
            &delegate.pubkey(),
            &delegated_compressed_account_merkle_tree,
            &delegated_compressed_account_merkle_tree,
            None,
        )
        .await;
    }
    // Collect only the delegated accounts as revoke inputs.
    let input_compressed_accounts =
        test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
    let input_compressed_accounts = input_compressed_accounts
        .iter()
        .filter(|x| x.token_data.delegate.is_some())
        .cloned()
        .collect::<Vec<TokenDataWithContext>>();
    let input_compressed_account_hashes = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.hash().unwrap())
        .collect::<Vec<_>>();
    let input_merkle_tree_pubkeys = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
        .collect::<Vec<_>>();
    // One valid proof is reused for all failing variants below; only the
    // instruction inputs are corrupted per case.
    let proof_rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&input_compressed_account_hashes),
            Some(&input_merkle_tree_pubkeys),
            None,
            None,
            &mut rpc,
        )
        .await;
    // 1. Invalid root indices.
    {
        let invalid_root_indices = vec![0];
        let inputs = CreateRevokeInstructionInputs {
            fee_payer: rpc.get_payer().pubkey(),
            authority: sender.pubkey(),
            input_merkle_contexts: input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context)
                .collect(),
            input_token_data: input_compressed_accounts
                .iter()
                .map(|x| x.token_data.clone())
                .collect(),
            input_compressed_accounts: input_compressed_accounts
                .iter()
                .map(|x| &x.compressed_account.compressed_account)
                .cloned()
                .collect::<Vec<_>>(),
            mint,
            output_account_merkle_tree: merkle_tree_pubkey,
            root_indices: invalid_root_indices,
            proof: proof_rpc_result.proof.clone(),
        };
        let instruction = create_revoke_instruction(inputs).unwrap();
        let context_payer = rpc.get_payer().insecure_clone();
        let result = rpc
            .create_and_send_transaction(
                &[instruction],
                &sender.pubkey(),
                &[&context_payer, &sender],
            )
            .await;
        assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
    }
    // 2. Invalid Merkle tree.
    {
        let invalid_merkle_tree = Keypair::new();
        let inputs = CreateRevokeInstructionInputs {
            fee_payer: rpc.get_payer().pubkey(),
            authority: sender.pubkey(),
            input_merkle_contexts: input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context)
                .collect(),
            input_token_data: input_compressed_accounts
                .iter()
                .map(|x| x.token_data.clone())
                .collect(),
            input_compressed_accounts: input_compressed_accounts
                .iter()
                .map(|x| &x.compressed_account.compressed_account)
                .cloned()
                .collect::<Vec<_>>(),
            mint,
            output_account_merkle_tree: invalid_merkle_tree.pubkey(),
            root_indices: proof_rpc_result.root_indices.clone(),
            proof: proof_rpc_result.proof.clone(),
        };
        let instruction = create_revoke_instruction(inputs).unwrap();
        let context_payer = rpc.get_payer().insecure_clone();
        let result = rpc
            .create_and_send_transaction(
                &[instruction],
                &sender.pubkey(),
                &[&context_payer, &sender],
            )
            .await;
        // Anchor panics when trying to deserialize the account, the
        // instruction returns `ProgramFailedToComplete`. No specific error
        // to assert.
        assert!(matches!(
            result,
            Err(RpcError::TransactionError(
                TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete)
            ))
        ));
    }
    // 3. Invalid mint.
    {
        // A wrong mint changes the hashed token data, so proof verification fails.
        let invalid_mint = Keypair::new();
        let inputs = CreateRevokeInstructionInputs {
            fee_payer: rpc.get_payer().pubkey(),
            authority: sender.pubkey(),
            input_merkle_contexts: input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context)
                .collect(),
            input_token_data: input_compressed_accounts
                .iter()
                .map(|x| x.token_data.clone())
                .collect(),
            input_compressed_accounts: input_compressed_accounts
                .iter()
                .map(|x| &x.compressed_account.compressed_account)
                .cloned()
                .collect::<Vec<_>>(),
            mint: invalid_mint.pubkey(),
            output_account_merkle_tree: merkle_tree_pubkey,
            root_indices: proof_rpc_result.root_indices,
            proof: proof_rpc_result.proof,
        };
        let instruction = create_revoke_instruction(inputs).unwrap();
        let context_payer = rpc.get_payer().insecure_clone();
        let result = rpc
            .create_and_send_transaction(
                &[instruction],
                &sender.pubkey(),
                &[&context_payer, &sender],
            )
            .await;
        assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
    }
}
/// Test Burn (run for both SPL Token and Token-2022):
/// 1. Burn tokens as the owner
/// 2. Delegate tokens with approve
/// 3. Burn a part of the delegated tokens as the delegate
/// 4. Burn all remaining delegated tokens as the delegate
#[tokio::test]
async fn test_burn() {
    // Only inclusion proofs are needed; burn spends existing accounts only.
    spawn_prover(
        false,
        ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        },
    )
    .await;
    for is_token_22 in [false, true] {
        println!("is_token_22: {}", is_token_22);
        let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
        let payer = rpc.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let sender = Keypair::new();
        airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let delegate = Keypair::new();
        airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let mint = if is_token_22 {
            create_mint_22_helper(&mut rpc, &payer).await
        } else {
            create_mint_helper(&mut rpc, &payer).await
        };
        let amount = 10000u64;
        mint_tokens_22_helper_with_lamports(
            &mut rpc,
            &mut test_indexer,
            &merkle_tree_pubkey,
            &payer,
            &mint,
            vec![amount],
            vec![sender.pubkey()],
            Some(1_000_000),
            is_token_22,
        )
        .await;
        // 1. Burn tokens
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let burn_amount = 1000u64;
            let change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            burn_test(
                &sender,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &change_account_merkle_tree,
                burn_amount,
                false,
                None,
                is_token_22,
            )
            .await;
        }
        // 2. Delegate tokens
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let delegated_amount = 1000u64;
            let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            approve_test(
                &sender,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                delegated_amount,
                None,
                &delegate.pubkey(),
                &delegated_compressed_account_merkle_tree,
                &delegated_compressed_account_merkle_tree,
                None,
            )
            .await;
        }
        // 3. Burn delegated tokens
        {
            // Only accounts that carry a delegate may be burned by the delegate.
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let input_compressed_accounts = input_compressed_accounts
                .iter()
                .filter(|x| x.token_data.delegate.is_some())
                .cloned()
                .collect::<Vec<TokenDataWithContext>>();
            let burn_amount = 100;
            let change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            burn_test(
                &delegate,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &change_account_merkle_tree,
                burn_amount,
                true,
                None,
                is_token_22,
            )
            .await;
        }
        // 4. Burn all delegated tokens
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let input_compressed_accounts = input_compressed_accounts
                .iter()
                .filter(|x| x.token_data.delegate.is_some())
                .cloned()
                .collect::<Vec<TokenDataWithContext>>();
            // Burn everything that remains delegated.
            let burn_amount = input_compressed_accounts
                .iter()
                .map(|x| x.token_data.amount)
                .sum::<u64>();
            let change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            burn_test(
                &delegate,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &change_account_merkle_tree,
                burn_amount,
                true,
                None,
                is_token_22,
            )
            .await;
        }
    }
}
/// Failing burn tests, run once for SPL Token and once for Token-2022:
/// 1. Invalid proof.
/// 2. Signer is delegate but token data has no delegate.
/// 3. Signer is the owner but the instruction is built in delegate mode
///    (DelegateSignerCheckFailed).
/// 4. Invalid authority: delegate signs an owner-mode instruction.
/// 5. Invalid mint.
/// 6. Invalid change Merkle tree (nullifier queue passed instead).
#[tokio::test]
async fn failing_tests_burn() {
    // Burn only needs inclusion proofs for its input accounts.
    spawn_prover(
        false,
        ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        },
    )
    .await;
    for is_token_22 in [false, true] {
        let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
        let payer = rpc.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let sender = Keypair::new();
        airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let delegate = Keypair::new();
        airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let mint = if is_token_22 {
            create_mint_22_helper(&mut rpc, &payer).await
        } else {
            create_mint_helper(&mut rpc, &payer).await
        };
        let amount = 10000u64;
        mint_tokens_22_helper_with_lamports(
            &mut rpc,
            &mut test_indexer,
            &merkle_tree_pubkey,
            &payer,
            &mint,
            vec![amount],
            vec![sender.pubkey()],
            None,
            is_token_22,
        )
        .await;
        // Setup: delegate part of the sender's balance so the delegate-mode
        // failure cases below have delegated inputs to work with.
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let delegated_amount = 1000u64;
            let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            approve_test(
                &sender,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                delegated_amount,
                None,
                &delegate.pubkey(),
                &delegated_compressed_account_merkle_tree,
                &delegated_compressed_account_merkle_tree,
                None,
            )
            .await;
        }
        // 1. invalid proof
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let burn_amount = 1;
            let change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            let (_, _, _, _, instruction) = create_burn_test_instruction(
                &sender,
                &mut rpc,
                &mut test_indexer,
                &input_compressed_accounts,
                &change_account_merkle_tree,
                burn_amount,
                false,
                BurnInstructionMode::InvalidProof,
                is_token_22,
            )
            .await;
            let res = rpc
                .create_and_send_transaction(&[instruction], &sender.pubkey(), &[&payer, &sender])
                .await;
            assert_rpc_error(res, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
        }
        // 2. Signer is delegate but token data has no delegate.
        //    The unfiltered inputs include accounts without a delegate, so the
        //    delegate-mode hash presumably diverges from the on-tree hash and
        //    proof verification fails (see assert below) — TODO confirm.
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let burn_amount = 1;
            let change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            let (_, _, _, _, instruction) = create_burn_test_instruction(
                &delegate,
                &mut rpc,
                &mut test_indexer,
                &input_compressed_accounts,
                &change_account_merkle_tree,
                burn_amount,
                true,
                BurnInstructionMode::Normal,
                is_token_22,
            )
            .await;
            let res = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &delegate.pubkey(),
                    &[&payer, &delegate],
                )
                .await;
            assert_rpc_error(res, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
        }
        // 3. Signer is the owner but the instruction is built in delegate mode
        //    over delegated inputs -> DelegateSignerCheckFailed.
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let input_compressed_accounts = input_compressed_accounts
                .iter()
                .filter(|x| x.token_data.delegate.is_some())
                .cloned()
                .collect::<Vec<TokenDataWithContext>>();
            let burn_amount = 1;
            let change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            let (_, _, _, _, instruction) = create_burn_test_instruction(
                &sender,
                &mut rpc,
                &mut test_indexer,
                &input_compressed_accounts,
                &change_account_merkle_tree,
                burn_amount,
                true,
                BurnInstructionMode::Normal,
                is_token_22,
            )
            .await;
            let res = rpc
                .create_and_send_transaction(&[instruction], &sender.pubkey(), &[&payer, &sender])
                .await;
            assert_rpc_error(res, 0, ErrorCode::DelegateSignerCheckFailed.into()).unwrap();
        }
        // 4. invalid authority (use delegate as authority): owner-mode hashing
        //    with the wrong signer fails at proof verification.
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let burn_amount = 1;
            let change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            let (_, _, _, _, instruction) = create_burn_test_instruction(
                &delegate,
                &mut rpc,
                &mut test_indexer,
                &input_compressed_accounts,
                &change_account_merkle_tree,
                burn_amount,
                false,
                BurnInstructionMode::Normal,
                is_token_22,
            )
            .await;
            let res = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &delegate.pubkey(),
                    &[&payer, &delegate],
                )
                .await;
            assert_rpc_error(res, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
        }
        // 5. invalid mint: the token pool PDA derived from the wrong mint is
        //    presumably not initialized, hence AccountNotInitialized — TODO confirm.
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let burn_amount = 1;
            let change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            let (_, _, _, _, instruction) = create_burn_test_instruction(
                &sender,
                &mut rpc,
                &mut test_indexer,
                &input_compressed_accounts,
                &change_account_merkle_tree,
                burn_amount,
                false,
                BurnInstructionMode::InvalidMint,
                is_token_22,
            )
            .await;
            let res = rpc
                .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer, &sender])
                .await;
            assert_rpc_error(
                res,
                0,
                anchor_lang::error::ErrorCode::AccountNotInitialized.into(),
            )
            .unwrap();
        }
        // 6. invalid change merkle tree: the nullifier queue account is passed
        //    where a Merkle tree is expected -> discriminator mismatch.
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let burn_amount = 1;
            let invalid_change_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .nullifier_queue_pubkey;
            let (_, _, _, _, instruction) = create_burn_test_instruction(
                &sender,
                &mut rpc,
                &mut test_indexer,
                &input_compressed_accounts,
                &invalid_change_account_merkle_tree,
                burn_amount,
                false,
                BurnInstructionMode::Normal,
                is_token_22,
            )
            .await;
            let res = rpc
                .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer, &sender])
                .await;
            assert_rpc_error(
                res,
                0,
                anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch.into(),
            )
            .unwrap();
        }
    }
}
/// Test freeze and thaw, run once for SPL Token and once for Token-2022:
/// 1. Freeze tokens
/// 2. Thaw tokens
/// 3. Delegate tokens
/// 4. Freeze delegated tokens
/// 5. Thaw delegated tokens
///
/// * `mint_amount` - amount minted to `sender` (may be 0).
/// * `delegated_amount` - amount approved to `delegate` in step 3.
///
/// NOTE(review): `payer` signs freeze/thaw, so the mint helpers presumably set
/// `payer` as the mint's freeze authority — confirm against the helpers.
async fn test_freeze_and_thaw(mint_amount: u64, delegated_amount: u64) {
    // Freeze/thaw only need inclusion proofs for their input accounts.
    spawn_prover(
        false,
        ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        },
    )
    .await;
    for is_token_22 in [false, true] {
        let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
        let payer = rpc.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let sender = Keypair::new();
        airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let delegate = Keypair::new();
        airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let mint = if is_token_22 {
            create_mint_22_helper(&mut rpc, &payer).await
        } else {
            create_mint_helper(&mut rpc, &payer).await
        };
        mint_tokens_22_helper_with_lamports(
            &mut rpc,
            &mut test_indexer,
            &merkle_tree_pubkey,
            &payer,
            &mint,
            vec![mint_amount],
            vec![sender.pubkey()],
            Some(1_000_000),
            is_token_22,
        )
        .await;
        // 1. Freeze tokens
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let output_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            freeze_test(
                &payer,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &output_merkle_tree,
                None,
            )
            .await;
        }
        // 2. Thaw tokens (only accounts that step 1 actually froze).
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let input_compressed_accounts = input_compressed_accounts
                .iter()
                .filter(|x| x.token_data.state == AccountState::Frozen)
                .cloned()
                .collect::<Vec<TokenDataWithContext>>();
            let output_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            thaw_test(
                &payer,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &output_merkle_tree,
                None,
            )
            .await;
        }
        // 3. Delegate tokens
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let delegated_compressed_account_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            approve_test(
                &sender,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                delegated_amount,
                None,
                &delegate.pubkey(),
                &delegated_compressed_account_merkle_tree,
                &delegated_compressed_account_merkle_tree,
                None,
            )
            .await;
        }
        // 4. Freeze delegated tokens
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let output_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            freeze_test(
                &payer,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &output_merkle_tree,
                None,
            )
            .await;
        }
        // 5. Thaw delegated tokens (again filtered to frozen accounts).
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let input_compressed_accounts = input_compressed_accounts
                .iter()
                .filter(|x| x.token_data.state == AccountState::Frozen)
                .cloned()
                .collect::<Vec<TokenDataWithContext>>();
            let output_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            thaw_test(
                &payer,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &output_merkle_tree,
                None,
            )
            .await;
        }
    }
}
/// Edge case: the full freeze/thaw/delegate cycle with zero-amount accounts.
#[tokio::test]
async fn test_freeze_and_thaw_0() {
    test_freeze_and_thaw(0, 0).await
}
/// Typical case: mint 10_000 tokens and delegate 1_000 of them.
#[tokio::test]
async fn test_freeze_and_thaw_10000() {
    test_freeze_and_thaw(10000, 1000).await
}
/// Failing freeze tests, run once for SPL Token and once for Token-2022:
/// 1. Invalid authority.
/// 2. Invalid Merkle tree.
/// 3. Invalid proof.
/// 4. Freeze frozen compressed account.
#[tokio::test]
async fn test_failing_freeze() {
    spawn_prover(
        false,
        ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        },
    )
    .await;
    for is_token_22 in [false, true] {
        let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
        let payer = rpc.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let sender = Keypair::new();
        airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let delegate = Keypair::new();
        airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let mint = if is_token_22 {
            create_mint_22_helper(&mut rpc, &payer).await
        } else {
            create_mint_helper(&mut rpc, &payer).await
        };
        let amount = 10000u64;
        mint_tokens_22_helper_with_lamports(
            &mut rpc,
            &mut test_indexer,
            &merkle_tree_pubkey,
            &payer,
            &mint,
            vec![amount; 3],
            vec![sender.pubkey(); 3],
            None,
            is_token_22,
        )
        .await;
        // One valid input account plus one inclusion proof, shared by cases 1-3.
        // Cases 1-3 never land a state change, so the proof stays valid across them.
        let input_compressed_accounts =
            vec![test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey())[0].clone()];
        let outputs_merkle_tree = input_compressed_accounts[0]
            .compressed_account
            .merkle_context
            .merkle_tree_pubkey;
        let input_compressed_account_hashes = input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.hash().unwrap())
            .collect::<Vec<_>>();
        let input_merkle_tree_pubkeys = input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
            .collect::<Vec<_>>();
        let proof_rpc_result = test_indexer
            .create_proof_for_compressed_accounts(
                Some(&input_compressed_account_hashes),
                Some(&input_merkle_tree_pubkeys),
                None,
                None,
                &mut rpc,
            )
            .await;
        let context_payer = rpc.get_payer().insecure_clone();
        // 1. Invalid authority: a random keypair is not the mint's freeze authority.
        {
            let invalid_authority = Keypair::new();
            let inputs = CreateInstructionInputs {
                fee_payer: rpc.get_payer().pubkey(),
                authority: invalid_authority.pubkey(),
                input_merkle_contexts: input_compressed_accounts
                    .iter()
                    .map(|x| x.compressed_account.merkle_context)
                    .collect(),
                input_token_data: input_compressed_accounts
                    .iter()
                    .map(|x| x.token_data.clone())
                    .collect(),
                input_compressed_accounts: input_compressed_accounts
                    .iter()
                    .map(|x| &x.compressed_account.compressed_account)
                    .cloned()
                    .collect::<Vec<_>>(),
                outputs_merkle_tree,
                root_indices: proof_rpc_result.root_indices.clone(),
                proof: proof_rpc_result.proof.clone(),
            };
            // create_instruction::<true> builds a freeze instruction.
            let instruction = create_instruction::<true>(inputs).unwrap();
            let result = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &payer.pubkey(),
                    &[&context_payer, &invalid_authority],
                )
                .await;
            assert_rpc_error(result, 0, ErrorCode::InvalidFreezeAuthority.into()).unwrap();
        }
        // 2. Invalid Merkle tree.
        {
            let invalid_merkle_tree = Keypair::new();
            let inputs = CreateInstructionInputs {
                fee_payer: rpc.get_payer().pubkey(),
                authority: payer.pubkey(),
                input_merkle_contexts: input_compressed_accounts
                    .iter()
                    .map(|x| x.compressed_account.merkle_context)
                    .collect(),
                input_token_data: input_compressed_accounts
                    .iter()
                    .map(|x| x.token_data.clone())
                    .collect(),
                input_compressed_accounts: input_compressed_accounts
                    .iter()
                    .map(|x| &x.compressed_account.compressed_account)
                    .cloned()
                    .collect::<Vec<_>>(),
                outputs_merkle_tree: invalid_merkle_tree.pubkey(),
                root_indices: proof_rpc_result.root_indices.clone(),
                proof: proof_rpc_result.proof.clone(),
            };
            let instruction = create_instruction::<true>(inputs).unwrap();
            let result = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &payer.pubkey(),
                    &[&context_payer, &payer],
                )
                .await;
            // Anchor panics when trying to read the MT account. Unfortunately
            // there is no specific error code to assert.
            assert!(matches!(
                result,
                Err(RpcError::TransactionError(
                    TransactionError::InstructionError(
                        0,
                        InstructionError::ProgramFailedToComplete
                    )
                ))
            ));
        }
        // 3. Invalid proof (all-zero proof bytes).
        {
            let invalid_proof = CompressedProof {
                a: [0; 32],
                b: [0; 64],
                c: [0; 32],
            };
            let inputs = CreateInstructionInputs {
                fee_payer: rpc.get_payer().pubkey(),
                authority: payer.pubkey(),
                input_merkle_contexts: input_compressed_accounts
                    .iter()
                    .map(|x| x.compressed_account.merkle_context)
                    .collect(),
                input_token_data: input_compressed_accounts
                    .iter()
                    .map(|x| x.token_data.clone())
                    .collect(),
                input_compressed_accounts: input_compressed_accounts
                    .iter()
                    .map(|x| &x.compressed_account.compressed_account)
                    .cloned()
                    .collect::<Vec<_>>(),
                outputs_merkle_tree,
                root_indices: proof_rpc_result.root_indices.clone(),
                proof: invalid_proof,
            };
            let instruction = create_instruction::<true>(inputs).unwrap();
            let result = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &payer.pubkey(),
                    &[&context_payer, &payer],
                )
                .await;
            assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
        }
        // 4. Freeze frozen compressed account.
        //    NOTE(review): re-freezing presumably fails at proof verification
        //    because the freeze instruction hashes its inputs as unfrozen,
        //    which cannot match a frozen account's hash — confirm.
        {
            // First perform a successful freeze so a frozen account exists.
            freeze_test(
                &payer,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &outputs_merkle_tree,
                None,
            )
            .await;
            let input_compressed_accounts = vec![test_indexer
                .get_compressed_token_accounts_by_owner(&sender.pubkey())
                .iter()
                .filter(|x| x.token_data.state == AccountState::Frozen)
                .cloned()
                .collect::<Vec<TokenDataWithContext>>()[0]
                .clone()];
            let outputs_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            let input_compressed_account_hashes = input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.hash().unwrap())
                .collect::<Vec<_>>();
            let input_merkle_tree_pubkeys = input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
                .collect::<Vec<_>>();
            // The tree changed in freeze_test above, so a fresh proof is required.
            let proof_rpc_result = test_indexer
                .create_proof_for_compressed_accounts(
                    Some(&input_compressed_account_hashes),
                    Some(&input_merkle_tree_pubkeys),
                    None,
                    None,
                    &mut rpc,
                )
                .await;
            let inputs = CreateInstructionInputs {
                fee_payer: rpc.get_payer().pubkey(),
                authority: payer.pubkey(),
                input_merkle_contexts: input_compressed_accounts
                    .iter()
                    .map(|x| x.compressed_account.merkle_context)
                    .collect(),
                input_token_data: input_compressed_accounts
                    .iter()
                    .map(|x| x.token_data.clone())
                    .collect(),
                input_compressed_accounts: input_compressed_accounts
                    .iter()
                    .map(|x| &x.compressed_account.compressed_account)
                    .cloned()
                    .collect::<Vec<_>>(),
                outputs_merkle_tree,
                root_indices: proof_rpc_result.root_indices.clone(),
                proof: proof_rpc_result.proof.clone(),
            };
            let instruction = create_instruction::<true>(inputs).unwrap();
            let result = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &payer.pubkey(),
                    &[&context_payer, &payer],
                )
                .await;
            assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
        }
    }
}
/// Failing thaw tests, run once for SPL Token and once for Token-2022:
/// 1. Invalid authority.
/// 2. Invalid Merkle tree.
/// 3. Invalid proof.
/// 4. Thaw compressed account which is not frozen.
#[tokio::test]
async fn test_failing_thaw() {
    spawn_prover(
        false,
        ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        },
    )
    .await;
    for is_token_22 in [false, true] {
        let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
        let payer = rpc.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let sender = Keypair::new();
        airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let delegate = Keypair::new();
        airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let mint = if is_token_22 {
            create_mint_22_helper(&mut rpc, &payer).await
        } else {
            create_mint_helper(&mut rpc, &payer).await
        };
        let amount = 10000u64;
        // Mint two accounts: one gets frozen (cases 1-3), one stays
        // initialized (case 4).
        mint_tokens_22_helper_with_lamports(
            &mut rpc,
            &mut test_indexer,
            &merkle_tree_pubkey,
            &payer,
            &mint,
            vec![amount; 2],
            vec![sender.pubkey(); 2],
            None,
            is_token_22,
        )
        .await;
        // Freeze tokens
        {
            let input_compressed_accounts = vec![test_indexer
                .get_compressed_token_accounts_by_owner(&sender.pubkey())[0]
                .clone()];
            let output_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            freeze_test(
                &payer,
                &mut rpc,
                &mut test_indexer,
                input_compressed_accounts,
                &output_merkle_tree,
                None,
            )
            .await;
        }
        // Frozen input account plus one inclusion proof, shared by cases 1-3
        // (none of them lands a state change, so the proof stays valid).
        let input_compressed_accounts =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let input_compressed_accounts = input_compressed_accounts
            .iter()
            .filter(|x| x.token_data.state == AccountState::Frozen)
            .cloned()
            .collect::<Vec<TokenDataWithContext>>();
        let outputs_merkle_tree = input_compressed_accounts[0]
            .compressed_account
            .merkle_context
            .merkle_tree_pubkey;
        let input_compressed_account_hashes = input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.hash().unwrap())
            .collect::<Vec<_>>();
        let input_merkle_tree_pubkeys = input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
            .collect::<Vec<_>>();
        let proof_rpc_result = test_indexer
            .create_proof_for_compressed_accounts(
                Some(&input_compressed_account_hashes),
                Some(&input_merkle_tree_pubkeys),
                None,
                None,
                &mut rpc,
            )
            .await;
        let context_payer = rpc.get_payer().insecure_clone();
        // 1. Invalid authority: a random keypair is not the mint's freeze authority.
        {
            let invalid_authority = Keypair::new();
            let inputs = CreateInstructionInputs {
                fee_payer: rpc.get_payer().pubkey(),
                authority: invalid_authority.pubkey(),
                input_merkle_contexts: input_compressed_accounts
                    .iter()
                    .map(|x| x.compressed_account.merkle_context)
                    .collect(),
                input_token_data: input_compressed_accounts
                    .iter()
                    .map(|x| x.token_data.clone())
                    .collect(),
                input_compressed_accounts: input_compressed_accounts
                    .iter()
                    .map(|x| &x.compressed_account.compressed_account)
                    .cloned()
                    .collect::<Vec<_>>(),
                outputs_merkle_tree,
                root_indices: proof_rpc_result.root_indices.clone(),
                proof: proof_rpc_result.proof.clone(),
            };
            // create_instruction::<false> builds a thaw instruction.
            let instruction = create_instruction::<false>(inputs).unwrap();
            let result = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &payer.pubkey(),
                    &[&context_payer, &invalid_authority],
                )
                .await;
            assert_rpc_error(result, 0, ErrorCode::InvalidFreezeAuthority.into()).unwrap();
        }
        // 2. Invalid Merkle tree.
        {
            let invalid_merkle_tree = Keypair::new();
            let inputs = CreateInstructionInputs {
                fee_payer: rpc.get_payer().pubkey(),
                authority: payer.pubkey(),
                input_merkle_contexts: input_compressed_accounts
                    .iter()
                    .map(|x| x.compressed_account.merkle_context)
                    .collect(),
                input_token_data: input_compressed_accounts
                    .iter()
                    .map(|x| x.token_data.clone())
                    .collect(),
                input_compressed_accounts: input_compressed_accounts
                    .iter()
                    .map(|x| &x.compressed_account.compressed_account)
                    .cloned()
                    .collect::<Vec<_>>(),
                outputs_merkle_tree: invalid_merkle_tree.pubkey(),
                root_indices: proof_rpc_result.root_indices.clone(),
                proof: proof_rpc_result.proof.clone(),
            };
            let instruction = create_instruction::<false>(inputs).unwrap();
            let result = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &payer.pubkey(),
                    &[&context_payer, &payer],
                )
                .await;
            // Anchor panics when trying to read the MT account. Unfortunately
            // there is no specific error code to assert.
            assert!(matches!(
                result,
                Err(RpcError::TransactionError(
                    TransactionError::InstructionError(
                        0,
                        InstructionError::ProgramFailedToComplete
                    )
                ))
            ));
        }
        // 3. Invalid proof (all-zero proof bytes).
        {
            let invalid_proof = CompressedProof {
                a: [0; 32],
                b: [0; 64],
                c: [0; 32],
            };
            let inputs = CreateInstructionInputs {
                fee_payer: rpc.get_payer().pubkey(),
                authority: payer.pubkey(),
                input_merkle_contexts: input_compressed_accounts
                    .iter()
                    .map(|x| x.compressed_account.merkle_context)
                    .collect(),
                input_token_data: input_compressed_accounts
                    .iter()
                    .map(|x| x.token_data.clone())
                    .collect(),
                input_compressed_accounts: input_compressed_accounts
                    .iter()
                    .map(|x| &x.compressed_account.compressed_account)
                    .cloned()
                    .collect::<Vec<_>>(),
                outputs_merkle_tree,
                root_indices: proof_rpc_result.root_indices.clone(),
                proof: invalid_proof,
            };
            let instruction = create_instruction::<false>(inputs).unwrap();
            let result = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &payer.pubkey(),
                    &[&context_payer, &payer],
                )
                .await;
            assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
        }
        // 4. thaw compressed account which is not frozen
        //    NOTE(review): presumably fails at proof verification because thaw
        //    hashes its inputs as frozen, which cannot match an initialized
        //    account's hash — confirm.
        {
            let input_compressed_accounts =
                test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
            let input_compressed_accounts = input_compressed_accounts
                .iter()
                .filter(|x| x.token_data.state == AccountState::Initialized)
                .cloned()
                .collect::<Vec<TokenDataWithContext>>();
            let outputs_merkle_tree = input_compressed_accounts[0]
                .compressed_account
                .merkle_context
                .merkle_tree_pubkey;
            let input_compressed_account_hashes = input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.hash().unwrap())
                .collect::<Vec<_>>();
            let input_merkle_tree_pubkeys = input_compressed_accounts
                .iter()
                .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
                .collect::<Vec<_>>();
            let proof_rpc_result = test_indexer
                .create_proof_for_compressed_accounts(
                    Some(&input_compressed_account_hashes),
                    Some(&input_merkle_tree_pubkeys),
                    None,
                    None,
                    &mut rpc,
                )
                .await;
            let inputs = CreateInstructionInputs {
                fee_payer: rpc.get_payer().pubkey(),
                authority: payer.pubkey(),
                input_merkle_contexts: input_compressed_accounts
                    .iter()
                    .map(|x| x.compressed_account.merkle_context)
                    .collect(),
                input_token_data: input_compressed_accounts
                    .iter()
                    .map(|x| x.token_data.clone())
                    .collect(),
                input_compressed_accounts: input_compressed_accounts
                    .iter()
                    .map(|x| &x.compressed_account.compressed_account)
                    .cloned()
                    .collect::<Vec<_>>(),
                outputs_merkle_tree,
                root_indices: proof_rpc_result.root_indices.clone(),
                proof: proof_rpc_result.proof.clone(),
            };
            let instruction = create_instruction::<false>(inputs).unwrap();
            let result = rpc
                .create_and_send_transaction(
                    &[instruction],
                    &payer.pubkey(),
                    &[&context_payer, &payer],
                )
                .await;
            assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
        }
    }
}
/// Failing compression/decompression tests, run once for SPL Token and once
/// for Token-2022:
/// 1. Invalid decompress account
/// 2. Invalid token pool pda
/// 3. Invalid decompression amount -1
/// 4. Invalid decompression amount +1
/// 5. Invalid decompression amount 0
/// 6: invalid token recipient
/// 7. Invalid compression amount -1
/// 8. Invalid compression amount +1
/// 9. Invalid compression amount 0
#[tokio::test]
async fn test_failing_decompression() {
    spawn_prover(
        true,
        ProverConfig {
            run_mode: None,
            circuits: vec![ProofType::Inclusion],
        },
    )
    .await;
    // NOTE(review): `vec![false, true]` could be the array `[false, true]` as
    // used by the sibling tests (clippy::useless_vec).
    for is_token_22 in vec![false, true] {
        let (mut context, env) = setup_test_programs_with_accounts(None).await;
        let payer = context.get_payer().insecure_clone();
        let merkle_tree_pubkey = env.merkle_tree_pubkey;
        let mut test_indexer =
            TestIndexer::<ProgramTestRpcConnection>::init_from_env(&payer, &env, None).await;
        let sender = Keypair::new();
        airdrop_lamports(&mut context, &sender.pubkey(), 1_000_000_000)
            .await
            .unwrap();
        let mint = if is_token_22 {
            create_mint_22_helper(&mut context, &payer).await
        } else {
            create_mint_helper(&mut context, &payer).await
        };
        let amount = 10000u64;
        mint_tokens_22_helper_with_lamports(
            &mut context,
            &mut test_indexer,
            &merkle_tree_pubkey,
            &payer,
            &mint,
            vec![amount],
            vec![sender.pubkey()],
            None,
            is_token_22,
        )
        .await;
        // SPL token account owned by `sender`; decompression target and
        // compression source for the cases below.
        let token_account_keypair = Keypair::new();
        create_token_2022_account(
            &mut context,
            &mint,
            &token_account_keypair,
            &sender,
            is_token_22,
        )
        .await
        .unwrap();
        let input_compressed_account =
            test_indexer.get_compressed_token_accounts_by_owner(&sender.pubkey());
        let decompress_amount = amount - 1000;
        // Test 1: invalid decompress account (the mint is not a token account)
        {
            let invalid_token_account = mint;
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                input_compressed_account.clone(),
                decompress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                decompress_amount,
                false,
                &invalid_token_account,
                Some(get_token_pool_pda(&mint)),
                &mint,
                0, //ProgramError::InvalidAccountData.into(), error code 17179869184 does not fit u32
                is_token_22,
            )
            .await
            .unwrap_err();
        }
        // Test 2: invalid token pool pda (compress and decompress)
        {
            let invalid_token_account_keypair = Keypair::new();
            create_token_2022_account(
                &mut context,
                &mint,
                &invalid_token_account_keypair,
                &payer,
                is_token_22,
            )
            .await
            .unwrap();
            // Decompress path with a regular token account posing as the pool.
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                input_compressed_account.clone(),
                decompress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                decompress_amount,
                false,
                &token_account_keypair.pubkey(),
                Some(invalid_token_account_keypair.pubkey()),
                &mint,
                ErrorCode::InvalidTokenPoolPda.into(),
                is_token_22,
            )
            .await
            .unwrap();
            let invalid_token_account_keypair = Keypair::new();
            create_token_2022_account(
                &mut context,
                &mint,
                &invalid_token_account_keypair,
                &payer,
                is_token_22,
            )
            .await
            .unwrap();
            // Compress path with the same invalid pool account.
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                input_compressed_account.clone(),
                0, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                0,
                true,
                &token_account_keypair.pubkey(),
                Some(invalid_token_account_keypair.pubkey()),
                &mint,
                ErrorCode::InvalidTokenPoolPda.into(),
                is_token_22,
            )
            .await
            .unwrap();
        }
        // Test 3: invalid decompression amount -1
        {
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                input_compressed_account.clone(),
                decompress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                decompress_amount - 1,
                false,
                &token_account_keypair.pubkey(),
                Some(get_token_pool_pda(&mint)),
                &mint,
                ErrorCode::SumCheckFailed.into(),
                is_token_22,
            )
            .await
            .unwrap();
        }
        // Test 4: invalid decompression amount + 1
        {
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                input_compressed_account.clone(),
                decompress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                decompress_amount + 1,
                false,
                &token_account_keypair.pubkey(),
                Some(get_token_pool_pda(&mint)),
                &mint,
                ErrorCode::ComputeOutputSumFailed.into(),
                is_token_22,
            )
            .await
            .unwrap();
        }
        // Test 5: invalid decompression amount 0
        {
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                input_compressed_account.clone(),
                decompress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                0,
                false,
                &token_account_keypair.pubkey(),
                Some(get_token_pool_pda(&mint)),
                &mint,
                ErrorCode::SumCheckFailed.into(),
                is_token_22,
            )
            .await
            .unwrap();
        }
        // Test 6: invalid token recipient (the pool PDA must not receive)
        {
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                input_compressed_account.clone(),
                decompress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                decompress_amount,
                false,
                &get_token_pool_pda(&mint),
                Some(get_token_pool_pda(&mint)),
                &mint,
                ErrorCode::IsTokenPoolPda.into(),
                is_token_22,
            )
            .await
            .unwrap();
        }
        // functional so that we have tokens to compress
        decompress_test(
            &sender,
            &mut context,
            &mut test_indexer,
            input_compressed_account,
            amount,
            &merkle_tree_pubkey,
            &token_account_keypair.pubkey(),
            None,
            is_token_22,
        )
        .await;
        let compress_amount = decompress_amount - 100;
        // Test 7: invalid compression amount -1
        {
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                Vec::new(),
                compress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                compress_amount - 1,
                true,
                &token_account_keypair.pubkey(),
                Some(get_token_pool_pda(&mint)),
                &mint,
                ErrorCode::ComputeOutputSumFailed.into(),
                is_token_22,
            )
            .await
            .unwrap();
        }
        // Test 8: invalid compression amount +1
        {
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                Vec::new(),
                compress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                compress_amount + 1,
                true,
                &token_account_keypair.pubkey(),
                Some(get_token_pool_pda(&mint)),
                &mint,
                ErrorCode::SumCheckFailed.into(),
                is_token_22,
            )
            .await
            .unwrap();
        }
        // Test 9: invalid compression amount 0
        {
            failing_compress_decompress(
                &sender,
                &mut context,
                &mut test_indexer,
                Vec::new(),
                compress_amount, // needs to be consistent with compression amount
                &merkle_tree_pubkey,
                0,
                true,
                &token_account_keypair.pubkey(),
                Some(get_token_pool_pda(&mint)),
                &mint,
                ErrorCode::ComputeOutputSumFailed.into(),
                is_token_22,
            )
            .await
            .unwrap();
        }
        // functional
        compress_test(
            &sender,
            &mut context,
            &mut test_indexer,
            amount,
            &mint,
            &merkle_tree_pubkey,
            &token_account_keypair.pubkey(),
            None,
            is_token_22,
        )
        .await;
    }
    kill_prover();
}
/// Builds and sends a compress or decompress transfer that is expected to
/// fail, then asserts that the last instruction failed with `error_code`.
///
/// * `amount` - amount moved into/out of the change output (and approved to
///   the CPI authority when compressing).
/// * `compression_amount` - amount encoded in the instruction itself; callers
///   make it deliberately inconsistent with `amount` to trigger sum-check
///   failures.
/// * `is_compress` - `true` compresses from the SPL account, `false`
///   decompresses into it.
///
/// Returns the result of `assert_rpc_error`, i.e. `Ok(())` when the
/// transaction failed with exactly `error_code`.
#[allow(clippy::too_many_arguments)]
pub async fn failing_compress_decompress<R: RpcConnection>(
    payer: &Keypair,
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    input_compressed_accounts: Vec<TokenDataWithContext>,
    amount: u64,
    output_merkle_tree_pubkey: &Pubkey,
    compression_amount: u64,
    is_compress: bool,
    compress_or_decompress_token_account: &Pubkey,
    token_pool_pda: Option<Pubkey>,
    mint: &Pubkey,
    error_code: u32,
    is_token_22: bool,
) -> Result<(), RpcError> {
    // Sum of the compressed input amounts; the single change output is
    // derived from it.
    let input_sum: u64 = input_compressed_accounts
        .iter()
        .map(|account| account.token_data.amount)
        .sum();
    // Compression adds `amount` to the change output; decompression removes it.
    let change_amount = if is_compress {
        input_sum + amount
    } else {
        input_sum - amount
    };
    let change_out_compressed_account = TokenTransferOutputData {
        amount: change_amount,
        owner: payer.pubkey(),
        lamports: None,
        merkle_tree: *output_merkle_tree_pubkey,
    };
    let hashes = input_compressed_accounts
        .iter()
        .map(|account| account.compressed_account.hash().unwrap())
        .collect::<Vec<_>>();
    let merkle_tree_pubkeys = input_compressed_accounts
        .iter()
        .map(|account| account.compressed_account.merkle_context.merkle_tree_pubkey)
        .collect::<Vec<_>>();
    // A validity proof is only needed when there are compressed inputs.
    let (root_indices, proof) = if hashes.is_empty() {
        (Vec::new(), None)
    } else {
        let proof_result = test_indexer
            .create_proof_for_compressed_accounts(
                Some(&hashes),
                Some(&merkle_tree_pubkeys),
                None,
                None,
                rpc,
            )
            .await;
        (proof_result.root_indices, Some(proof_result.proof))
    };
    let merkle_contexts = input_compressed_accounts
        .iter()
        .map(|account| account.compressed_account.merkle_context)
        .collect::<Vec<_>>();
    let token_data = input_compressed_accounts
        .iter()
        .map(|account| account.token_data.clone())
        .collect::<Vec<_>>();
    let compressed_accounts = input_compressed_accounts
        .iter()
        .map(|account| &account.compressed_account.compressed_account)
        .cloned()
        .collect::<Vec<_>>();
    let instruction = create_transfer_instruction(
        &rpc.get_payer().pubkey(),
        &payer.pubkey(),
        &merkle_contexts,
        &[change_out_compressed_account],
        &root_indices,
        &proof,
        token_data.as_slice(),
        &compressed_accounts,
        *mint,
        None,
        is_compress,
        Some(compression_amount),
        token_pool_pda,
        Some(*compress_or_decompress_token_account),
        true,
        None,
        None,
        is_token_22,
    )
    .unwrap();
    // Compression requires a prior SPL `approve` so the program's CPI
    // authority may move tokens out of the sender's token account.
    let instructions = if is_compress {
        let cpi_authority = get_cpi_authority_pda().0;
        let approve_instruction = if is_token_22 {
            spl_token_2022::instruction::approve(
                &spl_token_2022::ID,
                compress_or_decompress_token_account,
                &cpi_authority,
                &payer.pubkey(),
                &[&payer.pubkey()],
                amount,
            )
            .unwrap()
        } else {
            spl_token::instruction::approve(
                &anchor_spl::token::ID,
                compress_or_decompress_token_account,
                &cpi_authority,
                &payer.pubkey(),
                &[&payer.pubkey()],
                amount,
            )
            .unwrap()
        };
        vec![approve_instruction, instruction]
    } else {
        vec![instruction]
    };
    let context_payer = rpc.get_payer().insecure_clone();
    let result = rpc
        .create_and_send_transaction(
            &instructions,
            &context_payer.pubkey(),
            &[&context_payer, payer],
        )
        .await;
    // The transfer is always the last instruction, so that is where the
    // expected error must surface.
    assert_rpc_error(
        result,
        instructions.len().saturating_sub(1) as u8,
        error_code,
    )
}
/// Failing tests:
/// Out utxo tests:
/// 1. Invalid token data amount (+ 1)
/// 2. Invalid token data amount (- 1)
/// 3. Invalid token data zero out amount
/// 4. Invalid double token data amount
/// In utxo tests:
/// 5. Invalid input token data amount (0)
/// 6. Invalid delegate
/// 7. Invalid owner
/// 8. Invalid is native (deactivated, revisit)
/// 9. DelegateUndefined
/// Invalid account state (Frozen is only hashed if frozen thus failing test is not possible)
/// 10. invalid root indices (ProofVerificationFailed)
/// 11. invalid mint (ProofVerificationFailed)
/// 12. invalid Merkle tree pubkey (ProofVerificationFailed)
#[tokio::test]
async fn test_invalid_inputs() {
let (mut rpc, env) = setup_test_programs_with_accounts(None).await;
let payer = rpc.get_payer().insecure_clone();
let merkle_tree_pubkey = env.merkle_tree_pubkey;
let nullifier_queue_pubkey = env.nullifier_queue_pubkey;
let mut test_indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env(
&payer,
&env,
Some(ProverConfig {
run_mode: None,
circuits: vec![ProofType::Inclusion],
}),
)
.await;
let recipient_keypair = Keypair::new();
airdrop_lamports(&mut rpc, &recipient_keypair.pubkey(), 1_000_000_000)
.await
.unwrap();
let mint = create_mint_helper(&mut rpc, &payer).await;
let amount = 10000u64;
mint_tokens_helper(
&mut rpc,
&mut test_indexer,
&merkle_tree_pubkey,
&payer,
&mint,
vec![amount],
vec![recipient_keypair.pubkey()],
)
.await;
let payer = recipient_keypair.insecure_clone();
let transfer_recipient_keypair = Keypair::new();
let input_compressed_account_token_data =
test_indexer.token_compressed_accounts[0].token_data.clone();
let input_compressed_accounts = vec![test_indexer.token_compressed_accounts[0]
.compressed_account
.clone()];
let proof_rpc_result = test_indexer
.create_proof_for_compressed_accounts(
Some(&[input_compressed_accounts[0].hash().unwrap()]),
Some(&[input_compressed_accounts[0]
.merkle_context
.merkle_tree_pubkey]),
None,
None,
&mut rpc,
)
.await;
let change_out_compressed_account_0 = TokenTransferOutputData {
amount: input_compressed_account_token_data.amount - 1000,
owner: recipient_keypair.pubkey(),
lamports: None,
merkle_tree: merkle_tree_pubkey,
};
let transfer_recipient_out_compressed_account_0 = TokenTransferOutputData {
amount: 1000,
owner: transfer_recipient_keypair.pubkey(),
lamports: None,
merkle_tree: merkle_tree_pubkey,
};
{
let mut transfer_recipient_out_compressed_account_0 =
transfer_recipient_out_compressed_account_0;
transfer_recipient_out_compressed_account_0.amount += 1;
// Test 1: invalid token data amount (+ 1)
let res = perform_transfer_failing_test(
&mut rpc,
change_out_compressed_account_0,
transfer_recipient_out_compressed_account_0,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&recipient_keypair,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, ErrorCode::ComputeOutputSumFailed.into())
.unwrap();
}
// Test 2: invalid token data amount (- 1)
{
let transfer_recipient_out_compressed_account_0 = TokenTransferOutputData {
amount: 1000 - 1,
owner: transfer_recipient_keypair.pubkey(),
lamports: None,
merkle_tree: merkle_tree_pubkey,
};
// invalid token data amount (- 1)
let res = perform_transfer_failing_test(
&mut rpc,
change_out_compressed_account_0,
transfer_recipient_out_compressed_account_0,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&recipient_keypair,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, ErrorCode::SumCheckFailed.into()).unwrap();
}
// Test 3: invalid token data amount (0)
{
let zero_amount = TokenTransferOutputData {
amount: 0,
owner: transfer_recipient_keypair.pubkey(),
lamports: None,
merkle_tree: merkle_tree_pubkey,
};
// invalid token data zero out amount
let res = perform_transfer_failing_test(
&mut rpc,
zero_amount,
zero_amount,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&recipient_keypair,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, ErrorCode::SumCheckFailed.into()).unwrap();
}
// Test 4: invalid token data amount (2x)
{
let double_amount = TokenTransferOutputData {
amount: input_compressed_account_token_data.amount,
owner: transfer_recipient_keypair.pubkey(),
lamports: None,
merkle_tree: merkle_tree_pubkey,
};
// invalid double token data amount
let res = perform_transfer_failing_test(
&mut rpc,
double_amount,
double_amount,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&recipient_keypair,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, ErrorCode::ComputeOutputSumFailed.into())
.unwrap();
}
// Test 4: invalid token data amount (2x)
{
let double_amount = TokenTransferOutputData {
amount: input_compressed_account_token_data.amount,
owner: transfer_recipient_keypair.pubkey(),
lamports: None,
merkle_tree: merkle_tree_pubkey,
};
let res = perform_transfer_failing_test(
&mut rpc,
double_amount,
double_amount,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&recipient_keypair,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, ErrorCode::ComputeOutputSumFailed.into())
.unwrap();
}
// Test 5: invalid input token data amount (0)
{
let mut input_compressed_account_token_data_invalid_amount =
test_indexer.token_compressed_accounts[0].token_data.clone();
input_compressed_account_token_data_invalid_amount.amount = 0;
let mut input_compressed_accounts = vec![test_indexer.token_compressed_accounts[0]
.compressed_account
.clone()];
crate::TokenData::serialize(
&input_compressed_account_token_data_invalid_amount,
&mut input_compressed_accounts[0]
.compressed_account
.data
.as_mut()
.unwrap()
.data
.as_mut_slice(),
)
.unwrap();
let change_out_compressed_account_0 = TokenTransferOutputData {
amount: input_compressed_account_token_data.amount - 1000,
owner: recipient_keypair.pubkey(),
lamports: None,
merkle_tree: merkle_tree_pubkey,
};
let transfer_recipient_out_compressed_account_0 = TokenTransferOutputData {
amount: 1000,
owner: transfer_recipient_keypair.pubkey(),
lamports: None,
merkle_tree: merkle_tree_pubkey,
};
let res = perform_transfer_failing_test(
&mut rpc,
change_out_compressed_account_0,
transfer_recipient_out_compressed_account_0,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&recipient_keypair,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, ErrorCode::ComputeOutputSumFailed.into())
.unwrap();
}
// Test 6: invalid delegate
{
let mut input_compressed_account_token_data =
test_indexer.token_compressed_accounts[0].token_data.clone();
input_compressed_account_token_data.delegate = Some(Pubkey::new_unique());
let mut input_compressed_accounts = vec![test_indexer.token_compressed_accounts[0]
.compressed_account
.clone()];
let mut vec = Vec::new();
crate::TokenData::serialize(&input_compressed_account_token_data, &mut vec).unwrap();
input_compressed_accounts[0]
.compressed_account
.data
.as_mut()
.unwrap()
.data = vec;
let res = perform_transfer_failing_test(
&mut rpc,
change_out_compressed_account_0,
transfer_recipient_out_compressed_account_0,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&recipient_keypair,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, VerifierError::ProofVerificationFailed.into())
.unwrap();
}
// Test 7: invalid owner
{
let invalid_payer = rpc.get_payer().insecure_clone();
let res = perform_transfer_failing_test(
&mut rpc,
change_out_compressed_account_0,
transfer_recipient_out_compressed_account_0,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&invalid_payer,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, VerifierError::ProofVerificationFailed.into())
.unwrap();
}
// Test 10: invalid root indices
{
let mut root_indices = proof_rpc_result.root_indices.clone();
root_indices[0] += 1;
let res = perform_transfer_failing_test(
&mut rpc,
change_out_compressed_account_0,
transfer_recipient_out_compressed_account_0,
&merkle_tree_pubkey,
&nullifier_queue_pubkey,
&payer,
&Some(proof_rpc_result.proof.clone()),
&root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(res, VerifierError::ProofVerificationFailed.into())
.unwrap();
}
// Test 11: invalid mint
{
let res = perform_transfer_failing_test(
&mut rpc,
change_out_compressed_account_0,
transfer_recipient_out_compressed_account_0,
&nullifier_queue_pubkey,
&nullifier_queue_pubkey,
&payer,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
true,
)
.await;
assert_custom_error_or_program_error(
res,
anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch.into(),
)
.unwrap();
}
// Test 12: invalid Merkle tree pubkey
{
let res = perform_transfer_failing_test(
&mut rpc,
change_out_compressed_account_0,
transfer_recipient_out_compressed_account_0,
&nullifier_queue_pubkey,
&nullifier_queue_pubkey,
&payer,
&Some(proof_rpc_result.proof.clone()),
&proof_rpc_result.root_indices,
&input_compressed_accounts,
false,
)
.await;
assert_custom_error_or_program_error(
res,
anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch.into(),
)
.unwrap();
}
kill_prover();
}
/// Builds and submits a compressed-token transfer that is expected to fail.
///
/// The instruction spends `input_compressed_accounts` and produces exactly two
/// outputs (`change_token_transfer_output` and
/// `transfer_recipient_token_transfer_output`). The caller controls which
/// accounts are passed as Merkle tree / nullifier queue and whether the mint
/// is replaced by a random pubkey (`invalid_mint`), so that individual
/// on-chain checks can be triggered.
///
/// Returns the raw RPC result so the caller can assert the expected error.
#[allow(clippy::too_many_arguments)]
async fn perform_transfer_failing_test<R: RpcConnection>(
    rpc: &mut R,
    change_token_transfer_output: TokenTransferOutputData,
    transfer_recipient_token_transfer_output: TokenTransferOutputData,
    merkle_tree_pubkey: &Pubkey,
    nullifier_queue_pubkey: &Pubkey,
    payer: &Keypair,
    proof: &Option<CompressedProof>,
    root_indices: &[u16],
    input_compressed_accounts: &[CompressedAccountWithMerkleContext],
    invalid_mint: bool,
) -> Result<solana_sdk::signature::Signature, RpcError> {
    // Recover the token data from the serialized compressed-account payloads.
    let input_compressed_account_token_data: Vec<TokenData> = input_compressed_accounts
        .iter()
        .map(|x| {
            TokenData::deserialize(&mut &x.compressed_account.data.as_ref().unwrap().data[..])
                .unwrap()
        })
        .collect();
    let mint = if invalid_mint {
        Pubkey::new_unique()
    } else {
        input_compressed_account_token_data[0].mint
    };
    let instruction = create_transfer_instruction(
        &payer.pubkey(),
        &payer.pubkey(),
        // Rebuild the Merkle contexts with the caller-provided tree/queue so
        // tests can substitute invalid accounts.
        &input_compressed_accounts
            .iter()
            .map(|x| MerkleContext {
                merkle_tree_pubkey: *merkle_tree_pubkey,
                nullifier_queue_pubkey: *nullifier_queue_pubkey,
                leaf_index: x.merkle_context.leaf_index,
                queue_index: None,
            })
            .collect::<Vec<MerkleContext>>(),
        &[
            change_token_transfer_output,
            transfer_recipient_token_transfer_output,
        ],
        root_indices,
        proof,
        input_compressed_account_token_data.as_slice(),
        &input_compressed_accounts
            .iter()
            .map(|x| &x.compressed_account)
            .cloned()
            .collect::<Vec<_>>(),
        mint,
        None,
        false,
        None,
        None,
        None,
        true,
        None,
        None,
        false,
    )
    .unwrap();
    let latest_blockhash = rpc.get_latest_blockhash().await.unwrap();
    // `payer` is both fee payer and authority signer.
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&payer.pubkey()),
        [&payer].as_slice(),
        latest_blockhash,
    );
    rpc.process_transaction(transaction).await
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/compressed-token-test
|
solana_public_repos/Lightprotocol/light-protocol/test-programs/compressed-token-test/src/lib.rs
|
// placeholder
| 0
|
solana_public_repos/Lightprotocol/light-protocol
|
solana_public_repos/Lightprotocol/light-protocol/heap/Cargo.toml
|
[package]
name = "light-heap"
version = "1.1.0"
description = "Custom heap allocator used in Light Protocol"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[dependencies]
anchor-lang = { workspace = true }
| 0
|
solana_public_repos/Lightprotocol/light-protocol/heap
|
solana_public_repos/Lightprotocol/light-protocol/heap/src/lib.rs
|
use std::{alloc::Layout, mem::size_of, ptr::null_mut};
pub mod bench;
#[cfg(target_os = "solana")]
use anchor_lang::{
prelude::*,
solana_program::entrypoint::{HEAP_LENGTH, HEAP_START_ADDRESS},
};
/// Global bump allocator covering the whole Solana program heap region.
/// Only installed when compiling for the Solana runtime.
#[cfg(target_os = "solana")]
#[global_allocator]
pub static GLOBAL_ALLOCATOR: BumpAllocator = BumpAllocator {
    start: HEAP_START_ADDRESS as usize,
    len: HEAP_LENGTH,
};
/// Errors returned by the manual heap-management helpers.
#[cfg(target_os = "solana")]
#[error_code]
pub enum HeapError {
    #[msg("The provided position to free is invalid.")]
    InvalidHeapPos,
}
/// Downward-growing bump allocator over the memory region
/// `[start, start + len)`. The first word of the region is reserved to store
/// the current allocation cursor.
pub struct BumpAllocator {
    /// Start address of the managed region (also where the cursor is stored).
    pub start: usize,
    /// Total length of the managed region in bytes.
    pub len: usize,
}
impl BumpAllocator {
    /// Bytes reserved at the start of the region to hold the cursor word.
    const RESERVED_MEM: usize = size_of::<*mut u8>();
    #[cfg(target_os = "solana")]
    pub fn new() -> Self {
        Self {
            start: HEAP_START_ADDRESS as usize,
            len: HEAP_LENGTH,
        }
    }
    /// Returns the current position of the heap.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it dereferences the raw pointer at
    /// `self.start`; the caller must ensure the region is valid memory.
    pub unsafe fn pos(&self) -> usize {
        // The cursor lives in the first word of the region; 0 means the
        // allocator has not been used yet.
        let pos_ptr = self.start as *mut usize;
        *pos_ptr
    }
    /// Reset heap start cursor to position.
    ///
    /// # Safety
    ///
    /// Do not use this function if you initialized heap memory after pos which you still need.
    pub unsafe fn move_cursor(&self, pos: usize) {
        let pos_ptr = self.start as *mut usize;
        *pos_ptr = pos;
    }
    /// Logs and returns the number of heap bytes currently in use.
    #[cfg(target_os = "solana")]
    pub fn log_total_heap(&self, msg: &str) -> u64 {
        const HEAP_END_ADDRESS: u64 = HEAP_START_ADDRESS as u64 + HEAP_LENGTH as u64;
        // The allocator bumps downward, so usage = end - cursor.
        let heap_start = unsafe { self.pos() } as u64;
        let heap_used = HEAP_END_ADDRESS - heap_start;
        msg!("{}: total heap used: {}", msg, heap_used);
        heap_used
    }
    /// Returns the current cursor position (safe wrapper around `pos`).
    #[cfg(target_os = "solana")]
    pub fn get_heap_pos(&self) -> usize {
        let heap_start = unsafe { self.pos() } as usize;
        heap_start
    }
    /// Moves the cursor back to `pos`, releasing everything allocated after
    /// that point. Fails if `pos` lies outside the managed region.
    #[cfg(target_os = "solana")]
    pub fn free_heap(&self, pos: usize) -> Result<()> {
        if pos < self.start + BumpAllocator::RESERVED_MEM || pos > self.start + self.len {
            return err!(HeapError::InvalidHeapPos);
        }
        unsafe { self.move_cursor(pos) };
        Ok(())
    }
}
unsafe impl std::alloc::GlobalAlloc for BumpAllocator {
    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // The cursor is stored in the first word of the managed region.
        let pos_ptr = self.start as *mut usize;
        let mut pos = *pos_ptr;
        if pos == 0 {
            // First time, set starting position
            pos = self.start + self.len;
        }
        // Bump downward, then round down to the requested alignment
        // (Layout guarantees the alignment is a power of two).
        pos = pos.saturating_sub(layout.size());
        pos &= !(layout.align().wrapping_sub(1));
        if pos < self.start + BumpAllocator::RESERVED_MEM {
            // Out of memory: the allocation would overlap the reserved cursor word.
            return null_mut();
        }
        *pos_ptr = pos;
        pos as *mut u8
    }
    #[inline]
    unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
        // no dealloc in Solana runtime :*(
    }
}
// Tests for the downward-growing bump allocator. They exercise cursor
// reads/resets (`pos`/`move_cursor`) and the `GlobalAlloc` implementation.
#[cfg(test)]
mod test {
    use std::{
        alloc::{GlobalAlloc, Layout},
        mem::size_of,
        ptr::null_mut,
    };
    use super::*;
    #[test]
    fn test_pos_move_cursor_heap() {
        {
            let heap = [0u8; 128];
            let allocator = BumpAllocator {
                start: heap.as_ptr() as *const _ as usize,
                len: heap.len(),
            };
            // Cursor starts at 0 (uninitialized) and reads are stable.
            let pos = unsafe { allocator.pos() };
            assert_eq!(pos, unsafe { allocator.pos() });
            assert_eq!(pos, 0);
            let mut pos_64 = 0;
            // Fill the heap one byte at a time (minus the reserved cursor word),
            // remembering the cursor after 64 allocations.
            for i in 0..128 - size_of::<*mut u8>() {
                if i == 64 {
                    pos_64 = unsafe { allocator.pos() };
                }
                let ptr = unsafe {
                    allocator.alloc(Layout::from_size_align(1, size_of::<u8>()).unwrap())
                };
                assert_eq!(
                    ptr as *const _ as usize,
                    heap.as_ptr() as *const _ as usize + heap.len() - 1 - i
                );
                assert_eq!(ptr as *const _ as usize, unsafe { allocator.pos() });
            }
            let pos_128 = unsafe { allocator.pos() };
            // free half of the heap
            unsafe { allocator.move_cursor(pos_64) };
            assert_eq!(pos_64, unsafe { allocator.pos() });
            assert_ne!(pos_64 + 1, unsafe { allocator.pos() });
            // allocate second half of the heap again
            for i in 0..64 - size_of::<*mut u8>() {
                let ptr = unsafe {
                    allocator.alloc(Layout::from_size_align(1, size_of::<u8>()).unwrap())
                };
                assert_eq!(
                    ptr as *const _ as usize,
                    heap.as_ptr() as *const _ as usize + heap.len() - 1 - (i + 64)
                );
                assert_eq!(ptr as *const _ as usize, unsafe { allocator.pos() });
            }
            assert_eq!(pos_128, unsafe { allocator.pos() });
            // free all of the heap
            unsafe { allocator.move_cursor(pos) };
            assert_eq!(pos, unsafe { allocator.pos() });
            assert_ne!(pos + 1, unsafe { allocator.pos() });
        }
    }
    /// taken from solana-program https://github.com/solana-labs/solana/blob/9a520fd5b42bafefa4815afe3e5390b4ea7482ca/sdk/program/src/entrypoint.rs#L374
    #[test]
    fn test_bump_allocator() {
        // alloc the entire
        {
            let heap = [0u8; 128];
            let allocator = BumpAllocator {
                start: heap.as_ptr() as *const _ as usize,
                len: heap.len(),
            };
            for i in 0..128 - size_of::<*mut u8>() {
                let ptr = unsafe {
                    allocator.alloc(Layout::from_size_align(1, size_of::<u8>()).unwrap())
                };
                assert_eq!(
                    ptr as *const _ as usize,
                    heap.as_ptr() as *const _ as usize + heap.len() - 1 - i
                );
            }
            // The region is exhausted; the next allocation must fail.
            assert_eq!(null_mut(), unsafe {
                allocator.alloc(Layout::from_size_align(1, 1).unwrap())
            });
        }
        // check alignment
        {
            let heap = [0u8; 128];
            let allocator = BumpAllocator {
                start: heap.as_ptr() as *const _ as usize,
                len: heap.len(),
            };
            let ptr =
                unsafe { allocator.alloc(Layout::from_size_align(1, size_of::<u8>()).unwrap()) };
            assert_eq!(0, ptr.align_offset(size_of::<u8>()));
            let ptr =
                unsafe { allocator.alloc(Layout::from_size_align(1, size_of::<u16>()).unwrap()) };
            assert_eq!(0, ptr.align_offset(size_of::<u16>()));
            let ptr =
                unsafe { allocator.alloc(Layout::from_size_align(1, size_of::<u32>()).unwrap()) };
            assert_eq!(0, ptr.align_offset(size_of::<u32>()));
            let ptr =
                unsafe { allocator.alloc(Layout::from_size_align(1, size_of::<u64>()).unwrap()) };
            assert_eq!(0, ptr.align_offset(size_of::<u64>()));
            let ptr =
                unsafe { allocator.alloc(Layout::from_size_align(1, size_of::<u128>()).unwrap()) };
            assert_eq!(0, ptr.align_offset(size_of::<u128>()));
            let ptr = unsafe { allocator.alloc(Layout::from_size_align(1, 64).unwrap()) };
            assert_eq!(0, ptr.align_offset(64));
        }
        // alloc entire block (minus the pos ptr)
        {
            let heap = [0u8; 128];
            let allocator = BumpAllocator {
                start: heap.as_ptr() as *const _ as usize,
                len: heap.len(),
            };
            let ptr =
                unsafe { allocator.alloc(Layout::from_size_align(120, size_of::<u8>()).unwrap()) };
            assert_ne!(ptr, null_mut());
            assert_eq!(0, ptr.align_offset(size_of::<u64>()));
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/heap
|
solana_public_repos/Lightprotocol/light-protocol/heap/src/bench.rs
|
/// Marks the start of an SBF benchmark section: logs current heap usage and
/// compute units under `<msg>_start_bench_cu`. Expands to nothing unless
/// compiling for Solana with the `bench-sbf` feature enabled.
#[macro_export]
macro_rules! bench_sbf_start {
    ($custom_msg:literal) => {
        // Conditionally compile only if on Solana OS and feature "bench-sbf" is enabled
        #[cfg(all(target_os = "solana", feature = "bench-sbf"))]
        {
            // Log the total heap with a custom message indicating the start
            light_heap::GLOBAL_ALLOCATOR
                .log_total_heap(format!("{}_start_bench_cu", $custom_msg).as_str());
            // Log the number of compute units used
            anchor_lang::solana_program::log::sol_log_compute_units();
        }
    };
}
/// Marks the end of an SBF benchmark section: logs current heap usage and
/// compute units under `<msg>_end_bench_cu`. Expands to nothing unless
/// compiling for Solana with the `bench-sbf` feature enabled.
#[macro_export]
macro_rules! bench_sbf_end {
    ($custom_msg:literal) => {
        // Conditionally compile only if on Solana OS and feature "bench-sbf" is enabled
        #[cfg(all(target_os = "solana", feature = "bench-sbf"))]
        {
            // Log the total heap with a custom message indicating the end
            light_heap::GLOBAL_ALLOCATOR
                .log_total_heap(format!("{}_end_bench_cu", $custom_msg).as_str());
            // Log the number of compute units used
            anchor_lang::solana_program::log::sol_log_compute_units();
        }
    };
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test/Cargo.toml
|
[package]
name = "light-program-test"
version = "0.1.0"
edition = "2021"
[features]
default = []
devenv = []
[dependencies]
light-client = { workspace = true }
light-prover-client = { workspace = true }
light-sdk = { workspace = true }
light-indexed-merkle-tree = { workspace = true }
light-merkle-tree-reference = { workspace = true }
light-hasher = { workspace = true }
light-registry = { workspace = true }
light-system-program = { workspace = true }
light-compressed-token = { workspace = true }
light-utils = { workspace = true }
account-compression = { workspace = true }
forester-utils = { workspace = true }
solana-sdk = { workspace = true }
solana-banks-client = { workspace = true }
solana-program-test = { workspace = true }
log = { workspace = true }
borsh = { workspace = true }
tokio = { workspace = true }
async-trait = { workspace = true }
num-bigint = { workspace = true }
num-traits = { workspace = true }
reqwest = { workspace = true }
| 0
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test/src/test_env.rs
|
use crate::env_accounts;
use crate::test_rpc::ProgramTestRpcConnection;
use account_compression::sdk::create_initialize_address_merkle_tree_and_queue_instruction;
use account_compression::utils::constants::GROUP_AUTHORITY_SEED;
use account_compression::{
sdk::create_initialize_merkle_tree_instruction, GroupAuthority, RegisteredProgram,
};
use account_compression::{AddressMerkleTreeConfig, AddressQueueConfig};
use account_compression::{NullifierQueueConfig, StateMerkleTreeConfig};
use forester_utils::forester_epoch::{Epoch, TreeAccounts, TreeType};
use forester_utils::registry::register_test_forester;
use forester_utils::{airdrop_lamports, create_account_instruction};
use light_client::rpc::errors::RpcError;
use light_client::rpc::solana_rpc::SolanaRpcUrl;
use light_client::rpc::{RpcConnection, SolanaRpcConnection};
use light_registry::account_compression_cpi::sdk::get_registered_program_pda;
use light_registry::protocol_config::state::ProtocolConfig;
use light_registry::sdk::{
create_deregister_program_instruction, create_finalize_registration_instruction,
create_initialize_governance_authority_instruction,
create_initialize_group_authority_instruction, create_register_program_instruction,
create_update_protocol_config_instruction,
};
use light_registry::utils::{
get_cpi_authority_pda, get_forester_pda, get_protocol_config_pda_address,
};
use light_registry::ForesterConfig;
use solana_program_test::{ProgramTest, ProgramTestContext};
use solana_sdk::signature::{read_keypair_file, Signature};
use solana_sdk::{
pubkey, pubkey::Pubkey, signature::Keypair, signature::Signer, system_instruction,
transaction::Transaction,
};
use std::path::PathBuf;
/// Rent-exempt balance (in lamports) required for the CPI context account.
pub const CPI_CONTEXT_ACCOUNT_RENT: u64 = 143487360; // lamports of the cpi context account
/// Program id of the SPL noop program (used to emit compressed-account events).
pub const NOOP_PROGRAM_ID: Pubkey = pubkey!("noopb9bkMVfRPU8AsbpTUg8AQkHtKwMYZiFUjNRtMmV");
/// Setup test programs
/// deploys:
/// 1. light_registry program
/// 2. account_compression program
/// 3. light_compressed_token program
/// 4. light_system_program program
///
/// Program binaries are loaded from the directory returned by
/// `find_light_bin()`; `SBF_OUT_DIR` is temporarily redirected there and
/// restored afterwards.
pub async fn setup_test_programs(
    additional_programs: Option<Vec<(String, Pubkey)>>,
) -> ProgramTestContext {
    let mut program_test = ProgramTest::default();
    // Save the caller's SBF_OUT_DIR so it can be restored after loading.
    let sbf_path = std::env::var("SBF_OUT_DIR").unwrap();
    // find path to bin where light cli stores program binaries.
    let path = find_light_bin().unwrap();
    std::env::set_var("SBF_OUT_DIR", path.to_str().unwrap());
    program_test.add_program("light_registry", light_registry::ID, None);
    program_test.add_program("account_compression", account_compression::ID, None);
    program_test.add_program("light_compressed_token", light_compressed_token::ID, None);
    program_test.add_program("light_system_program", light_system_program::ID, None);
    program_test.add_program("spl_noop", NOOP_PROGRAM_ID, None);
    // Restore the original SBF_OUT_DIR before loading any additional programs.
    std::env::set_var("SBF_OUT_DIR", sbf_path);
    // Pre-populate the registered-program PDAs so the system and registry
    // programs are already whitelisted when the context starts.
    let registered_program = env_accounts::get_registered_program_pda();
    program_test.add_account(
        get_registered_program_pda(&light_system_program::ID),
        registered_program,
    );
    let registered_program = env_accounts::get_registered_registry_program_pda();
    program_test.add_account(
        get_registered_program_pda(&light_registry::ID),
        registered_program,
    );
    if let Some(programs) = additional_programs {
        for (name, id) in programs {
            program_test.add_program(&name, id, None);
        }
    }
    program_test.set_compute_max_units(1_400_000u64);
    program_test.start_with_context().await
}
/// Locates the directory holding the Light program binaries.
///
/// Without the `devenv` feature the location is derived from the globally
/// installed `light` CLI (resolved via `which light`); with `devenv` it is the
/// `target/deploy/` directory of the current git checkout. Returns `None`
/// when the CLI cannot be found.
fn find_light_bin() -> Option<PathBuf> {
    // Run the 'which light' command to find the location of 'light' binary
    #[cfg(not(feature = "devenv"))]
    {
        println!("Running 'which light' (feature 'devenv' is not enabled)");
        use std::process::Command;
        let which_result = Command::new("which")
            .arg("light")
            .output()
            .expect("Failed to execute 'which light'");
        if !which_result.status.success() {
            return None;
        }
        // Absolute path of the 'light' executable, without the trailing newline.
        let cli_path = String::from_utf8_lossy(&which_result.stdout)
            .trim()
            .to_string();
        // Directory that contains the 'light' executable.
        let mut bin_dir = PathBuf::from(cli_path);
        bin_dir.pop();
        // The program binaries ship inside the globally installed npm package.
        let node_modules_bin =
            bin_dir.join("../lib/node_modules/@lightprotocol/zk-compression-cli/bin");
        Some(node_modules_bin.canonicalize().unwrap_or(node_modules_bin))
    }
    #[cfg(feature = "devenv")]
    {
        println!("Using 'git rev-parse --show-toplevel' to find the location of 'light' binary");
        let git_output = std::process::Command::new("git")
            .arg("rev-parse")
            .arg("--show-toplevel")
            .output()
            .expect("Failed to get top-level directory");
        let light_protocol_toplevel = String::from_utf8_lossy(&git_output.stdout)
            .trim()
            .to_string();
        Some(PathBuf::from(format!(
            "{}/target/deploy/",
            light_protocol_toplevel
        )))
    }
}
/// Public keys (and a few keypairs) of all accounts created by the test
/// environment setup, handed to tests so they can reference the state and
/// address trees, queues, and governance accounts.
#[derive(Debug)]
pub struct EnvAccounts {
    pub merkle_tree_pubkey: Pubkey,
    pub nullifier_queue_pubkey: Pubkey,
    pub governance_authority: Keypair,
    pub governance_authority_pda: Pubkey,
    pub group_pda: Pubkey,
    pub forester: Keypair,
    pub registered_program_pda: Pubkey,
    pub registered_registry_program_pda: Pubkey,
    pub address_merkle_tree_pubkey: Pubkey,
    pub address_merkle_tree_queue_pubkey: Pubkey,
    pub cpi_context_account_pubkey: Pubkey,
    pub registered_forester_pda: Pubkey,
    // None unless a forester epoch has been registered by the setup.
    pub forester_epoch: Option<Epoch>,
}
impl EnvAccounts {
    /// Returns the well-known account set deployed on a local test validator.
    /// PDA fields that are not fixed on localnet are left as `Pubkey::default()`.
    pub fn get_local_test_validator_accounts() -> EnvAccounts {
        EnvAccounts {
            merkle_tree_pubkey: pubkey!("smt1NamzXdq4AMqS2fS2F1i5KTYPZRhoHgWx38d8WsT"),
            nullifier_queue_pubkey: pubkey!("nfq1NvQDJ2GEgnS8zt9prAe8rjjpAW1zFkrvZoBR148"),
            governance_authority: Keypair::from_bytes(&PAYER_KEYPAIR).unwrap(),
            governance_authority_pda: Pubkey::default(),
            group_pda: Pubkey::default(),
            forester: Keypair::new(),
            registered_program_pda: get_registered_program_pda(&light_system_program::ID),
            registered_registry_program_pda: get_registered_program_pda(&light_registry::ID),
            address_merkle_tree_pubkey: pubkey!("amt1Ayt45jfbdw5YSo7iz6WZxUmnZsQTYXy82hVwyC2"),
            address_merkle_tree_queue_pubkey: pubkey!(
                "aq1S9z4reTSQAdgWHGD2zDaS39sjGrAxbR31vxJ2F4F"
            ),
            cpi_context_account_pubkey: pubkey!("cpi1uHzrEhBG733DoEJNgHCyRS3XmmyVNZx5fonubE4"),
            registered_forester_pda: Pubkey::default(),
            forester_epoch: None, // Set to None or to an appropriate Epoch value if needed
        }
    }
}
/// Keypairs for every account the environment setup needs to sign for,
/// including the program-id keypairs required to register programs with the
/// security group (localnet testing only).
#[derive(Debug)]
pub struct EnvAccountKeypairs {
    pub state_merkle_tree: Keypair,
    pub nullifier_queue: Keypair,
    pub governance_authority: Keypair,
    pub forester: Keypair,
    pub address_merkle_tree: Keypair,
    pub address_merkle_tree_queue: Keypair,
    pub cpi_context_account: Keypair,
    pub system_program: Keypair,
    pub registry_program: Keypair,
}
impl EnvAccountKeypairs {
    /// Builds the keypair set from the hardcoded byte arrays below, giving
    /// deterministic pubkeys for `solana-program-test` runs.
    pub fn program_test_default() -> EnvAccountKeypairs {
        EnvAccountKeypairs {
            state_merkle_tree: Keypair::from_bytes(&MERKLE_TREE_TEST_KEYPAIR).unwrap(),
            nullifier_queue: Keypair::from_bytes(&NULLIFIER_QUEUE_TEST_KEYPAIR).unwrap(),
            governance_authority: Keypair::from_bytes(&PAYER_KEYPAIR).unwrap(),
            forester: Keypair::from_bytes(&FORESTER_TEST_KEYPAIR).unwrap(),
            address_merkle_tree: Keypair::from_bytes(&ADDRESS_MERKLE_TREE_TEST_KEYPAIR).unwrap(),
            address_merkle_tree_queue: Keypair::from_bytes(&ADDRESS_MERKLE_TREE_QUEUE_TEST_KEYPAIR)
                .unwrap(),
            cpi_context_account: Keypair::from_bytes(&SIGNATURE_CPI_TEST_KEYPAIR).unwrap(),
            system_program: Keypair::from_bytes(&OLD_SYSTEM_PROGRAM_ID_TEST_KEYPAIR).unwrap(),
            registry_program: Keypair::from_bytes(&OLD_REGISTRY_ID_TEST_KEYPAIR).unwrap(),
        }
    }
    /// Reads the keypair set from JSON files in the `light-keypairs` and
    /// `target` directories relative to the crate (panics if a file is
    /// missing or malformed).
    ///
    /// NOTE(review): `nullifier_queue` uses a hardcoded path while the other
    /// `light-keypairs` files are built from `prefix` — presumably equivalent,
    /// but worth unifying; confirm the relative paths match.
    pub fn from_target_folder() -> EnvAccountKeypairs {
        let prefix = String::from("../../../light-keypairs/");
        let target_prefix = String::from("../../target/");
        let state_merkle_tree = read_keypair_file(format!(
            "{}smt1NamzXdq4AMqS2fS2F1i5KTYPZRhoHgWx38d8WsT.json",
            prefix
        ))
        .unwrap();
        let nullifier_queue = read_keypair_file(
            "../../../light-keypairs/nfq1NvQDJ2GEgnS8zt9prAe8rjjpAW1zFkrvZoBR148.json",
        )
        .unwrap();
        let governance_authority = read_keypair_file(format!(
            "{}governance-authority-keypair.json",
            target_prefix
        ))
        .unwrap();
        let forester =
            read_keypair_file(format!("{}forester-keypair.json", target_prefix)).unwrap();
        let address_merkle_tree = read_keypair_file(format!(
            "{}amt1Ayt45jfbdw5YSo7iz6WZxUmnZsQTYXy82hVwyC2.json",
            prefix
        ))
        .unwrap();
        let address_merkle_tree_queue = read_keypair_file(format!(
            "{}aq1S9z4reTSQAdgWHGD2zDaS39sjGrAxbR31vxJ2F4F.json",
            prefix
        ))
        .unwrap();
        let cpi_context_account = read_keypair_file(format!(
            "{}cpi1uHzrEhBG733DoEJNgHCyRS3XmmyVNZx5fonubE4.json",
            prefix
        ))
        .unwrap();
        let system_program = read_keypair_file(format!(
            "{}SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7.json",
            prefix
        ))
        .unwrap();
        let registry_program = read_keypair_file(format!(
            "{}Lighton6oQpVkeewmo2mcPTQQp7kYHr4fWpAgJyEmDX.json",
            prefix
        ))
        .unwrap();
        EnvAccountKeypairs {
            state_merkle_tree,
            nullifier_queue,
            governance_authority,
            forester,
            address_merkle_tree,
            address_merkle_tree_queue,
            cpi_context_account,
            system_program,
            registry_program,
        }
    }
}
// Hardcoded keypairs for deterministic pubkeys for testing
/// State Merkle tree account keypair (64-byte ed25519 secret+public key).
pub const MERKLE_TREE_TEST_KEYPAIR: [u8; 64] = [
    146, 193, 80, 51, 114, 21, 221, 27, 228, 203, 43, 26, 211, 158, 183, 129, 254, 206, 249, 89,
    121, 99, 123, 196, 106, 29, 91, 144, 50, 161, 42, 139, 68, 77, 125, 32, 76, 128, 61, 180, 1,
    207, 69, 44, 121, 118, 153, 17, 179, 183, 115, 34, 163, 127, 102, 214, 1, 87, 175, 177, 95, 49,
    65, 69,
];
/// Nullifier queue account keypair.
pub const NULLIFIER_QUEUE_TEST_KEYPAIR: [u8; 64] = [
    222, 130, 14, 179, 120, 234, 200, 231, 112, 214, 179, 171, 214, 95, 225, 61, 71, 61, 96, 214,
    47, 253, 213, 178, 11, 77, 16, 2, 7, 24, 106, 218, 45, 107, 25, 100, 70, 71, 137, 47, 210, 248,
    220, 223, 11, 204, 205, 89, 248, 48, 211, 168, 11, 25, 219, 158, 99, 47, 127, 248, 142, 107,
    196, 110,
];
/// Payer / governance-authority keypair.
pub const PAYER_KEYPAIR: [u8; 64] = [
    17, 34, 231, 31, 83, 147, 93, 173, 61, 164, 25, 0, 204, 82, 234, 91, 202, 187, 228, 110, 146,
    97, 112, 131, 180, 164, 96, 220, 57, 207, 65, 107, 2, 99, 226, 251, 88, 66, 92, 33, 25, 216,
    211, 185, 112, 203, 212, 238, 105, 144, 72, 121, 176, 253, 106, 168, 115, 158, 154, 188, 62,
    255, 166, 81,
];
/// Address Merkle tree account keypair.
pub const ADDRESS_MERKLE_TREE_TEST_KEYPAIR: [u8; 64] = [
    145, 184, 150, 187, 7, 48, 33, 191, 136, 115, 127, 243, 135, 119, 163, 99, 186, 21, 67, 161,
    22, 211, 102, 149, 158, 51, 182, 231, 97, 28, 77, 118, 165, 62, 148, 222, 135, 123, 222, 189,
    109, 46, 57, 112, 159, 209, 86, 59, 62, 139, 159, 208, 193, 206, 130, 48, 119, 195, 103, 235,
    231, 94, 83, 227,
];
/// Address Merkle tree queue account keypair.
pub const ADDRESS_MERKLE_TREE_QUEUE_TEST_KEYPAIR: [u8; 64] = [
    177, 80, 56, 144, 179, 178, 209, 143, 125, 134, 80, 75, 74, 156, 241, 156, 228, 50, 210, 35,
    149, 0, 28, 198, 132, 157, 54, 197, 173, 200, 104, 156, 243, 76, 173, 207, 166, 74, 210, 59,
    59, 211, 75, 180, 111, 40, 13, 151, 57, 237, 103, 145, 136, 105, 65, 143, 250, 50, 64, 94, 214,
    184, 217, 99,
];
/// CPI context account keypair.
pub const SIGNATURE_CPI_TEST_KEYPAIR: [u8; 64] = [
    189, 58, 29, 111, 77, 118, 218, 228, 64, 122, 227, 119, 148, 83, 245, 92, 107, 168, 153, 61,
    221, 100, 243, 106, 228, 231, 147, 200, 195, 156, 14, 10, 162, 100, 133, 197, 231, 125, 178,
    71, 33, 62, 223, 145, 136, 210, 160, 96, 75, 148, 143, 30, 41, 89, 205, 141, 248, 204, 48, 157,
    195, 216, 81, 204,
];
/// Seed keypair used to derive the group authority PDA.
pub const GROUP_PDA_SEED_TEST_KEYPAIR: [u8; 64] = [
    97, 41, 77, 16, 152, 43, 140, 41, 11, 146, 82, 50, 38, 162, 216, 34, 95, 6, 237, 11, 74, 227,
    221, 137, 26, 136, 52, 144, 74, 212, 215, 155, 216, 47, 98, 199, 9, 61, 213, 72, 205, 237, 76,
    74, 119, 253, 96, 1, 140, 92, 149, 148, 250, 32, 53, 54, 186, 15, 48, 130, 222, 205, 3, 98,
];
// The test program id keypairs are necessary because the program id keypair needs to sign
// to register the program to the security group.
// The program ids should only be used for localnet testing.
// Pubkey: H5sFv8VwWmjxHYS2GB4fTDsK7uTtnRT4WiixtHrET3bN
pub const OLD_SYSTEM_PROGRAM_ID_TEST_KEYPAIR: [u8; 64] = [
    10, 62, 81, 156, 201, 11, 242, 85, 89, 182, 145, 223, 214, 144, 53, 147, 242, 197, 41, 55, 203,
    212, 70, 178, 225, 209, 4, 211, 43, 153, 222, 21, 238, 250, 35, 216, 163, 90, 82, 72, 167, 209,
    196, 227, 210, 173, 89, 255, 142, 20, 199, 150, 144, 215, 61, 164, 34, 47, 181, 228, 226, 153,
    208, 17,
];
// Pubkey: 7Z9Yuy3HkBCc2Wf3xzMGnz6qpV4n7ciwcoEMGKqhAnj1
pub const OLD_REGISTRY_ID_TEST_KEYPAIR: [u8; 64] = [
    43, 149, 192, 218, 153, 35, 206, 182, 230, 102, 193, 208, 163, 11, 195, 46, 228, 116, 113, 62,
    161, 102, 207, 139, 128, 8, 120, 150, 30, 119, 150, 140, 97, 98, 96, 14, 138, 90, 82, 76, 254,
    197, 232, 33, 204, 67, 237, 139, 100, 115, 187, 164, 115, 31, 164, 21, 246, 9, 162, 211, 227,
    20, 96, 192,
];
/// Test forester keypair.
pub const FORESTER_TEST_KEYPAIR: [u8; 64] = [
    81, 4, 133, 152, 100, 67, 157, 52, 66, 70, 150, 214, 242, 90, 65, 199, 143, 192, 96, 172, 214,
    44, 250, 77, 224, 55, 104, 35, 168, 1, 92, 200, 204, 184, 194, 21, 117, 231, 90, 62, 117, 179,
    162, 181, 71, 36, 34, 47, 49, 195, 215, 90, 115, 3, 69, 74, 210, 75, 162, 191, 63, 51, 170,
    204,
];
/// Sets up all Light Protocol test programs together with their accounts.
///
/// Deploys:
/// 1. light program
/// 2. account_compression program
/// 3. light_compressed_token program
/// 4. light_system_program program
///
/// Sets up the following accounts:
/// 5. creates and initializes governance authority
/// 6. creates and initializes group authority
/// 7. registers the light_system_program program with the group authority
/// 8. initializes Merkle tree owned by
///
/// Additionally registers a forester and advances to the active phase
/// (slot 2); the phase lengths are chosen so the active phase never ends.
// TODO(vadorovsky): Remove this function...
pub async fn setup_test_programs_with_accounts(
    additional_programs: Option<Vec<(String, Pubkey)>>,
) -> (ProgramTestRpcConnection, EnvAccounts) {
    // An epoch configuration whose active phase effectively never ends.
    let protocol_config = ProtocolConfig {
        active_phase_length: 1_000_000_000,
        slot_length: 1_000_000_000 - 1,
        genesis_slot: 0,
        registration_phase_length: 2,
        ..Default::default()
    };
    setup_test_programs_with_accounts_with_protocol_config(
        additional_programs,
        protocol_config,
        true,
    )
    .await
}
/// Sets up all Light Protocol test programs together with their accounts.
///
/// Deploys:
/// 1. light program
/// 2. account_compression program
/// 3. light_compressed_token program
/// 4. light_system_program program
///
/// Sets up the following accounts:
/// 5. creates and initializes governance authority
/// 6. creates and initializes group authority
/// 7. registers the light_system_program program with the group authority
/// 8. initializes Merkle tree owned by
///
/// Additionally registers a forester and advances to the active phase
/// (slot 2); the phase lengths are chosen so the active phase never ends.
// TODO(vadorovsky): ...in favor of this one.
pub async fn setup_test_programs_with_accounts_v2(
    additional_programs: Option<Vec<(String, Pubkey)>>,
) -> (ProgramTestRpcConnection, EnvAccounts) {
    setup_test_programs_with_accounts_with_protocol_config_v2(
        additional_programs,
        // An epoch configuration whose active phase effectively never ends:
        // slot_length = 999_999_999 = active_phase_length - 1.
        ProtocolConfig {
            active_phase_length: 1_000_000_000,
            slot_length: 999_999_999,
            genesis_slot: 0,
            registration_phase_length: 2,
            ..Default::default()
        },
        true,
    )
    .await
}
/// Deploys the test programs, funds the governance authority and forester,
/// and initializes all protocol accounts with the given `protocol_config`.
// TODO(vadorovsky): Remove this function...
pub async fn setup_test_programs_with_accounts_with_protocol_config(
    additional_programs: Option<Vec<(String, Pubkey)>>,
    protocol_config: ProtocolConfig,
    register_forester_and_advance_to_active_phase: bool,
) -> (ProgramTestRpcConnection, EnvAccounts) {
    let mut rpc = ProgramTestRpcConnection {
        context: setup_test_programs(additional_programs).await,
    };
    let keypairs = EnvAccountKeypairs::program_test_default();
    // Fund the governance authority first, then the forester, before any
    // protocol accounts are created.
    for (recipient, lamports) in [
        (keypairs.governance_authority.pubkey(), 100_000_000_000),
        (keypairs.forester.pubkey(), 10_000_000_000),
    ] {
        airdrop_lamports(&mut rpc, &recipient, lamports)
            .await
            .unwrap();
    }
    let env_accounts = initialize_accounts(
        &mut rpc,
        keypairs,
        protocol_config,
        register_forester_and_advance_to_active_phase,
        true,
    )
    .await;
    (rpc, env_accounts)
}
/// Deploys the test programs, funds the governance authority and forester,
/// and initializes all protocol accounts with the given `protocol_config`.
// TODO(vadorovsky): ...in favor of this one.
pub async fn setup_test_programs_with_accounts_with_protocol_config_v2(
    additional_programs: Option<Vec<(String, Pubkey)>>,
    protocol_config: ProtocolConfig,
    register_forester_and_advance_to_active_phase: bool,
) -> (ProgramTestRpcConnection, EnvAccounts) {
    let program_test_context = setup_test_programs(additional_programs).await;
    let mut connection = ProgramTestRpcConnection {
        context: program_test_context,
    };
    let keypairs = EnvAccountKeypairs::program_test_default();
    // The governance authority pays for all account initializations below.
    let governance = keypairs.governance_authority.pubkey();
    airdrop_lamports(&mut connection, &governance, 100_000_000_000)
        .await
        .unwrap();
    // The forester pays for its own epoch registration.
    let forester = keypairs.forester.pubkey();
    airdrop_lamports(&mut connection, &forester, 10_000_000_000)
        .await
        .unwrap();
    let accounts = initialize_accounts(
        &mut connection,
        keypairs,
        protocol_config,
        register_forester_and_advance_to_active_phase,
        true,
    )
    .await;
    (connection, accounts)
}
/// Initializes the protocol accounts against a live RPC endpoint, using the
/// default protocol config, without registering a forester epoch and without
/// skipping program registration.
pub async fn setup_accounts(keypairs: EnvAccountKeypairs, url: SolanaRpcUrl) -> EnvAccounts {
    let mut connection = SolanaRpcConnection::new(url, None);
    let config = ProtocolConfig::default();
    initialize_accounts(&mut connection, keypairs, config, false, false).await
}
/// Initializes the full set of protocol accounts on `context`:
/// protocol config (governance authority), group authority, forester
/// registration, optional program registrations with the registry, one state
/// Merkle tree (+ nullifier queue + CPI context account) and one address
/// Merkle tree (+ queue).
///
/// * `register_forester_and_advance_to_active_phase` — if set, registers the
///   forester for epoch 0, warps to the active phase start slot and finalizes
///   the registration; the resulting `Epoch` is returned inside `EnvAccounts`.
/// * `skip_register_programs` — if set, the system and registry programs are
///   NOT registered with the group here. NOTE(review): program-test setups
///   pass `true`, presumably because the registered-program PDAs are
///   preloaded from `env_accounts.rs` — confirm against the ProgramTest
///   setup code.
pub async fn initialize_accounts<R: RpcConnection>(
    context: &mut R,
    keypairs: EnvAccountKeypairs,
    protocol_config: ProtocolConfig,
    register_forester_and_advance_to_active_phase: bool,
    skip_register_programs: bool,
) -> EnvAccounts {
    let cpi_authority_pda = get_cpi_authority_pda();
    let protocol_config_pda = get_protocol_config_pda_address();
    // Initialize the protocol config and immediately send an update in the
    // same transaction (exercises the update path with an unchanged config).
    let instruction = create_initialize_governance_authority_instruction(
        keypairs.governance_authority.pubkey(),
        keypairs.governance_authority.pubkey(),
        protocol_config,
    );
    let update_instruction = create_update_protocol_config_instruction(
        keypairs.governance_authority.pubkey(),
        Some(keypairs.governance_authority.pubkey()),
        None,
    );
    context
        .create_and_send_transaction(
            &[instruction, update_instruction],
            &keypairs.governance_authority.pubkey(),
            &[&keypairs.governance_authority],
        )
        .await
        .unwrap();
    // Create the group authority, owned by the registry's CPI authority PDA,
    // derived from the deterministic test seed keypair.
    let group_seed_keypair = Keypair::from_bytes(&GROUP_PDA_SEED_TEST_KEYPAIR).unwrap();
    let group_pda = initialize_new_group(
        &group_seed_keypair,
        &keypairs.governance_authority,
        context,
        cpi_authority_pda.0,
    )
    .await;
    // Sanity check: the protocol config account records the expected authority.
    let gov_authority = context
        .get_anchor_account::<GroupAuthority>(&protocol_config_pda.0)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(
        gov_authority.authority,
        keypairs.governance_authority.pubkey()
    );
    println!("forester: {:?}", keypairs.forester.pubkey());
    register_test_forester(
        context,
        &keypairs.governance_authority,
        &keypairs.forester.pubkey(),
        ForesterConfig::default(),
    )
    .await
    .unwrap();
    println!("Registered register_test_forester ");
    if !skip_register_programs {
        // Register both the system program and the registry program with the
        // group so they may invoke account-compression via CPI.
        register_program_with_registry_program(
            context,
            &keypairs.governance_authority,
            &group_pda,
            &keypairs.system_program,
        )
        .await
        .unwrap();
        register_program_with_registry_program(
            context,
            &keypairs.governance_authority,
            &group_pda,
            &keypairs.registry_program,
        )
        .await
        .unwrap();
    }
    println!("Registered system program");
    let merkle_tree_pubkey = keypairs.state_merkle_tree.pubkey();
    let nullifier_queue_pubkey = keypairs.nullifier_queue.pubkey();
    // State tree goes through the registry path (registry == true) and gets a
    // CPI context account; tree index 1.
    create_state_merkle_tree_and_queue_account(
        &keypairs.governance_authority,
        true,
        context,
        &keypairs.state_merkle_tree,
        &keypairs.nullifier_queue,
        Some(&keypairs.cpi_context_account),
        None,
        None,
        1,
        &StateMerkleTreeConfig::default(),
        &NullifierQueueConfig::default(),
    )
    .await
    .unwrap();
    // Address tree also goes through the registry path; tree index 0.
    create_address_merkle_tree_and_queue_account(
        &keypairs.governance_authority,
        true,
        context,
        &keypairs.address_merkle_tree,
        &keypairs.address_merkle_tree_queue,
        None,
        None,
        &AddressMerkleTreeConfig::default(),
        &AddressQueueConfig::default(),
        0,
    )
    .await
    .unwrap();
    let registered_system_program_pda = get_registered_program_pda(&light_system_program::ID);
    let registered_registry_program_pda = get_registered_program_pda(&light_registry::ID);
    // Optionally register the forester for epoch 0 and warp into the active
    // phase so the forester can immediately service the trees.
    let forester_epoch = if register_forester_and_advance_to_active_phase {
        let mut registered_epoch = Epoch::register(
            context,
            &protocol_config,
            &keypairs.forester,
            &keypairs.forester.pubkey(),
        )
        .await
        .unwrap()
        .unwrap();
        context
            .warp_to_slot(registered_epoch.phases.active.start)
            .await
            .unwrap();
        // Give the forester a work schedule for both trees created above.
        let tree_accounts = vec![
            TreeAccounts {
                tree_type: TreeType::State,
                merkle_tree: merkle_tree_pubkey,
                queue: nullifier_queue_pubkey,
                is_rolledover: false,
            },
            TreeAccounts {
                tree_type: TreeType::Address,
                merkle_tree: keypairs.address_merkle_tree.pubkey(),
                queue: keypairs.address_merkle_tree_queue.pubkey(),
                is_rolledover: false,
            },
        ];
        registered_epoch
            .fetch_account_and_add_trees_with_schedule(context, &tree_accounts)
            .await
            .unwrap();
        // Finalize the epoch-0 registration now that the active phase started.
        let ix = create_finalize_registration_instruction(
            &keypairs.forester.pubkey(),
            &keypairs.forester.pubkey(),
            0,
        );
        context
            .create_and_send_transaction(&[ix], &keypairs.forester.pubkey(), &[&keypairs.forester])
            .await
            .unwrap();
        Some(registered_epoch)
    } else {
        None
    };
    EnvAccounts {
        merkle_tree_pubkey,
        nullifier_queue_pubkey,
        group_pda,
        // `insecure_clone` is acceptable here: these are test-only keypairs.
        governance_authority: keypairs.governance_authority.insecure_clone(),
        governance_authority_pda: protocol_config_pda.0,
        forester: keypairs.forester.insecure_clone(),
        registered_program_pda: registered_system_program_pda,
        address_merkle_tree_pubkey: keypairs.address_merkle_tree.pubkey(),
        address_merkle_tree_queue_pubkey: keypairs.address_merkle_tree_queue.pubkey(),
        cpi_context_account_pubkey: keypairs.cpi_context_account.pubkey(),
        registered_registry_program_pda,
        registered_forester_pda: get_forester_pda(&keypairs.forester.pubkey()).0,
        forester_epoch,
    }
}
/// Derives the group-authority PDA of the account-compression program for the
/// given seed pubkey.
pub fn get_group_pda(seed: Pubkey) -> Pubkey {
    let seed_bytes = seed.to_bytes();
    let (pda, _bump) = Pubkey::find_program_address(
        &[GROUP_AUTHORITY_SEED, &seed_bytes],
        &account_compression::ID,
    );
    pda
}
/// Creates and initializes a new group-authority PDA seeded by
/// `group_seed_keypair`'s pubkey, owned by `authority`. Panics if the
/// transaction fails or the created account does not match the inputs.
pub async fn initialize_new_group<R: RpcConnection>(
    group_seed_keypair: &Keypair,
    payer: &Keypair,
    context: &mut R,
    authority: Pubkey,
) -> Pubkey {
    let seed_bytes = group_seed_keypair.pubkey().to_bytes();
    let (group_pda, _bump) = Pubkey::find_program_address(
        &[GROUP_AUTHORITY_SEED, &seed_bytes],
        &account_compression::ID,
    );
    let instruction = create_initialize_group_authority_instruction(
        payer.pubkey(),
        group_pda,
        group_seed_keypair.pubkey(),
        authority,
    );
    // The seed keypair co-signs so only its holder can claim this PDA.
    let signers = [payer, group_seed_keypair];
    context
        .create_and_send_transaction(&[instruction], &payer.pubkey(), &signers)
        .await
        .unwrap();
    // Read the account back and verify it was initialized as requested.
    let group_authority = context
        .get_anchor_account::<GroupAuthority>(&group_pda)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(group_authority.authority, authority);
    assert_eq!(group_authority.seed, group_seed_keypair.pubkey());
    group_pda
}
/// Builds an `EnvAccounts` snapshot purely from the hardcoded test keypairs,
/// without touching any RPC connection. `forester_epoch` is always `None`.
pub fn get_test_env_accounts() -> EnvAccounts {
    // Reconstruct all deterministic test keypairs.
    let state_tree = Keypair::from_bytes(&MERKLE_TREE_TEST_KEYPAIR).unwrap();
    let nullifier_queue = Keypair::from_bytes(&NULLIFIER_QUEUE_TEST_KEYPAIR).unwrap();
    let group_seed = Keypair::from_bytes(&GROUP_PDA_SEED_TEST_KEYPAIR).unwrap();
    let governance_authority = Keypair::from_bytes(&PAYER_KEYPAIR).unwrap();
    let address_tree = Keypair::from_bytes(&ADDRESS_MERKLE_TREE_TEST_KEYPAIR).unwrap();
    let address_queue = Keypair::from_bytes(&ADDRESS_MERKLE_TREE_QUEUE_TEST_KEYPAIR).unwrap();
    let cpi_context = Keypair::from_bytes(&SIGNATURE_CPI_TEST_KEYPAIR).unwrap();
    let forester = Keypair::from_bytes(&FORESTER_TEST_KEYPAIR).unwrap();
    let group_pda = get_group_pda(group_seed.pubkey());
    let protocol_config_pda = get_protocol_config_pda_address();
    // Only the derived PDA is needed here; the instruction itself is unused.
    let (_, registered_program_pda) = create_register_program_instruction(
        governance_authority.pubkey(),
        protocol_config_pda,
        group_pda,
        light_system_program::ID,
    );
    EnvAccounts {
        merkle_tree_pubkey: state_tree.pubkey(),
        nullifier_queue_pubkey: nullifier_queue.pubkey(),
        group_pda,
        governance_authority_pda: protocol_config_pda.0,
        registered_forester_pda: get_forester_pda(&forester.pubkey()).0,
        registered_program_pda,
        address_merkle_tree_pubkey: address_tree.pubkey(),
        address_merkle_tree_queue_pubkey: address_queue.pubkey(),
        cpi_context_account_pubkey: cpi_context.pubkey(),
        registered_registry_program_pda: get_registered_program_pda(&light_registry::ID),
        forester_epoch: None,
        governance_authority,
        forester,
    }
}
/// Creates the on-chain accounts for a state Merkle tree and its nullifier
/// queue, then initializes them in a single transaction.
///
/// With `registry == true` the initialization goes through the registry
/// program and a CPI context account is additionally created
/// (`cpi_context_keypair` must then be `Some`, otherwise this panics);
/// with `registry == false` account-compression is called directly and
/// `index` is passed to the direct instruction.
///
/// Returns the signature of the processed transaction.
#[allow(clippy::too_many_arguments)]
pub async fn create_state_merkle_tree_and_queue_account<R: RpcConnection>(
    payer: &Keypair,
    registry: bool,
    rpc: &mut R,
    merkle_tree_keypair: &Keypair,
    nullifier_queue_keypair: &Keypair,
    cpi_context_keypair: Option<&Keypair>,
    program_owner: Option<Pubkey>,
    forester: Option<Pubkey>,
    index: u64,
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
) -> Result<Signature, RpcError> {
    use light_registry::account_compression_cpi::sdk::create_initialize_merkle_tree_instruction as create_initialize_merkle_tree_instruction_registry;
    // Merkle tree account size depends on the configured tree geometry.
    let size = account_compression::state::StateMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
    );
    let merkle_tree_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        size,
        rpc.get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &account_compression::ID,
        Some(merkle_tree_keypair),
    );
    // Queue account size depends on the configured capacity.
    let size =
        account_compression::state::queue::QueueAccount::size(queue_config.capacity as usize)
            .unwrap();
    let nullifier_queue_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        size,
        rpc.get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &account_compression::ID,
        Some(nullifier_queue_keypair),
    );
    let transaction = if registry {
        // Registry path: additionally create the CPI context account owned by
        // the system program, then initialize via the registry instruction.
        let cpi_context_keypair = cpi_context_keypair.unwrap();
        let rent_cpi_config = rpc
            .get_minimum_balance_for_rent_exemption(
                ProtocolConfig::default().cpi_context_size as usize,
            )
            .await
            .unwrap();
        let create_cpi_context_instruction = create_account_instruction(
            &payer.pubkey(),
            ProtocolConfig::default().cpi_context_size as usize,
            rent_cpi_config,
            &light_system_program::ID,
            Some(cpi_context_keypair),
        );
        let instruction = create_initialize_merkle_tree_instruction_registry(
            payer.pubkey(),
            merkle_tree_keypair.pubkey(),
            nullifier_queue_keypair.pubkey(),
            cpi_context_keypair.pubkey(),
            merkle_tree_config.clone(),
            queue_config.clone(),
            program_owner,
            forester,
        );
        Transaction::new_signed_with_payer(
            &[
                create_cpi_context_instruction,
                merkle_tree_account_create_ix,
                nullifier_queue_account_create_ix,
                instruction,
            ],
            Some(&payer.pubkey()),
            &vec![
                payer,
                merkle_tree_keypair,
                nullifier_queue_keypair,
                cpi_context_keypair,
            ],
            rpc.get_latest_blockhash().await.unwrap(),
        )
    } else {
        // Direct path: initialize via account-compression with the tree index.
        let instruction = create_initialize_merkle_tree_instruction(
            payer.pubkey(),
            None,
            merkle_tree_keypair.pubkey(),
            nullifier_queue_keypair.pubkey(),
            merkle_tree_config.clone(),
            queue_config.clone(),
            program_owner,
            forester,
            index,
        );
        Transaction::new_signed_with_payer(
            &[
                merkle_tree_account_create_ix,
                nullifier_queue_account_create_ix,
                instruction,
            ],
            Some(&payer.pubkey()),
            &vec![payer, merkle_tree_keypair, nullifier_queue_keypair],
            rpc.get_latest_blockhash().await.unwrap(),
        )
    };
    // The transaction is consumed here; the previous `transaction.clone()`
    // was a needless allocation since the value was never used again.
    rpc.process_transaction(transaction).await
}
/// Creates the on-chain accounts for an address Merkle tree and its queue,
/// then initializes them in a single transaction.
///
/// With `registry == true` the initialization goes through the registry
/// program; otherwise account-compression is called directly and `index` is
/// passed to the direct instruction.
///
/// Returns the signature of the processed transaction.
#[allow(clippy::too_many_arguments)]
#[inline(never)]
pub async fn create_address_merkle_tree_and_queue_account<R: RpcConnection>(
    payer: &Keypair,
    registry: bool,
    context: &mut R,
    address_merkle_tree_keypair: &Keypair,
    address_queue_keypair: &Keypair,
    program_owner: Option<Pubkey>,
    forester: Option<Pubkey>,
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
    index: u64,
) -> Result<Signature, RpcError> {
    use light_registry::account_compression_cpi::sdk::create_initialize_address_merkle_tree_and_queue_instruction as create_initialize_address_merkle_tree_and_queue_instruction_registry;
    // Queue account size depends on the configured capacity.
    let size =
        account_compression::state::QueueAccount::size(queue_config.capacity as usize).unwrap();
    let account_create_ix = create_account_instruction(
        &payer.pubkey(),
        size,
        context
            .get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &account_compression::ID,
        Some(address_queue_keypair),
    );
    // Merkle tree account size depends on the configured tree geometry.
    let size = account_compression::state::AddressMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
        merkle_tree_config.address_changelog_size as usize,
    );
    let mt_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        size,
        context
            .get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &account_compression::ID,
        Some(address_merkle_tree_keypair),
    );
    let instruction = if registry {
        create_initialize_address_merkle_tree_and_queue_instruction_registry(
            payer.pubkey(),
            forester,
            program_owner,
            address_merkle_tree_keypair.pubkey(),
            address_queue_keypair.pubkey(),
            merkle_tree_config.clone(),
            queue_config.clone(),
        )
    } else {
        create_initialize_address_merkle_tree_and_queue_instruction(
            index,
            payer.pubkey(),
            None,
            program_owner,
            forester,
            address_merkle_tree_keypair.pubkey(),
            address_queue_keypair.pubkey(),
            merkle_tree_config.clone(),
            queue_config.clone(),
        )
    };
    let transaction = Transaction::new_signed_with_payer(
        &[account_create_ix, mt_account_create_ix, instruction],
        Some(&payer.pubkey()),
        &vec![&payer, &address_queue_keypair, &address_merkle_tree_keypair],
        context.get_latest_blockhash().await.unwrap(),
    );
    // Return the result directly: the previous `if let Err(e) = result
    // { return Err(e); } result` dance (plus a needless `transaction.clone()`)
    // was equivalent to just propagating the awaited result.
    context.process_transaction(transaction).await
}
/// Registers `program_id_keypair`'s program with the registry program under
/// the given group, funding the registry's CPI authority with the
/// rent-exempt minimum for the new `RegisteredProgram` account.
///
/// The program id keypair must co-sign. Returns the registered-program PDA.
pub async fn register_program_with_registry_program<R: RpcConnection>(
    rpc: &mut R,
    governance_authority: &Keypair,
    group_pda: &Pubkey,
    program_id_keypair: &Keypair,
) -> Result<Pubkey, RpcError> {
    let protocol_config_pda = get_protocol_config_pda_address();
    let (register_ix, registered_program_pda) = create_register_program_instruction(
        governance_authority.pubkey(),
        protocol_config_pda,
        *group_pda,
        program_id_keypair.pubkey(),
    );
    // Fund the CPI authority first so it can pay for the RegisteredProgram
    // account inside the register instruction.
    let rent = rpc
        .get_minimum_balance_for_rent_exemption(RegisteredProgram::LEN)
        .await
        .unwrap();
    let (cpi_authority, _bump) = light_registry::utils::get_cpi_authority_pda();
    let fund_ix =
        system_instruction::transfer(&governance_authority.pubkey(), &cpi_authority, rent);
    let signers = [governance_authority, program_id_keypair];
    rpc.create_and_send_transaction(
        &[fund_ix, register_ix],
        &governance_authority.pubkey(),
        &signers,
    )
    .await?;
    Ok(registered_program_pda)
}
pub async fn deregister_program_with_registry_program<R: RpcConnection>(
rpc: &mut R,
governance_authority: &Keypair,
group_pda: &Pubkey,
program_id_keypair: &Keypair,
) -> Result<Pubkey, light_client::rpc::errors::RpcError> {
let governance_authority_pda = get_protocol_config_pda_address();
let (instruction, token_program_registered_program_pda) = create_deregister_program_instruction(
governance_authority.pubkey(),
governance_authority_pda,
*group_pda,
program_id_keypair.pubkey(),
);
let cpi_authority_pda = light_registry::utils::get_cpi_authority_pda();
let transfer_instruction = system_instruction::transfer(
&governance_authority.pubkey(),
&cpi_authority_pda.0,
rpc.get_minimum_balance_for_rent_exemption(RegisteredProgram::LEN)
.await
.unwrap(),
);
rpc.create_and_send_transaction(
&[transfer_instruction, instruction],
&governance_authority.pubkey(),
&[governance_authority],
)
.await?;
Ok(token_program_registered_program_pda)
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test/src/lib.rs
|
pub mod env_accounts;
pub mod test_env;
pub mod test_indexer;
pub mod test_rpc;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test/src/env_accounts.rs
|
// This file is generated by getAccountState.sh. Do not edit it manually.
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use std::str::FromStr;
/// Returns a snapshot of the registered-program PDA for the system program,
/// as dumped from a configured localnet (this file is script-generated; the
/// byte payload must not be edited by hand).
pub fn get_registered_program_pda() -> Account {
    Account {
        lamports: 1392000u64,
        data: vec![
            31u8, 251u8, 180u8, 235u8, 3u8, 116u8, 50u8, 4u8, 6u8, 167u8, 85u8, 248u8, 33u8, 57u8,
            5u8, 77u8, 68u8, 36u8, 177u8, 90u8, 240u8, 196u8, 48u8, 207u8, 47u8, 75u8, 127u8,
            152u8, 121u8, 58u8, 218u8, 18u8, 82u8, 212u8, 143u8, 54u8, 102u8, 198u8, 203u8, 206u8,
            15u8, 216u8, 212u8, 71u8, 211u8, 163u8, 62u8, 85u8, 44u8, 152u8, 241u8, 31u8, 23u8,
            118u8, 174u8, 50u8, 226u8, 14u8, 194u8, 135u8, 20u8, 8u8, 57u8, 68u8, 15u8, 93u8, 48u8,
            198u8, 231u8, 87u8, 72u8, 216u8,
        ],
        // Owned by the account-compression program.
        owner: Pubkey::from_str("compr6CUsB5m2jS4Y3831ztGSTnDpnKJTKS95d64XVq").unwrap(),
        executable: false,
        // u64::MAX marks the account as rent-exempt.
        rent_epoch: 18446744073709551615u64,
    }
}
/// Returns a snapshot of the registered-program PDA for the registry program,
/// as dumped from a configured localnet (this file is script-generated; the
/// byte payload must not be edited by hand).
pub fn get_registered_registry_program_pda() -> Account {
    Account {
        lamports: 1392000u64,
        data: vec![
            31u8, 251u8, 180u8, 235u8, 3u8, 116u8, 50u8, 4u8, 5u8, 13u8, 43u8, 19u8, 121u8, 81u8,
            54u8, 133u8, 207u8, 2u8, 242u8, 181u8, 253u8, 82u8, 145u8, 189u8, 149u8, 155u8, 43u8,
            6u8, 10u8, 165u8, 37u8, 234u8, 91u8, 52u8, 129u8, 59u8, 29u8, 185u8, 183u8, 110u8,
            15u8, 216u8, 212u8, 71u8, 211u8, 163u8, 62u8, 85u8, 44u8, 152u8, 241u8, 31u8, 23u8,
            118u8, 174u8, 50u8, 226u8, 14u8, 194u8, 135u8, 20u8, 8u8, 57u8, 68u8, 15u8, 93u8, 48u8,
            198u8, 231u8, 87u8, 72u8, 216u8,
        ],
        // Owned by the account-compression program.
        owner: Pubkey::from_str("compr6CUsB5m2jS4Y3831ztGSTnDpnKJTKS95d64XVq").unwrap(),
        executable: false,
        // u64::MAX marks the account as rent-exempt.
        rent_epoch: 18446744073709551615u64,
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test/src/test_rpc.rs
|
use std::fmt::{Debug, Formatter};
use async_trait::async_trait;
use borsh::BorshDeserialize;
use solana_banks_client::BanksClientError;
use solana_program_test::ProgramTestContext;
use solana_sdk::{
account::{Account, AccountSharedData},
clock::Slot,
commitment_config::CommitmentConfig,
epoch_info::EpochInfo,
hash::Hash,
instruction::{Instruction, InstructionError},
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
system_instruction,
transaction::{Transaction, TransactionError},
};
use light_client::rpc::{merkle_tree::MerkleTreeExt, RpcConnection, RpcError};
use light_client::transaction_params::TransactionParams;
/// `RpcConnection` implementation backed by an in-process
/// `solana-program-test` banks client, for tests that don't need a real node.
pub struct ProgramTestRpcConnection {
    pub context: ProgramTestContext,
}
impl Debug for ProgramTestRpcConnection {
    /// Prints only the type name; the wrapped `ProgramTestContext` is not
    /// rendered.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str("ProgramTestRpcConnection")
    }
}
#[async_trait]
impl RpcConnection for ProgramTestRpcConnection {
    // Methods that only make sense against a real RPC node (URL-based
    // construction, health checks, block times, ...) are left unimplemented.
    fn new<U: ToString>(_url: U, _commitment_config: Option<CommitmentConfig>) -> Self
    where
        Self: Sized,
    {
        unimplemented!()
    }
    /// Returns the funded payer keypair owned by the test context.
    fn get_payer(&self) -> &Keypair {
        &self.context.payer
    }
    fn get_url(&self) -> String {
        unimplemented!("get_url doesn't make sense for ProgramTestRpcConnection")
    }
    async fn health(&self) -> Result<(), RpcError> {
        unimplemented!()
    }
    async fn get_block_time(&self, _slot: u64) -> Result<i64, RpcError> {
        unimplemented!()
    }
    async fn get_epoch_info(&self) -> Result<EpochInfo, RpcError> {
        unimplemented!()
    }
    async fn get_program_accounts(
        &self,
        _program_id: &Pubkey,
    ) -> Result<Vec<(Pubkey, Account)>, RpcError> {
        unimplemented!("get_program_accounts")
    }
    /// Processes a transaction via the banks client and returns its first
    /// signature; transaction-level failures map to
    /// `RpcError::TransactionError`.
    async fn process_transaction(
        &mut self,
        transaction: Transaction,
    ) -> Result<Signature, RpcError> {
        let sig = *transaction.signatures.first().unwrap();
        let result = self
            .context
            .banks_client
            .process_transaction_with_metadata(transaction)
            .await
            .map_err(RpcError::from)?;
        result.result.map_err(RpcError::TransactionError)?;
        Ok(sig)
    }
    /// Like `process_transaction`, but additionally returns the current root
    /// slot after processing.
    async fn process_transaction_with_context(
        &mut self,
        transaction: Transaction,
    ) -> Result<(Signature, Slot), RpcError> {
        let sig = *transaction.signatures.first().unwrap();
        let result = self
            .context
            .banks_client
            .process_transaction_with_metadata(transaction)
            .await
            .map_err(RpcError::from)?;
        result.result.map_err(RpcError::TransactionError)?;
        let slot = self.context.banks_client.get_root_slot().await?;
        Ok((sig, slot))
    }
    /// Builds, simulates and (on success) executes a transaction, extracting
    /// an event of type `T` from the simulated inner instructions. When
    /// `transaction_params` is given, the payer's balance change is asserted
    /// against the expected rollover/network/compression fees.
    async fn create_and_send_transaction_with_event<T>(
        &mut self,
        instruction: &[Instruction],
        payer: &Pubkey,
        signers: &[&Keypair],
        transaction_params: Option<TransactionParams>,
    ) -> Result<Option<(T, Signature, Slot)>, RpcError>
    where
        T: BorshDeserialize + Send + Debug,
    {
        // Payer balance before execution, used for the fee assertion below.
        let pre_balance = self
            .context
            .banks_client
            .get_account(*payer)
            .await?
            .unwrap()
            .lamports;
        let transaction = Transaction::new_signed_with_payer(
            instruction,
            Some(payer),
            signers,
            self.context.get_new_latest_blockhash().await?,
        );
        let signature = transaction.signatures[0];
        // Simulate the transaction. Currently, in banks-client/server, only
        // simulations are able to track CPIs. Therefore, simulating is the
        // only way to retrieve the event.
        let simulation_result = self
            .context
            .banks_client
            .simulate_transaction(transaction.clone())
            .await?;
        // Handle an error nested in the simulation result.
        if let Some(Err(e)) = simulation_result.result {
            let error = match e {
                TransactionError::InstructionError(_, _) => RpcError::TransactionError(e),
                _ => RpcError::from(BanksClientError::TransactionError(e)),
            };
            return Err(error);
        }
        // Retrieve the event: the first inner-instruction payload that
        // deserializes as `T`.
        let event = simulation_result
            .simulation_details
            .and_then(|details| details.inner_instructions)
            .and_then(|instructions| {
                instructions.iter().flatten().find_map(|inner_instruction| {
                    T::try_from_slice(inner_instruction.instruction.data.as_slice()).ok()
                })
            });
        // If transaction was successful, execute it.
        if let Some(Ok(())) = simulation_result.result {
            let result = self
                .context
                .banks_client
                .process_transaction(transaction)
                .await;
            if let Err(e) = result {
                let error = RpcError::from(e);
                return Err(error);
            }
        }
        // assert correct rollover fee and network_fee distribution
        if let Some(transaction_params) = transaction_params {
            // Dedup signers so the per-signer Solana network fee is counted once.
            let mut deduped_signers = signers.to_vec();
            deduped_signers.dedup();
            let post_balance = self.get_account(*payer).await?.unwrap().lamports;
            // a network_fee is charged if there are input compressed accounts or new addresses
            let mut network_fee: i64 = 0;
            if transaction_params.num_input_compressed_accounts != 0 {
                network_fee += transaction_params.fee_config.network_fee as i64;
            }
            if transaction_params.num_new_addresses != 0 {
                network_fee += transaction_params.fee_config.address_network_fee as i64;
            }
            // Expected balance: pre minus rollover fees per new address and
            // output account, compression amount, per-signer network fee and
            // the protocol network fee computed above.
            let expected_post_balance = pre_balance as i64
                - i64::from(transaction_params.num_new_addresses)
                    * transaction_params.fee_config.address_queue_rollover as i64
                - i64::from(transaction_params.num_output_compressed_accounts)
                    * transaction_params.fee_config.state_merkle_tree_rollover as i64
                - transaction_params.compress
                - transaction_params.fee_config.solana_network_fee * deduped_signers.len() as i64
                - network_fee;
            if post_balance as i64 != expected_post_balance {
                println!("transaction_params: {:?}", transaction_params);
                println!("pre_balance: {}", pre_balance);
                println!("post_balance: {}", post_balance);
                println!("expected post_balance: {}", expected_post_balance);
                println!(
                    "diff post_balance: {}",
                    post_balance as i64 - expected_post_balance
                );
                println!(
                    "rollover fee: {}",
                    transaction_params.fee_config.state_merkle_tree_rollover
                );
                println!(
                    "address_network_fee: {}",
                    transaction_params.fee_config.address_network_fee
                );
                println!("network_fee: {}", network_fee);
                println!("num signers {}", deduped_signers.len());
                // Synthetic custom error (code 11111) signals a fee mismatch.
                return Err(RpcError::from(BanksClientError::TransactionError(
                    TransactionError::InstructionError(0, InstructionError::Custom(11111)),
                )));
            }
        }
        let slot = self.context.banks_client.get_root_slot().await?;
        let result = event.map(|event| (event, signature, slot));
        Ok(result)
    }
    /// Banks-client transactions are final once processed; always confirmed.
    async fn confirm_transaction(&self, _transaction: Signature) -> Result<bool, RpcError> {
        Ok(true)
    }
    async fn get_account(&mut self, address: Pubkey) -> Result<Option<Account>, RpcError> {
        self.context
            .banks_client
            .get_account(address)
            .await
            .map_err(RpcError::from)
    }
    /// Directly overwrites an account in the test ledger.
    fn set_account(&mut self, address: &Pubkey, account: &AccountSharedData) {
        self.context.set_account(address, account);
    }
    async fn get_minimum_balance_for_rent_exemption(
        &mut self,
        data_len: usize,
    ) -> Result<u64, RpcError> {
        let rent = self
            .context
            .banks_client
            .get_rent()
            .await
            .map_err(RpcError::from);
        Ok(rent?.minimum_balance(data_len))
    }
    /// "Airdrop" implemented as a plain transfer from the context payer,
    /// since there is no faucet in program-test.
    async fn airdrop_lamports(
        &mut self,
        to: &Pubkey,
        lamports: u64,
    ) -> Result<Signature, RpcError> {
        // Create a transfer instruction
        let transfer_instruction =
            system_instruction::transfer(&self.context.payer.pubkey(), to, lamports);
        let latest_blockhash = self.get_latest_blockhash().await.unwrap();
        // Create and sign a transaction
        let transaction = Transaction::new_signed_with_payer(
            &[transfer_instruction],
            Some(&self.get_payer().pubkey()),
            &vec![&self.get_payer()],
            latest_blockhash,
        );
        let sig = *transaction.signatures.first().unwrap();
        // Send the transaction
        self.context
            .banks_client
            .process_transaction(transaction)
            .await?;
        Ok(sig)
    }
    async fn get_balance(&mut self, pubkey: &Pubkey) -> Result<u64, RpcError> {
        self.context
            .banks_client
            .get_balance(*pubkey)
            .await
            .map_err(RpcError::from)
    }
    /// Uses `get_new_latest_blockhash` so repeated calls never return a
    /// stale blockhash.
    async fn get_latest_blockhash(&mut self) -> Result<Hash, RpcError> {
        self.context
            .get_new_latest_blockhash()
            .await
            .map_err(|e| RpcError::from(BanksClientError::from(e)))
    }
    async fn get_slot(&mut self) -> Result<u64, RpcError> {
        self.context
            .banks_client
            .get_root_slot()
            .await
            .map_err(RpcError::from)
    }
    async fn warp_to_slot(&mut self, slot: Slot) -> Result<(), RpcError> {
        self.context
            .warp_to_slot(slot)
            .map_err(|_| RpcError::InvalidWarpSlot)
    }
    async fn send_transaction(&self, _transaction: &Transaction) -> Result<Signature, RpcError> {
        unimplemented!("send transaction is unimplemented for ProgramTestRpcConnection")
    }
}
// Marker impl: relies entirely on `MerkleTreeExt`'s default methods.
impl MerkleTreeExt for ProgramTestRpcConnection {}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test
|
solana_public_repos/Lightprotocol/light-protocol/light-program-test/src/test_indexer.rs
|
use borsh::BorshDeserialize;
use light_client::{
indexer::{
AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts,
StateMerkleTreeBundle,
},
rpc::{merkle_tree::MerkleTreeExt, RpcConnection},
transaction_params::FeeConfig,
};
use light_hasher::Poseidon;
use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree};
use light_merkle_tree_reference::MerkleTree;
use light_prover_client::gnark::helpers::{spawn_prover, ProofType, ProverConfig};
use light_prover_client::{
gnark::{
combined_json_formatter::CombinedJsonStruct,
constants::{PROVE_PATH, SERVER_ADDRESS},
helpers::health_check,
inclusion_json_formatter::BatchInclusionJsonStruct,
non_inclusion_json_formatter::BatchNonInclusionJsonStruct,
proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct},
},
inclusion::merkle_inclusion_proof_inputs::{InclusionMerkleProofInputs, InclusionProofInputs},
non_inclusion::merkle_non_inclusion_proof_inputs::{
get_non_inclusion_proof_inputs, NonInclusionProofInputs,
},
};
use light_sdk::{
compressed_account::CompressedAccountWithMerkleContext,
event::PublicTransactionEvent,
merkle_context::MerkleContext,
proof::{CompressedProof, ProofRpcResult},
token::{TokenData, TokenDataWithMerkleContext},
ADDRESS_MERKLE_TREE_CANOPY_DEPTH, ADDRESS_MERKLE_TREE_HEIGHT, PROGRAM_ID_LIGHT_SYSTEM,
STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT,
TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR,
};
use log::warn;
use num_bigint::BigInt;
use num_traits::FromBytes;
use reqwest::Client;
use solana_sdk::pubkey::Pubkey;
use std::{marker::PhantomData, time::Duration};
/// In-memory indexer for tests: mirrors on-chain Merkle trees and compressed
/// accounts locally so that inclusion/non-inclusion proofs can be produced
/// without a real indexer service.
#[derive(Debug)]
pub struct TestIndexer<R>
where
    R: RpcConnection + MerkleTreeExt,
{
    /// Local reference copies of all state Merkle trees and their accounts.
    pub state_merkle_trees: Vec<StateMerkleTreeBundle>,
    /// Local reference copies of all address Merkle trees and their accounts.
    pub address_merkle_trees: Vec<AddressMerkleTreeBundle>,
    /// Live (non-nullified) compressed accounts.
    pub compressed_accounts: Vec<CompressedAccountWithMerkleContext>,
    /// Compressed accounts that have been spent (moved here on nullification).
    pub nullified_compressed_accounts: Vec<CompressedAccountWithMerkleContext>,
    /// Live compressed token accounts.
    pub token_compressed_accounts: Vec<TokenDataWithMerkleContext>,
    /// Spent compressed token accounts.
    pub token_nullified_compressed_accounts: Vec<TokenDataWithMerkleContext>,
    /// All public transaction events observed so far.
    pub events: Vec<PublicTransactionEvent>,
    // Marker tying this indexer to a specific RPC connection type.
    _rpc: PhantomData<R>,
}
impl<R> Indexer<R> for TestIndexer<R>
where
    R: RpcConnection + MerkleTreeExt,
{
    /// Applies a `PublicTransactionEvent` to the local indexer state.
    ///
    /// Inputs: each input hash is located among the cached regular or token
    /// accounts, moved to the matching `*_nullified_*` list and removed from
    /// the live list. Outputs: each output account is cached (token accounts
    /// are detected via owner + discriminator and deserialized as
    /// `TokenData`) and its hash is appended to the local state Merkle tree.
    ///
    /// Returns the regular and token accounts created by this event.
    /// Panics if an input hash is unknown or if hashing/appending fails.
    fn add_event_and_compressed_accounts(
        &mut self,
        event: &PublicTransactionEvent,
    ) -> (
        Vec<CompressedAccountWithMerkleContext>,
        Vec<TokenDataWithMerkleContext>,
    ) {
        for hash in event.input_compressed_account_hashes.iter() {
            // Look for the input hash among the regular accounts first.
            let index = self.compressed_accounts.iter().position(|x| {
                x.compressed_account
                    .hash::<Poseidon>(
                        &x.merkle_context.merkle_tree_pubkey,
                        &x.merkle_context.leaf_index,
                    )
                    .unwrap()
                    == *hash
            });
            if let Some(index) = index {
                self.nullified_compressed_accounts
                    .push(self.compressed_accounts[index].clone());
                self.compressed_accounts.remove(index);
                continue;
            };
            // Not a regular account: it must be a cached token account.
            if index.is_none() {
                let index = self
                    .token_compressed_accounts
                    .iter()
                    .position(|x| {
                        x.compressed_account
                            .compressed_account
                            .hash::<Poseidon>(
                                &x.compressed_account.merkle_context.merkle_tree_pubkey,
                                &x.compressed_account.merkle_context.leaf_index,
                            )
                            .unwrap()
                            == *hash
                    })
                    .expect("input compressed account not found");
                self.token_nullified_compressed_accounts
                    .push(self.token_compressed_accounts[index].clone());
                self.token_compressed_accounts.remove(index);
            }
        }
        let mut compressed_accounts = Vec::new();
        let mut token_compressed_accounts = Vec::new();
        for (i, compressed_account) in event.output_compressed_accounts.iter().enumerate() {
            // Resolve the nullifier queue of the tree this output lands in.
            let nullifier_queue_pubkey = self
                .state_merkle_trees
                .iter()
                .find(|x| {
                    x.accounts.merkle_tree
                        == event.pubkey_array
                            [event.output_compressed_accounts[i].merkle_tree_index as usize]
                })
                .unwrap()
                .accounts
                .nullifier_queue;
            // if data is some, try to deserialize token data, if it fails, add to compressed_accounts
            // if data is none add to compressed_accounts
            // new accounts are inserted in front so that the newest accounts are found first
            match compressed_account.compressed_account.data.as_ref() {
                Some(data) => {
                    // Accounts owned by the light-system program carrying the
                    // token discriminator are treated as token accounts.
                    if compressed_account.compressed_account.owner == PROGRAM_ID_LIGHT_SYSTEM
                        && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR
                    {
                        // Silently skips outputs whose data fails to
                        // deserialize as TokenData.
                        if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) {
                            let token_account = TokenDataWithMerkleContext {
                                token_data,
                                compressed_account: CompressedAccountWithMerkleContext {
                                    compressed_account: compressed_account
                                        .compressed_account
                                        .clone(),
                                    merkle_context: MerkleContext {
                                        leaf_index: event.output_leaf_indices[i],
                                        merkle_tree_pubkey: event.pubkey_array[event
                                            .output_compressed_accounts[i]
                                            .merkle_tree_index
                                            as usize],
                                        nullifier_queue_pubkey,
                                        queue_index: None,
                                    },
                                },
                            };
                            token_compressed_accounts.push(token_account.clone());
                            self.token_compressed_accounts.insert(0, token_account);
                        }
                    } else {
                        // Data-carrying account of some other program.
                        let compressed_account = CompressedAccountWithMerkleContext {
                            compressed_account: compressed_account.compressed_account.clone(),
                            merkle_context: MerkleContext {
                                leaf_index: event.output_leaf_indices[i],
                                merkle_tree_pubkey: event.pubkey_array[event
                                    .output_compressed_accounts[i]
                                    .merkle_tree_index
                                    as usize],
                                nullifier_queue_pubkey,
                                queue_index: None,
                            },
                        };
                        compressed_accounts.push(compressed_account.clone());
                        self.compressed_accounts.insert(0, compressed_account);
                    }
                }
                None => {
                    // Data-less account (e.g. plain lamport holder).
                    let compressed_account = CompressedAccountWithMerkleContext {
                        compressed_account: compressed_account.compressed_account.clone(),
                        merkle_context: MerkleContext {
                            leaf_index: event.output_leaf_indices[i],
                            merkle_tree_pubkey: event.pubkey_array
                                [event.output_compressed_accounts[i].merkle_tree_index as usize],
                            nullifier_queue_pubkey,
                            queue_index: None,
                        },
                    };
                    compressed_accounts.push(compressed_account.clone());
                    self.compressed_accounts.insert(0, compressed_account);
                }
            };
            // Mirror the on-chain insertion in the local reference tree.
            let merkle_tree = &mut self
                .state_merkle_trees
                .iter_mut()
                .find(|x| {
                    x.accounts.merkle_tree
                        == event.pubkey_array
                            [event.output_compressed_accounts[i].merkle_tree_index as usize]
                })
                .unwrap()
                .merkle_tree;
            merkle_tree
                .append(
                    &compressed_account
                        .compressed_account
                        .hash::<Poseidon>(
                            &event.pubkey_array
                                [event.output_compressed_accounts[i].merkle_tree_index as usize],
                            &event.output_leaf_indices[i],
                        )
                        .unwrap(),
                )
                .expect("insert failed");
        }
        self.events.push(event.clone());
        (compressed_accounts, token_compressed_accounts)
    }
    /// Requests a validity proof from the local gnark prover for the given
    /// account hashes (inclusion) and/or new addresses (non-inclusion).
    ///
    /// `state_merkle_tree_pubkeys[i]` / `address_merkle_tree_pubkeys[i]` must
    /// correspond to `compressed_accounts[i]` / `new_addresses[i]`.
    ///
    /// Panics unless `compressed_accounts.len()` is 1, 2, 3, 4 or 8 and
    /// `new_addresses.len()` is 1 or 2 (circuit-supported batch sizes), if
    /// neither input is provided, or if the prover still fails after three
    /// HTTP attempts (1 s apart).
    async fn create_proof_for_compressed_accounts(
        &mut self,
        compressed_accounts: Option<&[[u8; 32]]>,
        state_merkle_tree_pubkeys: Option<&[solana_sdk::pubkey::Pubkey]>,
        new_addresses: Option<&[[u8; 32]]>,
        address_merkle_tree_pubkeys: Option<Vec<solana_sdk::pubkey::Pubkey>>,
        rpc: &mut R,
    ) -> ProofRpcResult {
        if compressed_accounts.is_some()
            && ![1usize, 2usize, 3usize, 4usize, 8usize]
                .contains(&compressed_accounts.unwrap().len())
        {
            panic!(
                "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}",
                compressed_accounts.unwrap().len()
            )
        }
        if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) {
            panic!("new_addresses must be of length 1, 2")
        }
        let client = Client::new();
        // Build the JSON payload for the circuit combination that applies.
        let (root_indices, address_root_indices, json_payload) =
            match (compressed_accounts, new_addresses) {
                (Some(accounts), None) => {
                    let (payload, indices) = self
                        .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc)
                        .await;
                    (indices, Vec::new(), payload.to_string())
                }
                (None, Some(addresses)) => {
                    let (payload, indices) = self
                        .process_non_inclusion_proofs(
                            address_merkle_tree_pubkeys.unwrap().as_slice(),
                            addresses,
                            rpc,
                        )
                        .await;
                    (Vec::<u16>::new(), indices, payload.to_string())
                }
                (Some(accounts), Some(addresses)) => {
                    // Combined circuit: inclusion + non-inclusion in one proof.
                    let (inclusion_payload, inclusion_indices) = self
                        .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc)
                        .await;
                    let (non_inclusion_payload, non_inclusion_indices) = self
                        .process_non_inclusion_proofs(
                            address_merkle_tree_pubkeys.unwrap().as_slice(),
                            addresses,
                            rpc,
                        )
                        .await;
                    let combined_payload = CombinedJsonStruct {
                        inclusion: inclusion_payload.inputs,
                        non_inclusion: non_inclusion_payload.inputs,
                    }
                    .to_string();
                    (inclusion_indices, non_inclusion_indices, combined_payload)
                }
                _ => {
                    panic!("At least one of compressed_accounts or new_addresses must be provided")
                }
            };
        let mut retries = 3;
        while retries > 0 {
            let response_result = client
                .post(&format!("{}{}", SERVER_ADDRESS, PROVE_PATH))
                .header("Content-Type", "text/plain; charset=utf-8")
                .body(json_payload.clone())
                .send()
                .await
                .expect("Failed to execute request.");
            if response_result.status().is_success() {
                // Decompress/convert the gnark proof into on-chain format.
                let body = response_result.text().await.unwrap();
                let proof_json = deserialize_gnark_proof_json(&body).unwrap();
                let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json);
                let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c);
                return ProofRpcResult {
                    root_indices,
                    address_root_indices,
                    proof: CompressedProof {
                        a: proof_a,
                        b: proof_b,
                        c: proof_c,
                    },
                };
            } else {
                warn!("Error: {}", response_result.text().await.unwrap());
                tokio::time::sleep(Duration::from_secs(1)).await;
                retries -= 1;
            }
        }
        panic!("Failed to get proof from server");
    }
    /// Returns compressed accounts owned by the given `owner`.
    fn get_compressed_accounts_by_owner(
        &self,
        owner: &Pubkey,
    ) -> Vec<CompressedAccountWithMerkleContext> {
        self.compressed_accounts
            .iter()
            .filter(|x| x.compressed_account.owner == *owner)
            .cloned()
            .collect()
    }
}
impl<R> TestIndexer<R>
where
    R: RpcConnection + MerkleTreeExt,
{
    /// Creates a test indexer mirroring the given state and address Merkle
    /// tree accounts.
    ///
    /// Spawns the local gnark prover with the requested circuits
    /// (`inclusion` / `non_inclusion`) and blocks until its health check
    /// succeeds.
    pub async fn new(
        state_merkle_tree_accounts: &[StateMerkleTreeAccounts],
        address_merkle_tree_accounts: &[AddressMerkleTreeAccounts],
        inclusion: bool,
        non_inclusion: bool,
    ) -> Self {
        // One local reference tree per on-chain state tree.
        let state_merkle_trees = state_merkle_tree_accounts
            .iter()
            .map(|accounts| {
                let merkle_tree = Box::new(MerkleTree::<Poseidon>::new(
                    STATE_MERKLE_TREE_HEIGHT,
                    STATE_MERKLE_TREE_CANOPY_DEPTH,
                ));
                StateMerkleTreeBundle {
                    accounts: *accounts,
                    merkle_tree,
                    rollover_fee: FeeConfig::default().state_merkle_tree_rollover,
                }
            })
            .collect::<Vec<_>>();
        let address_merkle_trees = address_merkle_tree_accounts
            .iter()
            .map(|accounts| Self::add_address_merkle_tree_bundle(accounts))
            .collect::<Vec<_>>();
        let mut prover_config = ProverConfig {
            circuits: vec![],
            run_mode: None,
        };
        if inclusion {
            prover_config.circuits.push(ProofType::Inclusion);
        }
        if non_inclusion {
            prover_config.circuits.push(ProofType::NonInclusion);
        }
        spawn_prover(true, prover_config).await;
        // Wait for the prover server to come up (presumably 20 attempts,
        // 1 s apart — TODO confirm against health_check's signature).
        health_check(20, 1).await;
        Self {
            state_merkle_trees,
            address_merkle_trees,
            compressed_accounts: Vec::new(),
            nullified_compressed_accounts: Vec::new(),
            token_compressed_accounts: Vec::new(),
            token_nullified_compressed_accounts: Vec::new(),
            events: Vec::new(),
            _rpc: PhantomData,
        }
    }
    /// Builds a local bundle (initialized indexed Merkle tree + indexed
    /// array) mirroring the given on-chain address Merkle tree accounts.
    pub fn add_address_merkle_tree_bundle(
        accounts: &AddressMerkleTreeAccounts,
        // TODO: add config here
    ) -> AddressMerkleTreeBundle {
        let mut merkle_tree = Box::new(
            IndexedMerkleTree::<Poseidon, usize>::new(
                ADDRESS_MERKLE_TREE_HEIGHT,
                ADDRESS_MERKLE_TREE_CANOPY_DEPTH,
            )
            .unwrap(),
        );
        merkle_tree.init().unwrap();
        let mut indexed_array = Box::<IndexedArray<Poseidon, usize>>::default();
        indexed_array.init().unwrap();
        AddressMerkleTreeBundle {
            merkle_tree,
            indexed_array,
            accounts: *accounts,
            rollover_fee: FeeConfig::default().address_queue_rollover,
        }
    }
    /// Builds the batched inclusion-proof JSON payload for `accounts` and
    /// fetches each tree's current root index from the on-chain account.
    ///
    /// `merkle_tree_pubkeys[i]` must be the tree containing `accounts[i]`.
    /// Panics if an account or tree is unknown.
    async fn process_inclusion_proofs(
        &self,
        merkle_tree_pubkeys: &[Pubkey],
        accounts: &[[u8; 32]],
        rpc: &mut R,
    ) -> (BatchInclusionJsonStruct, Vec<u16>) {
        let mut inclusion_proofs = Vec::new();
        let mut root_indices = Vec::new();
        for (i, account) in accounts.iter().enumerate() {
            let merkle_tree = &self
                .state_merkle_trees
                .iter()
                .find(|x| x.accounts.merkle_tree == merkle_tree_pubkeys[i])
                .unwrap()
                .merkle_tree;
            let leaf_index = merkle_tree.get_leaf_index(account).unwrap();
            let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap();
            inclusion_proofs.push(InclusionMerkleProofInputs {
                root: BigInt::from_be_bytes(merkle_tree.root().as_slice()),
                leaf: BigInt::from_be_bytes(account),
                path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()),
                path_elements: proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(),
            });
            // The root index must come from the on-chain tree, not the local
            // reference copy.
            let onchain_merkle_tree = rpc
                .get_state_merkle_tree(merkle_tree_pubkeys[i])
                .await
                .unwrap();
            root_indices.push(onchain_merkle_tree.root_index() as u16);
        }
        let inclusion_proof_inputs = InclusionProofInputs(inclusion_proofs.as_slice());
        let batch_inclusion_proof_inputs =
            BatchInclusionJsonStruct::from_inclusion_proof_inputs(&inclusion_proof_inputs);
        (batch_inclusion_proof_inputs, root_indices)
    }
    /// Builds the batched non-inclusion-proof JSON payload for `addresses`
    /// and fetches each address tree's current root index from on-chain.
    ///
    /// `address_merkle_tree_pubkeys[i]` must correspond to `addresses[i]`.
    async fn process_non_inclusion_proofs(
        &self,
        address_merkle_tree_pubkeys: &[Pubkey],
        addresses: &[[u8; 32]],
        rpc: &mut R,
    ) -> (BatchNonInclusionJsonStruct, Vec<u16>) {
        let mut non_inclusion_proofs = Vec::new();
        let mut address_root_indices = Vec::new();
        for (i, address) in addresses.iter().enumerate() {
            let address_tree = &self
                .address_merkle_trees
                .iter()
                .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i])
                .unwrap();
            let proof_inputs = get_non_inclusion_proof_inputs(
                address,
                &address_tree.merkle_tree,
                &address_tree.indexed_array,
            );
            non_inclusion_proofs.push(proof_inputs);
            let onchain_address_merkle_tree = rpc
                .get_address_merkle_tree(address_merkle_tree_pubkeys[i])
                .await
                .unwrap();
            address_root_indices.push(onchain_address_merkle_tree.root_index() as u16);
        }
        let non_inclusion_proof_inputs = NonInclusionProofInputs(non_inclusion_proofs.as_slice());
        let batch_non_inclusion_proof_inputs =
            BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs(
                &non_inclusion_proof_inputs,
            );
        (batch_non_inclusion_proof_inputs, address_root_indices)
    }
    /// deserializes an event
    /// adds the output_compressed_accounts to the compressed_accounts
    /// removes the input_compressed_accounts from the compressed_accounts
    /// adds the input_compressed_accounts to the nullified_compressed_accounts
    /// deserializes token data from the output_compressed_accounts
    /// adds the token_compressed_accounts to the token_compressed_accounts
    ///
    /// Convenience wrapper around [`Indexer::add_event_and_compressed_accounts`]
    /// that discards the returned account lists.
    pub fn add_compressed_accounts_with_token_data(&mut self, event: &PublicTransactionEvent) {
        self.add_event_and_compressed_accounts(event);
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol
|
solana_public_repos/Lightprotocol/light-protocol/sdk/Cargo.toml
|
[package]
name = "light-sdk"
version = "0.11.0"
description = "Rust SDK for ZK Compression on Solana"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[lib]
crate-type = ["cdylib", "lib"]
name = "light_sdk"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
custom-heap = ["light-heap"]
mem-profiling = []
default = ["custom-heap"]
test-sbf = []
bench-sbf = []
idl-build = ["anchor-lang/idl-build"]
legacy = ["account-compression", "light-system-program"]
[dependencies]
# Solana
solana-program = { workspace = true }
# Anchor
anchor-lang = { workspace = true }
# Math and crypto
num-bigint = { workspace = true }
aligned-sized = { version = "1.1.0", path = "../macros/aligned-sized" }
light-macros = { version = "1.1.0", path = "../macros/light" }
light-sdk-macros = { version = "0.4.0", path = "../macros/light-sdk-macros" }
bytemuck = "1.17"
light-hasher = { version = "1.1.0", path = "../merkle-tree/hasher", features=["solana"] }
light-heap = { version = "1.1.0", path = "../heap", optional = true }
light-indexed-merkle-tree = { workspace = true }
account-compression = { workspace = true , optional = true }
light-system-program = { workspace = true , optional = true }
light-concurrent-merkle-tree = { path = "../merkle-tree/concurrent", version = "1.1.0" }
light-utils = { version = "1.1.0", path = "../utils" }
groth16-solana = "0.0.3"
light-verifier = { path = "../circuit-lib/verifier", version = "1.1.0", features = ["solana"] }
borsh = "0.10.0"
[target.'cfg(not(target_os = "solana"))'.dependencies]
solana-sdk = { workspace = true }
[dev-dependencies]
solana-banks-interface = { workspace = true }
solana-cli-output = { workspace = true }
solana-program-test = { workspace = true }
serde_json = "1.0.133"
reqwest = "0.12"
tokio = { workspace = true }
light-prover-client = { version = "1.2.0", path = "../circuit-lib/light-prover-client" }
light-merkle-tree-reference = { version = "1.1.0", path = "../merkle-tree/reference/" }
light-indexed-merkle-tree = { version = "1.1.0", path = "../merkle-tree/indexed/" }
num-bigint = "0.4.6"
num-traits = "0.2.19"
lazy_static = "1.4.0"
light-hash-set = { workspace = true, features = ["solana"] }
rand = "0.8.5"
| 0
|
solana_public_repos/Lightprotocol/light-protocol
|
solana_public_repos/Lightprotocol/light-protocol/sdk/readme.md
|
# Light SDK
Rust SDK with helpers to interact with ZK Compression on Solana.
Documentation is available at https://zkcompression.com
Source code: https://github.com/Lightprotocol/light-protocol/tree/main/sdk
## Audit
This code is unaudited. Use at your own risk.
| 0
|
solana_public_repos/Lightprotocol/light-protocol
|
solana_public_repos/Lightprotocol/light-protocol/sdk/Xargo.toml
|
[target.bpfel-unknown-unknown.dependencies.std]
features = []
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/account_meta.rs
|
//! Types used
use anchor_lang::{AnchorDeserialize, AnchorSerialize};
use solana_program::pubkey::Pubkey;
use crate::{
compressed_account::CompressedAccountWithMerkleContext,
error::LightSdkError,
merkle_context::{
pack_address_merkle_context, pack_merkle_context, AddressMerkleContext,
PackedAddressMerkleContext, PackedMerkleContext, RemainingAccounts,
},
};
/// Client-side description of a compressed account passed in instruction
/// data. Built by [`LightAccountMeta::new_init`] / [`new_mut`] /
/// [`new_close`]; fields that do not apply to the given operation stay
/// `None`.
#[derive(Debug, Clone, AnchorSerialize, AnchorDeserialize, PartialEq, Default)]
pub struct LightAccountMeta {
    /// Lamports.
    pub lamports: Option<u64>,
    /// Address of the account (the address can change).
    pub address: Option<[u8; 32]>,
    /// Data of the account.
    pub data: Option<Vec<u8>>,
    /// Merkle tree.
    pub merkle_context: Option<PackedMerkleContext>,
    /// Merkle tree root index.
    pub merkle_tree_root_index: Option<u16>,
    /// Output Merkle tree.
    pub output_merkle_tree_index: Option<u8>,
    /// Address Merkle tree. Set only when adding or updating the address.
    pub address_merkle_context: Option<PackedAddressMerkleContext>,
    /// Address Merkle tree root index. Set only when adding or updating the
    /// address.
    pub address_merkle_tree_root_index: Option<u16>,
    /// Account is read only.
    /// (not used for now, just a placeholder)
    pub read_only: bool,
}
impl LightAccountMeta {
    /// Metadata for a compressed account that is being created.
    ///
    /// Registers the output Merkle tree — and, when the account is assigned
    /// an address, the address Merkle tree accounts — in
    /// `remaining_accounts`.
    #[allow(clippy::too_many_arguments)]
    pub fn new_init(
        output_merkle_tree: &Pubkey,
        address_merkle_context: Option<&AddressMerkleContext>,
        address_merkle_tree_root_index: Option<u16>,
        remaining_accounts: &mut RemainingAccounts,
    ) -> Result<Self, LightSdkError> {
        // Register the output tree before packing the address context so
        // remaining-account indices are assigned in the established order.
        let output_merkle_tree_index = remaining_accounts.insert_or_get(*output_merkle_tree);
        let address_merkle_context = address_merkle_context
            .map(|context| pack_address_merkle_context(context, remaining_accounts));
        Ok(Self {
            output_merkle_tree_index: Some(output_merkle_tree_index),
            address_merkle_context,
            address_merkle_tree_root_index,
            ..Self::default()
        })
    }

    /// Metadata for a compressed account that is being mutated: the current
    /// state is consumed as input and re-created in `output_merkle_tree`.
    #[allow(clippy::too_many_arguments)]
    pub fn new_mut(
        compressed_account: &CompressedAccountWithMerkleContext,
        merkle_tree_root_index: u16,
        output_merkle_tree: &Pubkey,
        remaining_accounts: &mut RemainingAccounts,
    ) -> Self {
        let merkle_context =
            pack_merkle_context(&compressed_account.merkle_context, remaining_accounts);
        let output_merkle_tree_index = remaining_accounts.insert_or_get(*output_merkle_tree);
        let account = &compressed_account.compressed_account;
        Self {
            lamports: Some(account.lamports),
            address: account.address,
            data: account.data.as_ref().map(|data| data.data.clone()),
            merkle_context: Some(merkle_context),
            merkle_tree_root_index: Some(merkle_tree_root_index),
            output_merkle_tree_index: Some(output_merkle_tree_index),
            ..Self::default()
        }
    }

    /// Metadata for a compressed account that is being closed: consumed as
    /// input with no output state (hence no output Merkle tree index).
    pub fn new_close(
        compressed_account: &CompressedAccountWithMerkleContext,
        merkle_tree_root_index: u16,
        remaining_accounts: &mut RemainingAccounts,
    ) -> Self {
        let merkle_context =
            pack_merkle_context(&compressed_account.merkle_context, remaining_accounts);
        let account = &compressed_account.compressed_account;
        Self {
            lamports: Some(account.lamports),
            address: account.address,
            data: account.data.as_ref().map(|data| data.data.clone()),
            merkle_context: Some(merkle_context),
            merkle_tree_root_index: Some(merkle_tree_root_index),
            ..Self::default()
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/account_info.rs
|
use std::{cell::RefCell, rc::Rc};
use anchor_lang::prelude::Result;
use solana_program::pubkey::Pubkey;
use crate::{
account_meta::LightAccountMeta,
address::PackedNewAddressParams,
compressed_account::{
CompressedAccount, CompressedAccountData, OutputCompressedAccountWithPackedContext,
PackedCompressedAccountWithMerkleContext,
},
error::LightSdkError,
merkle_context::PackedMerkleContext,
};
/// Input-side view of an existing compressed account that is consumed by
/// the instruction.
#[derive(Debug)]
pub struct LightInputAccountInfo<'a> {
    /// Lamports.
    pub lamports: Option<u64>,
    /// Address.
    pub address: Option<[u8; 32]>,
    /// Account data (borrowed from instruction data).
    pub data: Option<&'a [u8]>,
    /// Data hash. Left `None` by the constructors; the program assigns it.
    pub data_hash: Option<[u8; 32]>,
    /// Merkle tree context.
    pub merkle_context: PackedMerkleContext,
    /// Root index.
    pub root_index: u16,
}
/// Information about a compressed account being initialized, mutated or
/// closed. The `input` field holds the consumed state (absent for `init`);
/// the remaining fields describe the output state (absent for `close`).
#[derive(Debug)]
pub struct LightAccountInfo<'a> {
    /// Input account.
    pub(crate) input: Option<LightInputAccountInfo<'a>>,
    /// Owner of the account.
    ///
    /// Defaults to the program ID.
    pub owner: &'a Pubkey,
    /// Lamports.
    pub lamports: Option<u64>,
    /// Discriminator.
    pub discriminator: Option<[u8; 8]>,
    /// Account data.
    pub data: Option<Rc<RefCell<Vec<u8>>>>,
    /// Data hash.
    pub data_hash: Option<[u8; 32]>,
    /// Address.
    pub address: Option<[u8; 32]>,
    /// New Merkle tree index. Set `None` for `close` account infos.
    pub output_merkle_tree_index: Option<u8>,
    /// New address parameters.
    pub new_address_params: Option<PackedNewAddressParams>,
}
impl<'a> LightAccountInfo<'a> {
pub fn from_meta_init(
meta: &'a LightAccountMeta,
discriminator: [u8; 8],
new_address: [u8; 32],
new_address_seed: [u8; 32],
space: Option<usize>,
owner: &'a Pubkey,
) -> Result<Self> {
let address_merkle_context = meta
.address_merkle_context
.as_ref()
.ok_or(LightSdkError::ExpectedAddressMerkleContext)?;
let new_address_params = PackedNewAddressParams {
seed: new_address_seed,
address_queue_account_index: address_merkle_context.address_queue_pubkey_index,
address_merkle_tree_account_index: address_merkle_context
.address_merkle_tree_pubkey_index,
address_merkle_tree_root_index: meta
.address_merkle_tree_root_index
.ok_or(LightSdkError::ExpectedAddressRootIndex)?,
};
let data = match space {
Some(space) => Vec::with_capacity(space),
None => Vec::new(),
};
let data = Some(Rc::new(RefCell::new(data)));
let account_info = LightAccountInfo {
input: None,
owner,
// Needs to be assigned by the program.
lamports: None,
// Needs to be assigned by the program.
discriminator: Some(discriminator),
data,
// Needs to be assigned by the program.
data_hash: None,
address: Some(new_address),
output_merkle_tree_index: meta.output_merkle_tree_index,
new_address_params: Some(new_address_params),
};
Ok(account_info)
}
pub fn from_meta_mut(
meta: &'a LightAccountMeta,
discriminator: [u8; 8],
owner: &'a Pubkey,
) -> Result<Self> {
let input = LightInputAccountInfo {
lamports: meta.lamports,
address: meta.address,
data: meta.data.as_deref(),
// Needs to be assigned by the program.
data_hash: None,
merkle_context: meta
.merkle_context
.ok_or(LightSdkError::ExpectedMerkleContext)?,
root_index: meta
.merkle_tree_root_index
.ok_or(LightSdkError::ExpectedRootIndex)?,
};
let account_info = LightAccountInfo {
input: Some(input),
owner,
// Needs to be assigned by the program.
lamports: None,
// Needs to be assigned by the program.
discriminator: Some(discriminator),
// NOTE(vadorovsky): A `clone()` here is unavoidable.
// What we have here is an immutable reference to `LightAccountMeta`,
// from which we can take an immutable reference to `data`.
//
// - That immutable reference can be used in the input account,
// since we don't make modifications there.
// - In the most cases, we intend to make modifications for the
// output account. We make a copy, which then we try not to
// copy again until the moment of creating a CPI call.
//
// The reason why `solana_account_info::AccountInfo` stores data as
// `Rc<RefCell<&'a mut [u8]>>` is that the reference points to
// runtime's memory region which provides the accout and is mutable
// by design.
//
// In our case, compressed accounts are part of instruction data.
// Instruction data is immutable (`&[u8]`). There is no way to
// mutate instruction data without copy.
data: meta
.data
.as_ref()
.map(|data| Rc::new(RefCell::new(data.clone()))),
// Needs to be assigned by the program.
data_hash: None,
address: meta.address,
output_merkle_tree_index: meta.output_merkle_tree_index,
new_address_params: None,
};
Ok(account_info)
}
pub fn from_meta_close(
meta: &'a LightAccountMeta,
discriminator: [u8; 8],
owner: &'a Pubkey,
) -> Result<Self> {
let input = LightInputAccountInfo {
lamports: meta.lamports,
address: meta.address,
data: meta.data.as_deref(),
// Needs to be assigned by the program.
data_hash: None,
merkle_context: meta
.merkle_context
.ok_or(LightSdkError::ExpectedMerkleContext)?,
root_index: meta
.merkle_tree_root_index
.ok_or(LightSdkError::ExpectedRootIndex)?,
};
let account_info = LightAccountInfo {
input: Some(input),
owner,
// Needs to be assigned by the program.
lamports: None,
// Needs to be assigned by the program.
discriminator: Some(discriminator),
data: None,
// Needs to be assigned by the program.
data_hash: None,
address: meta.address,
output_merkle_tree_index: None,
new_address_params: None,
};
Ok(account_info)
}
pub(crate) fn from_meta_init_without_output_data(
meta: &'a LightAccountMeta,
discriminator: [u8; 8],
new_address: [u8; 32],
new_address_seed: [u8; 32],
owner: &'a Pubkey,
) -> Result<Self> {
let address_merkle_context = meta
.address_merkle_context
.as_ref()
.ok_or(LightSdkError::ExpectedAddressMerkleContext)?;
let new_address_params = PackedNewAddressParams {
seed: new_address_seed,
address_queue_account_index: address_merkle_context.address_queue_pubkey_index,
address_merkle_tree_account_index: address_merkle_context
.address_merkle_tree_pubkey_index,
address_merkle_tree_root_index: meta
.address_merkle_tree_root_index
.ok_or(LightSdkError::ExpectedAddressRootIndex)?,
};
let account_info = LightAccountInfo {
input: None,
owner,
// Needs to be assigned by the program.
lamports: None,
// Needs to be assigned by the program.
discriminator: Some(discriminator),
data: None,
data_hash: None,
address: Some(new_address),
output_merkle_tree_index: meta.output_merkle_tree_index,
new_address_params: Some(new_address_params),
};
Ok(account_info)
}
/// Converts [`LightAcccountMeta`], representing either a `mut` or `close`
/// account, to a `LightAccountInfo` without output data set.
///
/// Not intended for external use, intended for building upper abstraction
/// layers which handle data serialization on their own.
pub(crate) fn from_meta_without_output_data(
meta: &'a LightAccountMeta,
discriminator: [u8; 8],
owner: &'a Pubkey,
) -> Result<Self> {
let input = LightInputAccountInfo {
lamports: meta.lamports,
address: meta.address,
data: meta.data.as_deref(),
// Needs to be assigned by the program.
data_hash: None,
merkle_context: meta
.merkle_context
.ok_or(LightSdkError::ExpectedMerkleContext)?,
root_index: meta
.merkle_tree_root_index
.ok_or(LightSdkError::ExpectedRootIndex)?,
};
let account_info = LightAccountInfo {
input: Some(input),
owner,
// Needs to be assigned by the program.
lamports: None,
discriminator: Some(discriminator),
// Needs to be assigned by the program.
data: None,
data_hash: None,
address: meta.address,
output_merkle_tree_index: meta.output_merkle_tree_index,
new_address_params: None,
};
Ok(account_info)
}
pub fn compress_and_add_sol(&mut self, lamports: u64) {
self.lamports = Some(lamports);
}
/// Returns the original data sent by the client, before any potential
/// modifications made by the program.
pub fn initial_data(&self) -> Option<&[u8]> {
self.input.as_ref().and_then(|input| input.data)
}
/// Converts the given [LightAccountInfo] into a
/// [PackedCompressedAccountWithMerkleContext] which can be sent to the
/// light-system program.
pub fn input_compressed_account(
&self,
) -> Result<Option<PackedCompressedAccountWithMerkleContext>> {
match self.input.as_ref() {
Some(input) => {
let data = match input.data {
Some(_) => {
let discriminator = self
.discriminator
.ok_or(LightSdkError::ExpectedDiscriminator)?;
let data_hash = input.data_hash.ok_or(LightSdkError::ExpectedHash)?;
Some(CompressedAccountData {
discriminator,
data: Vec::new(),
data_hash,
})
}
None => None,
};
Ok(Some(PackedCompressedAccountWithMerkleContext {
compressed_account: CompressedAccount {
owner: *self.owner,
lamports: input.lamports.unwrap_or(0),
address: input.address,
data,
},
merkle_context: input.merkle_context,
root_index: input.root_index,
read_only: false,
}))
}
None => Ok(None),
}
}
pub fn output_compressed_account(
&self,
) -> Result<Option<OutputCompressedAccountWithPackedContext>> {
match self.output_merkle_tree_index {
Some(merkle_tree_index) => {
let data = match self.data {
Some(_) => {
let discriminator = self
.discriminator
.ok_or(LightSdkError::ExpectedDiscriminator)?;
let data_hash = self.data_hash.ok_or(LightSdkError::ExpectedHash)?;
Some(CompressedAccountData {
discriminator,
data: Vec::new(),
data_hash,
})
}
None => None,
};
Ok(Some(OutputCompressedAccountWithPackedContext {
compressed_account: CompressedAccount {
owner: *self.owner,
lamports: self.lamports.unwrap_or(0),
address: self.address,
data,
},
merkle_tree_index,
}))
}
None => Ok(None),
}
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/constants.rs
|
use light_macros::pubkey;
use solana_program::pubkey::Pubkey;
/// Seed of the CPI authority.
pub const CPI_AUTHORITY_PDA_SEED: &[u8] = b"cpi_authority";
/// ID of the account-compression program.
pub const PROGRAM_ID_ACCOUNT_COMPRESSION: Pubkey =
    pubkey!("compr6CUsB5m2jS4Y3831ztGSTnDpnKJTKS95d64XVq");
/// ID of the SPL noop program.
pub const PROGRAM_ID_NOOP: Pubkey = pubkey!("noopb9bkMVfRPU8AsbpTUg8AQkHtKwMYZiFUjNRtMmV");
/// ID of the light-system program.
pub const PROGRAM_ID_LIGHT_SYSTEM: Pubkey = pubkey!("SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7");
/// ID of the light-compressed-token program.
pub const PROGRAM_ID_LIGHT_TOKEN: Pubkey = pubkey!("cTokenmWW8bLPjZEBAUgYy3zKxQZW6VKi7bqNFEVv3m");
/// Height of a state Merkle tree.
pub const STATE_MERKLE_TREE_HEIGHT: usize = 26;
/// Changelog capacity of a state Merkle tree.
pub const STATE_MERKLE_TREE_CHANGELOG: usize = 1400;
/// Root-history capacity of a state Merkle tree.
pub const STATE_MERKLE_TREE_ROOTS: usize = 2400;
/// Canopy depth of a state Merkle tree.
pub const STATE_MERKLE_TREE_CANOPY_DEPTH: usize = 10;
/// Height of an address Merkle tree.
pub const ADDRESS_MERKLE_TREE_HEIGHT: usize = 26;
/// Changelog capacity of an address Merkle tree.
pub const ADDRESS_MERKLE_TREE_CHANGELOG: usize = 1400;
/// Root-history capacity of an address Merkle tree.
pub const ADDRESS_MERKLE_TREE_ROOTS: usize = 2400;
/// Canopy depth of an address Merkle tree.
pub const ADDRESS_MERKLE_TREE_CANOPY_DEPTH: usize = 10;
/// Indexed changelog capacity of an address Merkle tree.
pub const ADDRESS_MERKLE_TREE_INDEXED_CHANGELOG: usize = 1400;
/// Discriminator identifying compressed token accounts.
pub const TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR: [u8; 8] = [2, 0, 0, 0, 0, 0, 0, 0];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/legacy.rs
|
#![cfg(feature = "legacy")]
//! Legacy types re-imported from programs which should be removed as soon as
//! possible.
pub use light_system_program::{
invoke::processor::CompressedProof,
sdk::{
compressed_account::{
CompressedAccount, CompressedAccountData, CompressedAccountWithMerkleContext,
PackedCompressedAccountWithMerkleContext, PackedMerkleContext, QueueIndex,
},
CompressedCpiContext,
},
InstructionDataInvokeCpi, NewAddressParams, NewAddressParamsPacked,
OutputCompressedAccountWithPackedContext,
};
/// Builds the `InstructionDataInvokeCpi` payload for creating a single
/// compressed PDA: one new address and one output account, with no input
/// accounts and no lamport (de)compression.
pub fn create_cpi_inputs_for_new_account(
    proof: CompressedProof,
    new_address_params: NewAddressParamsPacked,
    compressed_pda: OutputCompressedAccountWithPackedContext,
    cpi_context: Option<CompressedCpiContext>,
) -> InstructionDataInvokeCpi {
    InstructionDataInvokeCpi {
        proof: Some(proof),
        new_address_params: vec![new_address_params],
        output_compressed_accounts: vec![compressed_pda],
        input_compressed_accounts_with_merkle_context: Vec::new(),
        relay_fee: None,
        compress_or_decompress_lamports: None,
        is_compress: false,
        cpi_context,
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/error.rs
|
use anchor_lang::prelude::error_code;
/// Errors emitted by the Light SDK helpers. The `#[msg]` strings become the
/// on-chain/client error messages via Anchor's `error_code` macro.
#[error_code]
pub enum LightSdkError {
    #[msg("Constraint violation")]
    ConstraintViolation,
    #[msg("Invalid light-system-program ID")]
    InvalidLightSystemProgram,
    #[msg("Expected accounts in the instruction")]
    ExpectedAccounts,
    #[msg("Expected address Merkle context to be provided")]
    ExpectedAddressMerkleContext,
    #[msg("Expected address root index to be provided")]
    ExpectedAddressRootIndex,
    #[msg("Accounts with a specified input are expected to have data")]
    ExpectedData,
    #[msg("Accounts with specified data are expected to have a discriminator")]
    ExpectedDiscriminator,
    #[msg("Accounts with specified data are expected to have a hash")]
    ExpectedHash,
    #[msg("`mut` and `close` accounts are expected to have a Merkle context")]
    ExpectedMerkleContext,
    #[msg("Expected root index to be provided")]
    ExpectedRootIndex,
    #[msg("Cannot transfer lamports from an account without input")]
    TransferFromNoInput,
    #[msg("Cannot transfer from an account without lamports")]
    TransferFromNoLamports,
    #[msg("Account, from which a transfer was attempted, has insufficient amount of lamports")]
    TransferFromInsufficientLamports,
    #[msg("Integer overflow resulting from too large resulting amount")]
    TransferIntegerOverflow,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/token.rs
|
use anchor_lang::{AnchorDeserialize, AnchorSerialize};
use solana_program::pubkey::Pubkey;
use crate::compressed_account::CompressedAccountWithMerkleContext;
/// State of a compressed token account.
#[derive(Clone, Copy, Debug, PartialEq, Eq, AnchorDeserialize, AnchorSerialize)]
#[repr(u8)]
pub enum AccountState {
    Initialized,
    Frozen,
}
/// Data payload of a compressed token account.
#[derive(Debug, PartialEq, Eq, AnchorDeserialize, AnchorSerialize, Clone)]
pub struct TokenData {
    /// The mint associated with this account
    pub mint: Pubkey,
    /// The owner of this account.
    pub owner: Pubkey,
    /// The amount of tokens this account holds.
    pub amount: u64,
    /// If `delegate` is `Some` then `delegated_amount` represents
    /// the amount authorized by the delegate
    pub delegate: Option<Pubkey>,
    /// The account's state
    pub state: AccountState,
    /// Placeholder for TokenExtension tlv data (unimplemented)
    pub tlv: Option<Vec<u8>>,
}
/// [`TokenData`] paired with the compressed account (and its Merkle context)
/// that stores it.
#[derive(Debug, Clone)]
pub struct TokenDataWithMerkleContext {
    pub token_data: TokenData,
    pub compressed_account: CompressedAccountWithMerkleContext,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/lib.rs
|
// Re-export the macro crates so downstream programs only need a dependency on
// the SDK crate itself.
pub use light_macros::*;
pub use light_sdk_macros::*;
pub mod account;
pub mod account_info;
pub mod account_meta;
pub mod address;
pub mod compressed_account;
pub mod constants;
// Constants are additionally re-exported at the crate root for convenience.
pub use constants::*;
pub mod context;
pub mod error;
pub mod event;
pub mod instruction_data;
pub mod legacy;
pub mod merkle_context;
pub mod program_merkle_context;
pub mod proof;
pub mod state;
pub mod token;
pub mod traits;
pub mod transfer;
pub mod utils;
pub mod verify;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/merkle_context.rs
|
use std::collections::HashMap;
use anchor_lang::prelude::{AccountMeta, AnchorDeserialize, AnchorSerialize, Pubkey};
/// Collection of remaining accounts which are sent to the program.
#[derive(Default)]
pub struct RemainingAccounts {
    // Index that will be assigned to the next newly inserted pubkey.
    next_index: u8,
    // Maps each pubkey to the index it was first inserted at.
    map: HashMap<Pubkey, u8>,
}
impl RemainingAccounts {
    /// Returns the index of the provided `pubkey` in the collection.
    ///
    /// If the provided `pubkey` is not a part of the collection, it gets
    /// inserted with a `next_index`.
    ///
    /// If the provided `pubkey` already exists in the collection, its already
    /// existing index is returned.
    pub fn insert_or_get(&mut self, pubkey: Pubkey) -> u8 {
        *self.map.entry(pubkey).or_insert_with(|| {
            let index = self.next_index;
            self.next_index += 1;
            index
        })
    }
    /// Converts the collection of accounts to a vector of
    /// [`AccountMeta`](solana_sdk::instruction::AccountMeta), which can be used
    /// as remaining accounts in instructions or CPI calls.
    ///
    /// The metas are ordered by insertion index, i.e. the order in which the
    /// pubkeys were first passed to [`insert_or_get`](Self::insert_or_get).
    pub fn to_account_metas(&self) -> Vec<AccountMeta> {
        let mut accounts = self
            .map
            .iter()
            .map(|(pubkey, index)| {
                (
                    *index,
                    AccountMeta {
                        pubkey: *pubkey,
                        is_signer: false,
                        is_writable: true,
                    },
                )
            })
            .collect::<Vec<(u8, AccountMeta)>>();
        // Hash maps are not sorted, so restore insertion order by sorting on
        // the stored index. Sorting on the `u8` key directly avoids the
        // previous `usize` cast, and consuming the vector below avoids
        // cloning every `AccountMeta`.
        accounts.sort_unstable_by_key(|(index, _)| *index);
        accounts.into_iter().map(|(_, meta)| meta).collect()
    }
}
#[derive(Debug, Clone, Copy, AnchorSerialize, AnchorDeserialize, PartialEq, Default)]
pub struct QueueIndex {
    /// Id of queue in queue account.
    pub queue_id: u8,
    /// Index of compressed account hash in queue.
    pub index: u16,
}
/// Merkle context of a compressed account, carrying the full pubkeys of the
/// Merkle tree and nullifier queue accounts.
#[derive(Debug, Clone, Copy, AnchorSerialize, AnchorDeserialize, PartialEq, Default)]
pub struct MerkleContext {
    pub merkle_tree_pubkey: Pubkey,
    pub nullifier_queue_pubkey: Pubkey,
    pub leaf_index: u32,
    /// Index of leaf in queue. Placeholder of batched Merkle tree updates
    /// currently unimplemented.
    pub queue_index: Option<QueueIndex>,
}
/// [`MerkleContext`] with the account pubkeys replaced by `u8` indices into
/// the remaining accounts.
#[derive(Debug, Clone, Copy, AnchorSerialize, AnchorDeserialize, PartialEq, Default)]
pub struct PackedMerkleContext {
    pub merkle_tree_pubkey_index: u8,
    pub nullifier_queue_pubkey_index: u8,
    pub leaf_index: u32,
    /// Index of leaf in queue. Placeholder of batched Merkle tree updates
    /// currently unimplemented.
    pub queue_index: Option<QueueIndex>,
}
/// Packs every [`MerkleContext`] yielded by `merkle_contexts`, registering the
/// referenced pubkeys in `remaining_accounts`.
///
/// Packing happens lazily, as the returned iterator is advanced.
pub fn pack_merkle_contexts<'a, I>(
    merkle_contexts: I,
    remaining_accounts: &'a mut RemainingAccounts,
) -> impl Iterator<Item = PackedMerkleContext> + 'a
where
    I: Iterator<Item = &'a MerkleContext> + 'a,
{
    merkle_contexts.map(move |ctx| pack_merkle_context(ctx, remaining_accounts))
}
/// Packs a single [`MerkleContext`], replacing its Merkle tree and nullifier
/// queue pubkeys with their indices in `remaining_accounts`.
pub fn pack_merkle_context(
    merkle_context: &MerkleContext,
    remaining_accounts: &mut RemainingAccounts,
) -> PackedMerkleContext {
    // The tree account is registered before the queue account, so previously
    // unseen pubkeys get indices in that order.
    let merkle_tree_pubkey_index =
        remaining_accounts.insert_or_get(merkle_context.merkle_tree_pubkey);
    let nullifier_queue_pubkey_index =
        remaining_accounts.insert_or_get(merkle_context.nullifier_queue_pubkey);
    PackedMerkleContext {
        merkle_tree_pubkey_index,
        nullifier_queue_pubkey_index,
        leaf_index: merkle_context.leaf_index,
        queue_index: merkle_context.queue_index,
    }
}
/// Pubkeys of an address Merkle tree and its associated address queue.
#[derive(Debug, Clone, Copy, AnchorSerialize, AnchorDeserialize, PartialEq, Default)]
pub struct AddressMerkleContext {
    pub address_merkle_tree_pubkey: Pubkey,
    pub address_queue_pubkey: Pubkey,
}
/// [`AddressMerkleContext`] with the pubkeys replaced by `u8` indices into the
/// remaining accounts.
#[derive(Debug, Clone, Copy, AnchorSerialize, AnchorDeserialize, PartialEq, Default)]
pub struct PackedAddressMerkleContext {
    pub address_merkle_tree_pubkey_index: u8,
    pub address_queue_pubkey_index: u8,
}
/// Returns an iterator of [`PackedAddressMerkleContext`] and fills up
/// `remaining_accounts` based on the given `merkle_contexts`.
///
/// Accounts are registered lazily, as the returned iterator is advanced.
pub fn pack_address_merkle_contexts<'a, I>(
    address_merkle_contexts: I,
    remaining_accounts: &'a mut RemainingAccounts,
) -> impl Iterator<Item = PackedAddressMerkleContext> + 'a
where
    I: Iterator<Item = &'a AddressMerkleContext> + 'a,
{
    address_merkle_contexts
        .map(move |ctx| pack_address_merkle_context(ctx, remaining_accounts))
}
/// Returns a [`PackedAddressMerkleContext`] and fills up `remaining_accounts`
/// based on the given `merkle_context`.
pub fn pack_address_merkle_context(
    address_merkle_context: &AddressMerkleContext,
    remaining_accounts: &mut RemainingAccounts,
) -> PackedAddressMerkleContext {
    // Tree first, then queue: previously unseen pubkeys get indices in that
    // order.
    let address_merkle_tree_pubkey_index =
        remaining_accounts.insert_or_get(address_merkle_context.address_merkle_tree_pubkey);
    let address_queue_pubkey_index =
        remaining_accounts.insert_or_get(address_merkle_context.address_queue_pubkey);
    PackedAddressMerkleContext {
        address_merkle_tree_pubkey_index,
        address_queue_pubkey_index,
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Verifies index assignment on first insertion, idempotency on repeated
    // insertion, and insertion-ordered `AccountMeta` output.
    #[test]
    fn test_remaining_accounts() {
        let mut remaining_accounts = RemainingAccounts::default();
        let pubkey_1 = Pubkey::new_unique();
        let pubkey_2 = Pubkey::new_unique();
        let pubkey_3 = Pubkey::new_unique();
        let pubkey_4 = Pubkey::new_unique();
        // Initial insertion.
        assert_eq!(remaining_accounts.insert_or_get(pubkey_1), 0);
        assert_eq!(remaining_accounts.insert_or_get(pubkey_2), 1);
        assert_eq!(remaining_accounts.insert_or_get(pubkey_3), 2);
        assert_eq!(
            remaining_accounts.to_account_metas().as_slice(),
            &[
                AccountMeta {
                    pubkey: pubkey_1,
                    is_signer: false,
                    is_writable: true,
                },
                AccountMeta {
                    pubkey: pubkey_2,
                    is_signer: false,
                    is_writable: true,
                },
                AccountMeta {
                    pubkey: pubkey_3,
                    is_signer: false,
                    is_writable: true,
                }
            ]
        );
        // Insertion of already existing pubkeys.
        assert_eq!(remaining_accounts.insert_or_get(pubkey_1), 0);
        assert_eq!(remaining_accounts.insert_or_get(pubkey_2), 1);
        assert_eq!(remaining_accounts.insert_or_get(pubkey_3), 2);
        assert_eq!(
            remaining_accounts.to_account_metas().as_slice(),
            &[
                AccountMeta {
                    pubkey: pubkey_1,
                    is_signer: false,
                    is_writable: true,
                },
                AccountMeta {
                    pubkey: pubkey_2,
                    is_signer: false,
                    is_writable: true,
                },
                AccountMeta {
                    pubkey: pubkey_3,
                    is_signer: false,
                    is_writable: true,
                }
            ]
        );
        // Again, initial insertion.
        assert_eq!(remaining_accounts.insert_or_get(pubkey_4), 3);
        assert_eq!(
            remaining_accounts.to_account_metas().as_slice(),
            &[
                AccountMeta {
                    pubkey: pubkey_1,
                    is_signer: false,
                    is_writable: true,
                },
                AccountMeta {
                    pubkey: pubkey_2,
                    is_signer: false,
                    is_writable: true,
                },
                AccountMeta {
                    pubkey: pubkey_3,
                    is_signer: false,
                    is_writable: true,
                },
                AccountMeta {
                    pubkey: pubkey_4,
                    is_signer: false,
                    is_writable: true,
                }
            ]
        );
    }
    // A single context packs with tree index 0 and queue index 1.
    #[test]
    fn test_pack_merkle_context() {
        let mut remaining_accounts = RemainingAccounts::default();
        let merkle_tree_pubkey = Pubkey::new_unique();
        let nullifier_queue_pubkey = Pubkey::new_unique();
        let merkle_context = MerkleContext {
            merkle_tree_pubkey,
            nullifier_queue_pubkey,
            leaf_index: 69,
            queue_index: None,
        };
        let packed_merkle_context = pack_merkle_context(&merkle_context, &mut remaining_accounts);
        assert_eq!(
            packed_merkle_context,
            PackedMerkleContext {
                merkle_tree_pubkey_index: 0,
                nullifier_queue_pubkey_index: 1,
                leaf_index: 69,
                queue_index: None,
            }
        )
    }
    // Distinct contexts receive consecutive account indices; `leaf_index` and
    // `queue_index` pass through unchanged.
    #[test]
    fn test_pack_merkle_contexts() {
        let mut remaining_accounts = RemainingAccounts::default();
        let merkle_contexts = &[
            MerkleContext {
                merkle_tree_pubkey: Pubkey::new_unique(),
                nullifier_queue_pubkey: Pubkey::new_unique(),
                leaf_index: 10,
                queue_index: None,
            },
            MerkleContext {
                merkle_tree_pubkey: Pubkey::new_unique(),
                nullifier_queue_pubkey: Pubkey::new_unique(),
                leaf_index: 11,
                queue_index: Some(QueueIndex {
                    queue_id: 69,
                    index: 420,
                }),
            },
            MerkleContext {
                merkle_tree_pubkey: Pubkey::new_unique(),
                nullifier_queue_pubkey: Pubkey::new_unique(),
                leaf_index: 12,
                queue_index: None,
            },
        ];
        let packed_merkle_contexts =
            pack_merkle_contexts(merkle_contexts.iter(), &mut remaining_accounts);
        assert_eq!(
            packed_merkle_contexts.collect::<Vec<_>>(),
            &[
                PackedMerkleContext {
                    merkle_tree_pubkey_index: 0,
                    nullifier_queue_pubkey_index: 1,
                    leaf_index: 10,
                    queue_index: None
                },
                PackedMerkleContext {
                    merkle_tree_pubkey_index: 2,
                    nullifier_queue_pubkey_index: 3,
                    leaf_index: 11,
                    queue_index: Some(QueueIndex {
                        queue_id: 69,
                        index: 420
                    })
                },
                PackedMerkleContext {
                    merkle_tree_pubkey_index: 4,
                    nullifier_queue_pubkey_index: 5,
                    leaf_index: 12,
                    queue_index: None,
                }
            ]
        );
    }
    // A single address context packs with tree index 0 and queue index 1.
    #[test]
    fn test_pack_address_merkle_context() {
        let mut remaining_accounts = RemainingAccounts::default();
        let address_merkle_context = AddressMerkleContext {
            address_merkle_tree_pubkey: Pubkey::new_unique(),
            address_queue_pubkey: Pubkey::new_unique(),
        };
        let packed_address_merkle_context =
            pack_address_merkle_context(&address_merkle_context, &mut remaining_accounts);
        assert_eq!(
            packed_address_merkle_context,
            PackedAddressMerkleContext {
                address_merkle_tree_pubkey_index: 0,
                address_queue_pubkey_index: 1,
            }
        )
    }
    // Distinct address contexts receive consecutive account indices.
    #[test]
    fn test_pack_address_merkle_contexts() {
        let mut remaining_accounts = RemainingAccounts::default();
        let address_merkle_contexts = &[
            AddressMerkleContext {
                address_merkle_tree_pubkey: Pubkey::new_unique(),
                address_queue_pubkey: Pubkey::new_unique(),
            },
            AddressMerkleContext {
                address_merkle_tree_pubkey: Pubkey::new_unique(),
                address_queue_pubkey: Pubkey::new_unique(),
            },
            AddressMerkleContext {
                address_merkle_tree_pubkey: Pubkey::new_unique(),
                address_queue_pubkey: Pubkey::new_unique(),
            },
        ];
        let packed_address_merkle_contexts =
            pack_address_merkle_contexts(address_merkle_contexts.iter(), &mut remaining_accounts);
        assert_eq!(
            packed_address_merkle_contexts.collect::<Vec<_>>(),
            &[
                PackedAddressMerkleContext {
                    address_merkle_tree_pubkey_index: 0,
                    address_queue_pubkey_index: 1,
                },
                PackedAddressMerkleContext {
                    address_merkle_tree_pubkey_index: 2,
                    address_queue_pubkey_index: 3,
                },
                PackedAddressMerkleContext {
                    address_merkle_tree_pubkey_index: 4,
                    address_queue_pubkey_index: 5,
                }
            ]
        );
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/address.rs
|
use anchor_lang::{solana_program::pubkey::Pubkey, AnchorDeserialize, AnchorSerialize};
use light_utils::{hash_to_bn254_field_size_be, hashv_to_bn254_field_size_be};
use solana_program::account_info::AccountInfo;
use crate::merkle_context::{AddressMerkleContext, RemainingAccounts};
/// Parameters for creating a new address, with full account pubkeys.
#[derive(Debug, PartialEq, Default, Clone, AnchorDeserialize, AnchorSerialize)]
pub struct NewAddressParams {
    pub seed: [u8; 32],
    pub address_queue_pubkey: Pubkey,
    pub address_merkle_tree_pubkey: Pubkey,
    pub address_merkle_tree_root_index: u16,
}
/// [`NewAddressParams`] with the account pubkeys replaced by `u8` indices
/// into the remaining accounts.
#[derive(Debug, PartialEq, Default, Clone, Copy, AnchorDeserialize, AnchorSerialize)]
pub struct PackedNewAddressParams {
    pub seed: [u8; 32],
    pub address_queue_account_index: u8,
    pub address_merkle_tree_account_index: u8,
    pub address_merkle_tree_root_index: u16,
}
/// An address together with the address Merkle context it belongs to.
pub struct AddressWithMerkleContext {
    pub address: [u8; 32],
    pub address_merkle_context: AddressMerkleContext,
}
/// Packs a slice of [`NewAddressParams`], registering each queue and Merkle
/// tree pubkey in `remaining_accounts` and replacing it with its index.
pub fn pack_new_addresses_params(
    addresses_params: &[NewAddressParams],
    remaining_accounts: &mut RemainingAccounts,
) -> Vec<PackedNewAddressParams> {
    let mut packed = Vec::with_capacity(addresses_params.len());
    for params in addresses_params {
        // Queue first, then tree: previously unseen pubkeys get indices in
        // that order.
        let address_queue_account_index =
            remaining_accounts.insert_or_get(params.address_queue_pubkey);
        let address_merkle_tree_account_index =
            remaining_accounts.insert_or_get(params.address_merkle_tree_pubkey);
        packed.push(PackedNewAddressParams {
            seed: params.seed,
            address_queue_account_index,
            address_merkle_tree_account_index,
            address_merkle_tree_root_index: params.address_merkle_tree_root_index,
        });
    }
    packed
}
/// Packs a single [`NewAddressParams`], registering its queue and Merkle tree
/// pubkeys in `remaining_accounts`.
pub fn pack_new_address_params(
    address_params: NewAddressParams,
    remaining_accounts: &mut RemainingAccounts,
) -> PackedNewAddressParams {
    pack_new_addresses_params(&[address_params], remaining_accounts)[0]
}
/// Reconstructs [`NewAddressParams`] from its packed form by resolving the
/// account indices against `remaining_accounts`.
pub fn unpack_new_address_params(
    address_params: &PackedNewAddressParams,
    remaining_accounts: &[AccountInfo],
) -> NewAddressParams {
    let tree_account =
        &remaining_accounts[address_params.address_merkle_tree_account_index as usize];
    let queue_account = &remaining_accounts[address_params.address_queue_account_index as usize];
    NewAddressParams {
        seed: address_params.seed,
        address_queue_pubkey: *queue_account.key,
        address_merkle_tree_pubkey: *tree_account.key,
        address_merkle_tree_root_index: address_params.address_merkle_tree_root_index,
    }
}
/// Derives a single address seed for a compressed account, based on the
/// provided multiple `seeds` and `program_id`.
///
/// The program ID is always hashed in as the first input, so seeds from
/// different programs cannot collide.
///
/// # Examples
///
/// ```ignore
/// use light_sdk::address::derive_address_seed;
///
/// let address_seed = derive_address_seed(
///     &[b"my_compressed_account"],
///     &crate::ID,
/// );
/// ```
pub(crate) fn derive_address_seed(seeds: &[&[u8]], program_id: &Pubkey) -> [u8; 32] {
    let mut inputs = Vec::with_capacity(seeds.len() + 1);
    let program_id = program_id.to_bytes();
    inputs.push(program_id.as_slice());
    inputs.extend(seeds);
    hashv_to_bn254_field_size_be(inputs.as_slice())
}
/// Derives an address for a compressed account, based on the provided singular
/// `address_seed` and the Merkle tree pubkey from `address_merkle_context`.
pub(crate) fn derive_address_from_seed(
    address_seed: &[u8; 32],
    address_merkle_context: &AddressMerkleContext,
) -> [u8; 32] {
    // Hash input is the 32-byte Merkle tree pubkey followed by the 32-byte
    // seed.
    let mut input = Vec::with_capacity(64);
    input.extend_from_slice(&address_merkle_context.address_merkle_tree_pubkey.to_bytes());
    input.extend_from_slice(address_seed);
    // PANICS: Not being able to find the bump for truncating the hash is
    // practically impossible. Quite frankly, we should just remove that error
    // inside.
    hash_to_bn254_field_size_be(input.as_slice()).unwrap().0
}
/// Derives an address from provided seeds. Returns that address and a singular
/// seed.
///
/// # Examples
///
/// ```ignore
/// use light_sdk::{address::derive_address, pubkey};
///
/// let address_merkle_context = {
///     address_merkle_tree_pubkey: pubkey!("amt1Ayt45jfbdw5YSo7iz6WZxUmnZsQTYXy82hVwyC2"),
///     address_queue_pubkey: pubkey!("aq1S9z4reTSQAdgWHGD2zDaS39sjGrAxbR31vxJ2F4F"),
/// };
/// let address = derive_address(
///     &[b"my_compressed_account"],
///     &address_merkle_context,
///     &crate::ID,
/// );
/// ```
pub fn derive_address(
    seeds: &[&[u8]],
    address_merkle_context: &AddressMerkleContext,
    program_id: &Pubkey,
) -> ([u8; 32], [u8; 32]) {
    let address_seed = derive_address_seed(seeds, program_id);
    (
        derive_address_from_seed(&address_seed, address_merkle_context),
        address_seed,
    )
}
/// Derives an address from provided parameters.
///
/// Only the `seed` and `address_merkle_tree_pubkey` fields are used; the
/// remaining fields of [`NewAddressParams`] do not affect the result.
pub fn derive_address_from_params(params: NewAddressParams) -> [u8; 32] {
    let mut input = params.address_merkle_tree_pubkey.to_bytes().to_vec();
    input.extend_from_slice(&params.seed);
    // PANICS: Not being able to find the bump for truncating the hash is
    // practically impossible. Quite frankly, we should just remove that error
    // inside.
    hash_to_bn254_field_size_be(input.as_slice()).unwrap().0
}
#[cfg(test)]
mod test {
    use light_macros::pubkey;
    use super::*;
    // Pins the address-seed derivation against known-good vectors.
    #[test]
    fn test_derive_address_seed() {
        let program_id = pubkey!("7yucc7fL3JGbyMwg4neUaenNSdySS39hbAk89Ao3t1Hz");
        let address_seed = derive_address_seed(&[b"foo", b"bar"], &program_id);
        assert_eq!(
            address_seed,
            [
                0, 246, 150, 3, 192, 95, 53, 123, 56, 139, 206, 179, 253, 133, 115, 103, 120, 155,
                251, 72, 250, 47, 117, 217, 118, 59, 174, 207, 49, 101, 201, 110
            ]
        );
        let address_seed = derive_address_seed(&[b"ayy", b"lmao"], &program_id);
        assert_eq!(
            address_seed,
            [
                0, 202, 44, 25, 221, 74, 144, 92, 69, 168, 38, 19, 206, 208, 29, 162, 53, 27, 120,
                214, 152, 116, 15, 107, 212, 168, 33, 121, 187, 10, 76, 233
            ]
        );
    }
    // Pins full address derivation against known-good vectors, exercising both
    // the two-step (seed, then address) and one-shot `derive_address` paths.
    #[test]
    fn test_derive_address() {
        let address_merkle_context = AddressMerkleContext {
            address_merkle_tree_pubkey: pubkey!("11111111111111111111111111111111"),
            address_queue_pubkey: pubkey!("22222222222222222222222222222222222222222222"),
        };
        let program_id = pubkey!("7yucc7fL3JGbyMwg4neUaenNSdySS39hbAk89Ao3t1Hz");
        let seeds: &[&[u8]] = &[b"foo", b"bar"];
        let expected_address_seed = [
            0, 246, 150, 3, 192, 95, 53, 123, 56, 139, 206, 179, 253, 133, 115, 103, 120, 155, 251,
            72, 250, 47, 117, 217, 118, 59, 174, 207, 49, 101, 201, 110,
        ];
        let expected_address = pubkey!("139uhyyBtEh4e1CBDJ68ooK5nCeWoncZf9HPyAfRrukA");
        let address_seed = derive_address_seed(seeds, &program_id);
        assert_eq!(address_seed, expected_address_seed);
        let address = derive_address_from_seed(&address_seed, &address_merkle_context);
        assert_eq!(address, expected_address.to_bytes());
        let (address, address_seed) = derive_address(seeds, &address_merkle_context, &program_id);
        assert_eq!(address_seed, expected_address_seed);
        assert_eq!(address, expected_address.to_bytes());
        let seeds: &[&[u8]] = &[b"ayy", b"lmao"];
        let expected_address_seed = [
            0, 202, 44, 25, 221, 74, 144, 92, 69, 168, 38, 19, 206, 208, 29, 162, 53, 27, 120, 214,
            152, 116, 15, 107, 212, 168, 33, 121, 187, 10, 76, 233,
        ];
        let expected_address = pubkey!("12bhHm6PQjbNmEn3Yu1Gq9k7XwVn2rZpzYokmLwbFazN");
        let address_seed = derive_address_seed(seeds, &program_id);
        assert_eq!(address_seed, expected_address_seed);
        let address = derive_address_from_seed(&address_seed, &address_merkle_context);
        assert_eq!(address, expected_address.to_bytes());
        let (address, address_seed) = derive_address(seeds, &address_merkle_context, &program_id);
        assert_eq!(address_seed, expected_address_seed);
        assert_eq!(address, expected_address.to_bytes());
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/instruction_data.rs
|
use std::io::{self, Cursor};
use borsh::{BorshDeserialize, BorshSerialize};
use crate::{account_meta::LightAccountMeta, proof::ProofRpcResult};
/// Borsh-encoded instruction payload: an optional validity proof followed by
/// optional account metas.
pub struct LightInstructionData {
    pub proof: Option<ProofRpcResult>,
    pub accounts: Option<Vec<LightAccountMeta>>,
}
impl LightInstructionData {
    /// Deserializes `proof` and then `accounts` from `bytes`.
    /// Any trailing bytes after the two fields are ignored.
    pub fn deserialize(bytes: &[u8]) -> Result<Self, io::Error> {
        let mut inputs = Cursor::new(bytes);
        let proof = Option::<ProofRpcResult>::deserialize_reader(&mut inputs)?;
        let accounts = Option::<Vec<LightAccountMeta>>::deserialize_reader(&mut inputs)?;
        Ok(LightInstructionData { proof, accounts })
    }
    /// Serializes the fields in the same order expected by
    /// [`Self::deserialize`].
    pub fn serialize(&self) -> Result<Vec<u8>, io::Error> {
        let mut bytes = Vec::new();
        self.proof.serialize(&mut bytes)?;
        self.accounts.serialize(&mut bytes)?;
        Ok(bytes)
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/event.rs
|
use anchor_lang::{AnchorDeserialize, AnchorSerialize};
use solana_program::pubkey::Pubkey;
use crate::compressed_account::OutputCompressedAccountWithPackedContext;
/// Sequence number of a Merkle tree account.
#[derive(Debug, Clone, AnchorDeserialize, AnchorSerialize, Default, PartialEq)]
pub struct MerkleTreeSequenceNumber {
    pub pubkey: Pubkey,
    pub seq: u64,
}
/// Event payload describing a public transaction: hashes of consumed input
/// accounts, the produced output accounts and where they were appended.
#[derive(Debug, Clone, AnchorDeserialize, AnchorSerialize, Default, PartialEq)]
pub struct PublicTransactionEvent {
    pub input_compressed_account_hashes: Vec<[u8; 32]>,
    pub output_compressed_account_hashes: Vec<[u8; 32]>,
    pub output_compressed_accounts: Vec<OutputCompressedAccountWithPackedContext>,
    pub output_leaf_indices: Vec<u32>,
    pub sequence_numbers: Vec<MerkleTreeSequenceNumber>,
    pub relay_fee: Option<u64>,
    pub is_compress: bool,
    pub compress_or_decompress_lamports: Option<u64>,
    pub pubkey_array: Vec<Pubkey>,
    pub message: Option<Vec<u8>>,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/program_merkle_context.rs
|
use anchor_lang::{prelude::AccountInfo, Key};
use crate::merkle_context::{AddressMerkleContext, PackedAddressMerkleContext};
/// Packs the given [`AddressMerkleContext`]s by looking up the position of
/// each pubkey in `remaining_accounts`.
///
/// Unlike the packing helpers in `merkle_context`, this variant does not
/// register missing accounts; every pubkey must already be present.
///
/// # Panics
///
/// Panics if a Merkle tree or queue pubkey is not found in
/// `remaining_accounts`.
pub fn pack_address_merkle_contexts(
    address_merkle_contexts: &[AddressMerkleContext],
    remaining_accounts: &[AccountInfo],
) -> Vec<PackedAddressMerkleContext> {
    address_merkle_contexts
        .iter()
        .map(|x| {
            let address_merkle_tree_pubkey_index = remaining_accounts
                .iter()
                .position(|account| account.key() == x.address_merkle_tree_pubkey)
                // `expect` instead of bare `unwrap` so a missing account is
                // diagnosable from the panic message.
                .expect("address Merkle tree account missing from remaining accounts")
                as u8;
            let address_queue_pubkey_index = remaining_accounts
                .iter()
                .position(|account| account.key() == x.address_queue_pubkey)
                .expect("address queue account missing from remaining accounts")
                as u8;
            PackedAddressMerkleContext {
                address_merkle_tree_pubkey_index,
                address_queue_pubkey_index,
            }
        })
        .collect::<Vec<_>>()
}
/// Packs a single [`AddressMerkleContext`] by looking up its pubkeys in
/// `remaining_accounts`.
///
/// # Panics
///
/// Panics if either pubkey is not present in `remaining_accounts`.
pub fn pack_address_merkle_context(
    address_merkle_context: AddressMerkleContext,
    remaining_accounts: &[AccountInfo],
) -> PackedAddressMerkleContext {
    pack_address_merkle_contexts(&[address_merkle_context], remaining_accounts)[0]
}
/// Resolves each [`PackedAddressMerkleContext`] back into an
/// [`AddressMerkleContext`] by indexing into `remaining_accounts`.
pub fn unpack_address_merkle_contexts(
    address_merkle_contexts: &[PackedAddressMerkleContext],
    remaining_accounts: &[AccountInfo],
) -> Vec<AddressMerkleContext> {
    let mut unpacked = Vec::with_capacity(address_merkle_contexts.len());
    for packed in address_merkle_contexts {
        let address_merkle_tree_pubkey =
            remaining_accounts[packed.address_merkle_tree_pubkey_index as usize].key();
        let address_queue_pubkey =
            remaining_accounts[packed.address_queue_pubkey_index as usize].key();
        unpacked.push(AddressMerkleContext {
            address_merkle_tree_pubkey,
            address_queue_pubkey,
        });
    }
    unpacked
}
/// Unpacks a single [`PackedAddressMerkleContext`] by resolving its indices
/// against `remaining_accounts`.
pub fn unpack_address_merkle_context(
    address_merkle_context: PackedAddressMerkleContext,
    remaining_accounts: &[AccountInfo],
) -> AddressMerkleContext {
    unpack_address_merkle_contexts(&[address_merkle_context], remaining_accounts)[0]
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/account.rs
|
use std::ops::{Deref, DerefMut};
use anchor_lang::prelude::{AnchorDeserialize, AnchorSerialize, Result};
use light_hasher::{DataHasher, Discriminator, Poseidon};
use solana_program::{program_error::ProgramError, pubkey::Pubkey};
use crate::{
account_info::LightAccountInfo,
account_meta::LightAccountMeta,
address::PackedNewAddressParams,
compressed_account::{
CompressedAccount, CompressedAccountData, OutputCompressedAccountWithPackedContext,
PackedCompressedAccountWithMerkleContext,
},
error::LightSdkError,
};
/// Implemented by account structs that can be constructed from a slice of
/// [`LightAccountInfo`].
pub trait LightAccounts<'a>: Sized {
    fn try_light_accounts(accounts: &'a [LightAccountInfo]) -> Result<Self>;
}
// TODO(vadorovsky): Implement `LightAccountLoader`.
/// A wrapper which abstracts away the UTXO model.
pub struct LightAccount<'info, T>
where
    T: AnchorDeserialize + AnchorSerialize + Clone + DataHasher + Default + Discriminator,
{
    /// State of the output account which can be modified by the developer in
    /// the program code.
    account_state: T,
    /// Account information.
    account_info: LightAccountInfo<'info>,
}
impl<'info, T> LightAccount<'info, T>
where
    T: AnchorDeserialize + AnchorSerialize + Clone + DataHasher + Default + Discriminator,
{
    /// Creates a `LightAccount` for a brand new compressed account (`init`).
    ///
    /// The account state starts at `T::default()`; the provided address and
    /// seed are recorded as new-address parameters.
    pub fn from_meta_init(
        meta: &'info LightAccountMeta,
        discriminator: [u8; 8],
        new_address: [u8; 32],
        new_address_seed: [u8; 32],
        owner: &'info Pubkey,
    ) -> Result<Self> {
        let account_state = T::default();
        let account_info = LightAccountInfo::from_meta_init_without_output_data(
            meta,
            discriminator,
            new_address,
            new_address_seed,
            owner,
        )?;
        Ok(Self {
            account_state,
            account_info,
        })
    }
    /// Creates a `LightAccount` for an existing compressed account that is
    /// being mutated.
    ///
    /// # Errors
    ///
    /// Returns [`LightSdkError::ExpectedData`] if `meta` carries no data.
    pub fn from_meta_mut(
        meta: &'info LightAccountMeta,
        discriminator: [u8; 8],
        owner: &'info Pubkey,
    ) -> Result<Self> {
        Self::from_meta_with_input(meta, discriminator, owner)
    }
    /// Creates a `LightAccount` for an existing compressed account that is
    /// being closed.
    ///
    /// # Errors
    ///
    /// Returns [`LightSdkError::ExpectedData`] if `meta` carries no data.
    pub fn from_meta_close(
        meta: &'info LightAccountMeta,
        discriminator: [u8; 8],
        owner: &'info Pubkey,
    ) -> Result<Self> {
        Self::from_meta_with_input(meta, discriminator, owner)
    }
    /// Shared implementation of [`Self::from_meta_mut`] and
    /// [`Self::from_meta_close`] (their bodies were identical): deserializes
    /// the existing account state and records its Poseidon hash as the input
    /// account hash.
    fn from_meta_with_input(
        meta: &'info LightAccountMeta,
        discriminator: [u8; 8],
        owner: &'info Pubkey,
    ) -> Result<Self> {
        let mut account_info =
            LightAccountInfo::from_meta_without_output_data(meta, discriminator, owner)?;
        let account_state = T::try_from_slice(
            meta.data
                .as_ref()
                .ok_or(LightSdkError::ExpectedData)?
                .as_slice(),
        )?;
        let input_hash = account_state
            .hash::<Poseidon>()
            .map_err(ProgramError::from)?;
        // Set the input account hash.
        //
        // PANICS: At this point we are sure `input` is `Some`
        account_info.input.as_mut().unwrap().data_hash = Some(input_hash);
        Ok(Self {
            account_state,
            account_info,
        })
    }
    /// Returns the packed new-address parameters recorded by
    /// `from_meta_init`, if any.
    pub fn new_address_params(&self) -> Option<PackedNewAddressParams> {
        self.account_info.new_address_params
    }
    /// Returns the packed input compressed account, if any.
    pub fn input_compressed_account(
        &self,
    ) -> Result<Option<PackedCompressedAccountWithMerkleContext>> {
        self.account_info.input_compressed_account()
    }
    /// Builds the output compressed account from the current `account_state`,
    /// or returns `Ok(None)` when no output Merkle tree index is set.
    pub fn output_compressed_account(
        &self,
    ) -> Result<Option<OutputCompressedAccountWithPackedContext>> {
        match self.account_info.output_merkle_tree_index {
            Some(merkle_tree_index) => {
                let data = {
                    let discriminator = T::discriminator();
                    let data_hash = self
                        .account_state
                        .hash::<Poseidon>()
                        .map_err(ProgramError::from)?;
                    Some(CompressedAccountData {
                        discriminator,
                        data: self.account_state.try_to_vec()?,
                        data_hash,
                    })
                };
                Ok(Some(OutputCompressedAccountWithPackedContext {
                    compressed_account: CompressedAccount {
                        owner: *self.account_info.owner,
                        lamports: self.account_info.lamports.unwrap_or(0),
                        address: self.account_info.address,
                        data,
                    },
                    merkle_tree_index,
                }))
            }
            None => Ok(None),
        }
    }
}
// Deref/DerefMut expose the wrapped account state directly through the
// `LightAccount` wrapper.
impl<'a, T> Deref for LightAccount<'a, T>
where
    T: AnchorDeserialize + AnchorSerialize + Clone + DataHasher + Default + Discriminator,
{
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.account_state
    }
}
impl<'a, T> DerefMut for LightAccount<'a, T>
where
    T: AnchorDeserialize + AnchorSerialize + Clone + DataHasher + Default + Discriminator,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.account_state
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/state.rs
|
use anchor_lang::{AnchorDeserialize, AnchorSerialize};
use solana_program::pubkey::Pubkey;
/// Metadata of a Merkle tree account.
#[derive(AnchorDeserialize, AnchorSerialize, Debug, PartialEq, Default)]
pub struct MerkleTreeMetadata {
    pub access_metadata: AccessMetadata,
    pub rollover_metadata: RolloverMetadata,
    /// Queue associated with this Merkle tree.
    pub associated_queue: Pubkey,
    /// Next Merkle tree to be used after rollover.
    pub next_merkle_tree: Pubkey,
}
/// Access-control metadata of a Merkle tree.
#[derive(AnchorDeserialize, AnchorSerialize, Debug, PartialEq, Default)]
pub struct AccessMetadata {
    /// Owner of the Merkle tree.
    pub owner: Pubkey,
    /// Program owner of the Merkle tree. This will be used for program owned Merkle trees.
    pub program_owner: Pubkey,
    /// Optional privileged forester pubkey, can be set for custom Merkle trees
    /// without a network fee. Merkle trees without network fees are not
    /// forested by light foresters. The variable is not used in the account
    /// compression program but the registry program. The registry program
    /// implements access control to prevent contention during forester. The
    /// forester pubkey specified in this struct can bypass contention checks.
    pub forester: Pubkey,
}
/// Rollover configuration and accounting of a Merkle tree.
#[derive(AnchorDeserialize, AnchorSerialize, Debug, PartialEq, Default)]
pub struct RolloverMetadata {
    /// Unique index.
    pub index: u64,
    /// This fee is used for rent for the next account.
    /// It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over
    pub rollover_fee: u64,
    /// The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).
    pub rollover_threshold: u64,
    /// Tip for maintaining the account.
    pub network_fee: u64,
    /// The slot when the account was rolled over, a rolled over account should not be written to.
    pub rolledover_slot: u64,
    /// If current slot is greater than rolledover_slot + close_threshold and
    /// the account is empty it can be closed. No 'close' functionality has been
    /// implemented yet.
    pub close_threshold: u64,
    /// Placeholder for bytes of additional accounts which are tied to the
    /// Merkle trees operation and need to be rolled over as well.
    pub additional_bytes: u64,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/verify.rs
|
use anchor_lang::{prelude::*, Bumps};
use light_hasher::{DataHasher, Discriminator};
use solana_program::{instruction::Instruction, program::invoke_signed};
use crate::{
account::LightAccount,
address::PackedNewAddressParams,
compressed_account::{
OutputCompressedAccountWithPackedContext, PackedCompressedAccountWithMerkleContext,
},
error::LightSdkError,
proof::{CompressedProof, ProofRpcResult},
traits::{
InvokeAccounts, InvokeCpiAccounts, InvokeCpiContextAccount, LightSystemAccount,
SignerAccounts,
},
CPI_AUTHORITY_PDA_SEED, PROGRAM_ID_LIGHT_SYSTEM,
};
/// Returns the CPI authority PDA of `program_id`, derived from
/// [`CPI_AUTHORITY_PDA_SEED`].
pub fn find_cpi_signer(program_id: &Pubkey) -> Pubkey {
    let (cpi_signer, _bump) = Pubkey::find_program_address(&[CPI_AUTHORITY_PDA_SEED], program_id);
    cpi_signer
}
/// Flags controlling use of the CPI context account.
#[derive(AnchorSerialize, AnchorDeserialize, Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct CompressedCpiContext {
    /// Is set by the program that is invoking the CPI to signal that it should
    /// set the cpi context.
    pub set_context: bool,
    /// Is set to wipe the cpi context since someone could have set it before
    /// with unrelated data.
    pub first_set_context: bool,
    /// Index of cpi context account in remaining accounts.
    pub cpi_context_account_index: u8,
}
/// Instruction data for a CPI invocation of the light system program:
/// optional proof, addresses to create, input accounts to consume and output
/// accounts to produce.
#[derive(Debug, PartialEq, Default, Clone, AnchorDeserialize, AnchorSerialize)]
pub struct InstructionDataInvokeCpi {
    pub proof: Option<CompressedProof>,
    pub new_address_params: Vec<PackedNewAddressParams>,
    pub input_compressed_accounts_with_merkle_context:
        Vec<PackedCompressedAccountWithMerkleContext>,
    pub output_compressed_accounts: Vec<OutputCompressedAccountWithPackedContext>,
    pub relay_fee: Option<u64>,
    pub compress_or_decompress_lamports: Option<u64>,
    pub is_compress: bool,
    pub cpi_context: Option<CompressedCpiContext>,
}
#[inline(always)]
/// Collects the account infos and metas required by the light system
/// program's `InvokeCpi` instruction, in the exact order that instruction
/// expects, followed by all remaining accounts.
pub fn setup_cpi_accounts<'info>(
    ctx: &Context<
        '_,
        '_,
        '_,
        'info,
        impl InvokeAccounts<'info>
            + LightSystemAccount<'info>
            + InvokeCpiAccounts<'info>
            + SignerAccounts<'info>
            + InvokeCpiContextAccount<'info>
            + Bumps,
    >,
) -> (Vec<AccountInfo<'info>>, Vec<AccountMeta>) {
    // The trick for having `None` accounts is to pass program ID, see
    // https://github.com/coral-xyz/anchor/pull/2101
    let placeholder = ctx.accounts.get_light_system_program().to_account_info();

    // Resolve the optional cpi context account, falling back to the
    // program-id placeholder when it is absent.
    let (cpi_context_info, cpi_context_meta) = match ctx.accounts.get_cpi_context_account() {
        Some(account) => (
            account.to_account_info(),
            AccountMeta {
                pubkey: account.key(),
                is_signer: false,
                is_writable: true,
            },
        ),
        None => (
            placeholder.clone(),
            AccountMeta {
                pubkey: ctx.accounts.get_light_system_program().key(),
                is_signer: false,
                is_writable: false,
            },
        ),
    };

    // Fixed account layout expected by the instruction; order matters.
    let mut account_infos = vec![
        ctx.accounts.get_fee_payer().to_account_info(), // fee_payer
        ctx.accounts.get_authority().to_account_info(), // authority
        ctx.accounts.get_registered_program_pda().to_account_info(), // registered_program_pda
        ctx.accounts.get_noop_program().to_account_info(), // noop_program
        ctx.accounts
            .get_account_compression_authority()
            .to_account_info(), // account_compression_authority
        ctx.accounts
            .get_account_compression_program()
            .to_account_info(), // account_compression_program
        ctx.accounts.get_invoking_program().to_account_info(), // invoking_program
        placeholder.clone(),                            // sol_pool_pda (none)
        placeholder,                                    // decompression_recipient (none)
        ctx.accounts.get_system_program().to_account_info(), // system_program
        cpi_context_info,                               // cpi_context_account
    ];
    account_infos.extend(ctx.remaining_accounts.iter().cloned());

    // (is_signer, is_writable) flags for each fixed account above, in order.
    const FIXED_FLAGS: [(bool, bool); 10] = [
        (true, true),   // fee_payer
        (true, false),  // authority
        (false, false), // registered_program_pda
        (false, false), // noop_program
        (false, false), // account_compression_authority
        (false, false), // account_compression_program
        (false, false), // invoking_program
        (false, false), // sol_pool_pda
        (false, false), // decompression_recipient
        (false, false), // system_program
    ];
    let mut account_metas: Vec<AccountMeta> = account_infos
        .iter()
        .take(FIXED_FLAGS.len())
        .zip(FIXED_FLAGS.iter())
        .map(|(info, &(is_signer, is_writable))| AccountMeta {
            pubkey: info.key(),
            is_signer,
            is_writable,
        })
        .collect();
    account_metas.push(cpi_context_meta);
    for remaining_account in ctx.remaining_accounts {
        account_metas.extend(remaining_account.to_account_metas(None));
    }

    (account_infos, account_metas)
}
/// Argument struct of the light system program's `InvokeCpi` instruction:
/// the pre-serialized `InstructionDataInvokeCpi` bytes.
#[derive(AnchorDeserialize, AnchorSerialize)]
pub struct InvokeCpi {
    /// Borsh-serialized `InstructionDataInvokeCpi`.
    pub inputs: Vec<u8>,
}
#[inline(always)]
/// Performs the signed CPI into the light system program's `InvokeCpi`
/// instruction with the given accounts and pre-serialized inputs.
pub fn invoke_cpi(
    account_infos: &[AccountInfo],
    accounts_metas: Vec<AccountMeta>,
    inputs: Vec<u8>,
    signer_seeds: &[&[&[u8]]],
) -> Result<()> {
    // Anchor discriminator of the `InvokeCpi` instruction.
    const DISCRIMINATOR: [u8; 8] = [49, 212, 191, 129, 39, 194, 43, 196];
    let serialized = InvokeCpi { inputs }.try_to_vec()?;
    let mut data = Vec::with_capacity(DISCRIMINATOR.len() + serialized.len());
    data.extend_from_slice(&DISCRIMINATOR);
    data.extend_from_slice(&serialized);
    invoke_signed(
        &Instruction {
            program_id: PROGRAM_ID_LIGHT_SYSTEM,
            accounts: accounts_metas,
            data,
        },
        account_infos,
        signer_seeds,
    )?;
    Ok(())
}
/// Invokes the light system program to verify and apply a zk-compressed state
/// transition. Serializes CPI instruction data, configures necessary accounts,
/// and executes the CPI.
pub fn verify<'info, 'a, 'b, 'c, T>(
    ctx: &Context<
        '_,
        '_,
        '_,
        'info,
        impl InvokeAccounts<'info>
            + LightSystemAccount<'info>
            + InvokeCpiAccounts<'info>
            + SignerAccounts<'info>
            + InvokeCpiContextAccount<'info>
            + Bumps,
    >,
    inputs: &T,
    signer_seeds: &'a [&'b [&'c [u8]]],
) -> Result<()>
where
    T: AnchorSerialize,
{
    // Refuse to CPI into anything but the canonical light system program.
    if ctx.accounts.get_light_system_program().key() != PROGRAM_ID_LIGHT_SYSTEM {
        return err!(LightSdkError::InvalidLightSystemProgram);
    }
    let serialized_inputs = inputs.try_to_vec()?;
    let (account_infos, account_metas) = setup_cpi_accounts(ctx);
    invoke_cpi(&account_infos, account_metas, serialized_inputs, signer_seeds)
}
/// Builds `InstructionDataInvokeCpi` from a slice of `LightAccount`s (new
/// addresses, inputs and outputs) and invokes the light system program,
/// signing with the invoking program's CPI authority PDA.
pub fn verify_light_accounts<'info, T>(
    ctx: &Context<
        '_,
        '_,
        '_,
        'info,
        impl InvokeAccounts<'info>
            + LightSystemAccount<'info>
            + InvokeCpiAccounts<'info>
            + SignerAccounts<'info>
            + InvokeCpiContextAccount<'info>
            + Bumps,
    >,
    proof: Option<ProofRpcResult>,
    light_accounts: &[LightAccount<T>],
    compress_or_decompress_lamports: Option<u64>,
    is_compress: bool,
    cpi_context: Option<CompressedCpiContext>,
) -> Result<()>
where
    T: AnchorDeserialize + AnchorSerialize + Clone + DataHasher + Default + Discriminator,
{
    // Seeds of the invoking program's CPI authority PDA.
    let (_, bump) = Pubkey::find_program_address(
        &[CPI_AUTHORITY_PDA_SEED],
        &ctx.accounts.get_invoking_program().key(),
    );
    let bump_seed = [bump];
    let signer_seeds = [CPI_AUTHORITY_PDA_SEED, bump_seed.as_slice()];

    // Gather the new-address, input and output pieces of every light account.
    let mut new_address_params = Vec::with_capacity(light_accounts.len());
    let mut inputs = Vec::with_capacity(light_accounts.len());
    let mut outputs = Vec::with_capacity(light_accounts.len());
    for account in light_accounts.iter() {
        if let Some(params) = account.new_address_params() {
            new_address_params.push(params);
        }
        if let Some(input) = account.input_compressed_account()? {
            inputs.push(input);
        }
        if let Some(output) = account.output_compressed_account()? {
            outputs.push(output);
        }
    }

    let instruction = InstructionDataInvokeCpi {
        proof: proof.map(|p| p.proof),
        new_address_params,
        relay_fee: None,
        input_compressed_accounts_with_merkle_context: inputs,
        output_compressed_accounts: outputs,
        compress_or_decompress_lamports,
        is_compress,
        cpi_context,
    };
    verify(ctx, &instruction, &[&signer_seeds[..]])
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/compressed_account.rs
|
use anchor_lang::prelude::{AnchorDeserialize, AnchorSerialize, ProgramError, Pubkey, Result};
use light_hasher::{DataHasher, Discriminator, Hasher, Poseidon};
use light_utils::hash_to_bn254_field_size_be;
use crate::merkle_context::{
pack_merkle_context, MerkleContext, PackedMerkleContext, RemainingAccounts,
};
/// A compressed account. (Field order is part of the borsh wire format — do
/// not reorder.)
#[derive(Debug, PartialEq, Default, Clone, AnchorDeserialize, AnchorSerialize)]
pub struct CompressedAccount {
    /// Program that owns this compressed account.
    pub owner: Pubkey,
    /// Lamport balance of the account.
    pub lamports: u64,
    /// Optional unique address of the account.
    pub address: Option<[u8; 32]>,
    /// Optional account data (discriminator, bytes, data hash).
    pub data: Option<CompressedAccountData>,
}
/// Hashing scheme:
/// H(owner || leaf_index || merkle_tree_pubkey || lamports || address || data.discriminator || data.data_hash)
impl CompressedAccount {
    /// Hashes the account using pre-hashed owner and merkle tree pubkeys
    /// (already mapped into the bn254 field), so callers hashing many
    /// accounts don't re-hash those pubkeys per account.
    pub fn hash_with_hashed_values<H: Hasher>(
        &self,
        &owner_hashed: &[u8; 32],
        &merkle_tree_hashed: &[u8; 32],
        leaf_index: &u32,
    ) -> Result<[u8; 32]> {
        // Exact number of hash inputs: owner + leaf index + merkle tree (3),
        // plus one slot for non-zero lamports, one for an address, and two
        // for data (discriminator + data hash).
        let capacity = 3
            + std::cmp::min(self.lamports, 1) as usize
            + self.address.is_some() as usize
            + self.data.is_some() as usize * 2;
        let mut vec: Vec<&[u8]> = Vec::with_capacity(capacity);
        vec.push(owner_hashed.as_slice());
        // leaf index and merkle tree pubkey are used to make every compressed account hash unique
        let leaf_index = leaf_index.to_le_bytes();
        vec.push(leaf_index.as_slice());
        vec.push(merkle_tree_hashed.as_slice());
        // Lamports are only hashed if non-zero to save CU.
        // For safety we prefix the lamports with 1 in 1 byte.
        // Thus even if the discriminator has the same value as the lamports, the hash will be different.
        let mut lamports_bytes = [1, 0, 0, 0, 0, 0, 0, 0, 0];
        if self.lamports != 0 {
            lamports_bytes[1..].copy_from_slice(&self.lamports.to_le_bytes());
            vec.push(lamports_bytes.as_slice());
        }
        if self.address.is_some() {
            vec.push(self.address.as_ref().unwrap().as_slice());
        }
        // The discriminator is prefixed with 2 to domain-separate it from the
        // lamports input (which is prefixed with 1).
        let mut discriminator_bytes = [2, 0, 0, 0, 0, 0, 0, 0, 0];
        if let Some(data) = &self.data {
            discriminator_bytes[1..].copy_from_slice(&data.discriminator);
            vec.push(&discriminator_bytes);
            vec.push(&data.data_hash);
        }
        let hash = H::hashv(&vec).map_err(ProgramError::from)?;
        Ok(hash)
    }
    /// Hashes the account, first mapping the raw owner and merkle tree
    /// pubkeys into the bn254 field via `hash_to_bn254_field_size_be`.
    pub fn hash<H: Hasher>(
        &self,
        &merkle_tree_pubkey: &Pubkey,
        leaf_index: &u32,
    ) -> Result<[u8; 32]> {
        self.hash_with_hashed_values::<H>(
            &hash_to_bn254_field_size_be(&self.owner.to_bytes())
                .unwrap()
                .0,
            &hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes())
                .unwrap()
                .0,
            leaf_index,
        )
    }
}
/// Data payload of a compressed account.
#[derive(Debug, PartialEq, Default, Clone, AnchorDeserialize, AnchorSerialize)]
pub struct CompressedAccountData {
    /// 8-byte type discriminator of the serialized data.
    pub discriminator: [u8; 8],
    /// Serialized account data; may be empty when only the hash is needed
    /// (see `hash_input_account`).
    pub data: Vec<u8>,
    /// Hash of the account data.
    pub data_hash: [u8; 32],
}
/// A compressed account together with the merkle context locating it in a
/// state merkle tree.
#[derive(Debug, PartialEq, Default, Clone, AnchorDeserialize, AnchorSerialize)]
pub struct CompressedAccountWithMerkleContext {
    pub compressed_account: CompressedAccount,
    pub merkle_context: MerkleContext,
}
impl CompressedAccountWithMerkleContext {
    /// Poseidon hash of the compressed account, bound to its merkle tree
    /// pubkey and leaf index from the merkle context.
    pub fn hash(&self) -> Result<[u8; 32]> {
        let merkle_tree = &self.merkle_context.merkle_tree_pubkey;
        let leaf_index = &self.merkle_context.leaf_index;
        self.compressed_account
            .hash::<Poseidon>(merkle_tree, leaf_index)
    }
}
/// A compressed account with its merkle context packed into remaining-account
/// indices, ready for instruction data.
#[derive(Debug, PartialEq, Default, Clone, AnchorDeserialize, AnchorSerialize)]
pub struct PackedCompressedAccountWithMerkleContext {
    pub compressed_account: CompressedAccount,
    pub merkle_context: PackedMerkleContext,
    /// Index of root used in inclusion validity proof.
    pub root_index: u16,
    /// Placeholder to mark accounts read-only; unimplemented, set to false.
    pub read_only: bool,
}
/// An output compressed account plus the remaining-accounts index of the
/// merkle tree it will be appended to.
#[derive(Debug, PartialEq, Default, Clone, AnchorDeserialize, AnchorSerialize)]
pub struct OutputCompressedAccountWithPackedContext {
    pub compressed_account: CompressedAccount,
    /// Index of the destination merkle tree in remaining accounts.
    pub merkle_tree_index: u8,
}
/// Hashes a compressed account.
///
/// This function should be used for input accounts, where including only a
/// hash is sufficient.
pub fn hash_input_account<T>(account: &T) -> Result<CompressedAccountData>
where
    T: AnchorSerialize + DataHasher + Discriminator,
{
    // For input accounts the system program only needs the data hash, so the
    // serialized data is omitted entirely.
    let hash = account.hash::<Poseidon>().map_err(ProgramError::from)?;
    let account_data = CompressedAccountData {
        discriminator: T::discriminator(),
        data: Vec::new(),
        data_hash: hash,
    };
    Ok(account_data)
}
/// Serializes and hashes a compressed account.
///
/// This function should be used for output accounts, where data has to be
/// included for the system program to log in the ledger.
pub fn serialize_and_hash_output_account<T>(account: &T) -> Result<CompressedAccountData>
where
    T: AnchorSerialize + DataHasher + Discriminator,
{
    // Output accounts are logged in the ledger, so the full serialized data
    // is included alongside its hash.
    let serialized = account.try_to_vec()?;
    let hash = account.hash::<Poseidon>().map_err(ProgramError::from)?;
    let account_data = CompressedAccountData {
        discriminator: T::discriminator(),
        data: serialized,
        data_hash: hash,
    };
    Ok(account_data)
}
/// Packs each compressed account's merkle context into remaining-account
/// indices and attaches its proof root index.
///
/// `compressed_accounts` and `root_indices` must have the same length; in
/// debug builds a mismatch panics (in release, `zip` would silently drop the
/// trailing elements of the longer slice).
pub fn pack_compressed_accounts(
    compressed_accounts: &[CompressedAccountWithMerkleContext],
    root_indices: &[u16],
    remaining_accounts: &mut RemainingAccounts,
) -> Vec<PackedCompressedAccountWithMerkleContext> {
    // `zip` truncates silently on a length mismatch; surface that programmer
    // error early in debug builds.
    debug_assert_eq!(
        compressed_accounts.len(),
        root_indices.len(),
        "each compressed account must have a corresponding root index"
    );
    compressed_accounts
        .iter()
        .zip(root_indices.iter())
        .map(|(x, root_index)| PackedCompressedAccountWithMerkleContext {
            compressed_account: x.compressed_account.clone(),
            merkle_context: pack_merkle_context(&x.merkle_context, remaining_accounts),
            root_index: *root_index,
            // Read-only accounts are unimplemented; always false.
            read_only: false,
        })
        .collect::<Vec<_>>()
}
pub fn pack_compressed_account(
compressed_account: CompressedAccountWithMerkleContext,
root_index: u16,
remaining_accounts: &mut RemainingAccounts,
) -> PackedCompressedAccountWithMerkleContext {
pack_compressed_accounts(&[compressed_account], &[root_index], remaining_accounts)[0].clone()
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/traits.rs
|
// Ported from light-system-program, adjusted for caller programs.
use anchor_lang::prelude::*;
/// Accounts a caller's accounts struct must expose for the light system
/// program `invoke` CPI.
pub trait InvokeAccounts<'info> {
    fn get_registered_program_pda(&self) -> &AccountInfo<'info>;
    fn get_noop_program(&self) -> &AccountInfo<'info>;
    fn get_account_compression_authority(&self) -> &AccountInfo<'info>;
    fn get_account_compression_program(&self) -> &AccountInfo<'info>;
    fn get_system_program(&self) -> &Program<'info, System>;
    /// Optional; `None` when no sol compression is performed.
    fn get_compressed_sol_pda(&self) -> Option<&AccountInfo<'info>>;
    /// Optional recipient for decompressed sol.
    fn get_compression_recipient(&self) -> Option<&AccountInfo<'info>>;
}
/// Access to the light system program account itself.
pub trait LightSystemAccount<'info> {
    fn get_light_system_program(&self) -> &AccountInfo<'info>;
}
/// Fee payer and authority signers of the CPI.
pub trait SignerAccounts<'info> {
    fn get_fee_payer(&self) -> &Signer<'info>;
    fn get_authority(&self) -> &AccountInfo<'info>;
}
/// Mutable access to the optional cpi context account.
/// Only used within the system program.
pub trait InvokeCpiContextAccountMut<'info> {
    fn get_cpi_context_account_mut(&mut self) -> &mut Option<AccountInfo<'info>>;
}
/// Read access to the optional cpi context account.
pub trait InvokeCpiContextAccount<'info> {
    fn get_cpi_context_account(&self) -> Option<&AccountInfo<'info>>;
}
/// Access to the program performing the CPI into the light system program.
pub trait InvokeCpiAccounts<'info> {
    fn get_invoking_program(&self) -> &AccountInfo<'info>;
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/proof.rs
|
use anchor_lang::{AnchorDeserialize, AnchorSerialize};
use light_indexed_merkle_tree::array::IndexedElement;
use num_bigint::BigUint;
use solana_program::pubkey::Pubkey;
/// Inclusion proof of a leaf in a state merkle tree.
#[derive(Debug, Clone)]
pub struct MerkleProof {
    /// Hash of the proven leaf.
    pub hash: [u8; 32],
    pub leaf_index: u64,
    pub merkle_tree: Pubkey,
    /// Sibling hashes from leaf to root.
    pub proof: Vec<[u8; 32]>,
    /// Sequence number of the root the proof targets.
    pub root_seq: u64,
}
// For consistency with the Photon API.
/// Non-inclusion proof context for creating a new address in an indexed
/// merkle tree: the low element bracketing the new value, its proof, and the
/// post-insertion elements.
#[derive(Clone, Default, Debug, PartialEq)]
pub struct NewAddressProofWithContext {
    pub merkle_tree: Pubkey,
    pub root: [u8; 32],
    pub root_seq: u64,
    pub low_address_index: u64,
    pub low_address_value: [u8; 32],
    pub low_address_next_index: u64,
    pub low_address_next_value: [u8; 32],
    /// Merkle proof of the low element (fixed height 16 here).
    pub low_address_proof: [[u8; 32]; 16],
    pub new_low_element: Option<IndexedElement<usize>>,
    pub new_element: Option<IndexedElement<usize>>,
    pub new_element_next_value: Option<BigUint>,
}
/// A compressed Groth16-style proof as three fixed-size byte arrays.
/// NOTE(review): exact curve/encoding not visible here — defined by the
/// light system program's verifier.
#[derive(Debug, Clone, PartialEq, Eq, AnchorDeserialize, AnchorSerialize)]
pub struct CompressedProof {
    pub a: [u8; 32],
    pub b: [u8; 64],
    pub c: [u8; 32],
}
/// Proof plus the root indices returned by an RPC proof request.
#[derive(Debug, AnchorDeserialize, AnchorSerialize)]
pub struct ProofRpcResult {
    pub proof: CompressedProof,
    /// Root indices for state (inclusion) proofs.
    pub root_indices: Vec<u16>,
    /// Root indices for address (non-inclusion) proofs.
    pub address_root_indices: Vec<u16>,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/transfer.rs
|
use anchor_lang::Result;
use crate::{account_info::LightAccountInfo, error::LightSdkError};
/// Transfers a specified amount of lamports from one account to another.
///
/// Attempts to transfer `lamports` from the `from` account to the `to`
/// account. It will update the lamport balances of both accounts if the
/// transfer is successful.
pub fn transfer_compressed_sol(
from: &mut LightAccountInfo,
to: &mut LightAccountInfo,
lamports: u64,
) -> Result<()> {
let output_from = from
.input
.as_ref()
.ok_or(LightSdkError::TransferFromNoInput)?
.lamports
.ok_or(LightSdkError::TransferFromNoLamports)?
.checked_sub(lamports)
.ok_or(LightSdkError::TransferFromInsufficientLamports)?;
let output_to = to
.input
.as_ref()
.and_then(|input| input.lamports)
.unwrap_or(0)
.checked_add(lamports)
.ok_or(LightSdkError::TransferIntegerOverflow)?;
from.lamports = Some(output_from);
to.lamports = Some(output_to);
Ok(())
}
// Unit tests for `transfer_compressed_sol`, covering the success path and
// every error variant it can return.
#[cfg(test)]
mod tests {
    use solana_program::pubkey::Pubkey;
    use crate::{account_info::LightInputAccountInfo, merkle_context::PackedMerkleContext};
    use super::*;
    /// Creates a mock account with the given input lamports.
    fn mock_account<'a>(owner: &'a Pubkey, lamports: Option<u64>) -> LightAccountInfo<'a> {
        LightAccountInfo {
            input: Some(LightInputAccountInfo {
                lamports,
                // None of the following values matter.
                address: Some([1; 32]),
                data: Some(b"ayy"),
                data_hash: Some([0; 32]),
                merkle_context: PackedMerkleContext {
                    merkle_tree_pubkey_index: 0,
                    nullifier_queue_pubkey_index: 0,
                    leaf_index: 0,
                    queue_index: None,
                },
                root_index: 0,
            }),
            owner,
            // None of the following values matter.
            lamports: None,
            discriminator: Some([0; 8]),
            data: None,
            data_hash: None,
            address: Some([1; 32]),
            output_merkle_tree_index: None,
            new_address_params: None,
        }
    }
    /// Creates a mock account without input.
    fn mock_account_without_input<'a>(owner: &'a Pubkey) -> LightAccountInfo<'a> {
        LightAccountInfo {
            input: None,
            owner,
            // None of the following values matter.
            lamports: None,
            discriminator: Some([0; 8]),
            data: None,
            data_hash: None,
            address: Some([1; 32]),
            output_merkle_tree_index: None,
            new_address_params: None,
        }
    }
    // Happy path: both balances updated.
    #[test]
    fn test_transfer_success() {
        let from_pubkey = Pubkey::new_unique();
        let mut from = mock_account(&from_pubkey, Some(1000));
        let to_pubkey = Pubkey::new_unique();
        let mut to = mock_account(&to_pubkey, Some(500));
        let result = transfer_compressed_sol(&mut from, &mut to, 300);
        assert!(result.is_ok());
        assert_eq!(from.lamports, Some(700));
        assert_eq!(to.lamports, Some(800));
    }
    // Sender without input state cannot pay.
    #[test]
    fn test_transfer_from_no_input() {
        let from_pubkey = Pubkey::new_unique();
        let mut from = mock_account_without_input(&from_pubkey);
        let to_pubkey = Pubkey::new_unique();
        let mut to = mock_account(&to_pubkey, Some(500));
        let result = transfer_compressed_sol(&mut from, &mut to, 300);
        assert_eq!(result, Err(LightSdkError::TransferFromNoInput.into()));
    }
    // Sender input without a lamports balance cannot pay.
    #[test]
    fn test_transfer_from_no_lamports() {
        let from_pubkey = Pubkey::new_unique();
        let mut from = mock_account(&from_pubkey, None);
        let to_pubkey = Pubkey::new_unique();
        let mut to = mock_account(&to_pubkey, Some(500));
        let result = transfer_compressed_sol(&mut from, &mut to, 300);
        assert_eq!(result, Err(LightSdkError::TransferFromNoLamports.into()));
    }
    // Transfer larger than the sender's balance fails.
    #[test]
    fn test_transfer_insufficient_lamports() {
        let from_pubkey = Pubkey::new_unique();
        let mut from = mock_account(&from_pubkey, Some(200));
        let to_pubkey = Pubkey::new_unique();
        let mut to = mock_account(&to_pubkey, Some(500));
        let result = transfer_compressed_sol(&mut from, &mut to, 300);
        assert_eq!(
            result,
            Err(LightSdkError::TransferFromInsufficientLamports.into())
        );
    }
    // Recipient balance overflowing u64 fails.
    #[test]
    fn test_transfer_integer_overflow() {
        let from_pubkey = Pubkey::new_unique();
        let mut from = mock_account(&from_pubkey, Some(1000));
        let to_pubkey = Pubkey::new_unique();
        let mut to = mock_account(&to_pubkey, Some(u64::MAX - 500));
        let result = transfer_compressed_sol(&mut from, &mut to, 600);
        assert_eq!(result, Err(LightSdkError::TransferIntegerOverflow.into()));
    }
    // Recipient without input lamports starts from zero.
    #[test]
    fn test_transfer_to_no_lamports() {
        let from_pubkey = Pubkey::new_unique();
        let mut from = mock_account(&from_pubkey, Some(1000));
        let to_pubkey = Pubkey::new_unique();
        let mut to = mock_account(&to_pubkey, None);
        let result = transfer_compressed_sol(&mut from, &mut to, 500);
        assert!(result.is_ok());
        assert_eq!(from.lamports, Some(500));
        assert_eq!(to.lamports, Some(500));
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/utils.rs
|
use anchor_lang::solana_program::pubkey::Pubkey;
use crate::{
address::PackedNewAddressParams,
compressed_account::{
OutputCompressedAccountWithPackedContext, PackedCompressedAccountWithMerkleContext,
},
proof::CompressedProof,
verify::{CompressedCpiContext, InstructionDataInvokeCpi},
PROGRAM_ID_ACCOUNT_COMPRESSION,
};
/// Derives the registered-program PDA of the account compression program,
/// seeded with the given program id.
pub fn get_registered_program_pda(program_id: &Pubkey) -> Pubkey {
    let seed = program_id.to_bytes();
    let (pda, _bump) =
        Pubkey::find_program_address(&[seed.as_slice()], &PROGRAM_ID_ACCOUNT_COMPRESSION);
    pda
}
/// Derives the `"cpi_authority"` PDA of the given program.
pub fn get_cpi_authority_pda(program_id: &Pubkey) -> Pubkey {
    let (pda, _bump) = Pubkey::find_program_address(&[b"cpi_authority"], program_id);
    pda
}
/// Helper function to create data for creating a single PDA.
pub fn create_cpi_inputs_for_new_account(
    proof: CompressedProof,
    new_address_params: PackedNewAddressParams,
    compressed_pda: OutputCompressedAccountWithPackedContext,
    cpi_context: Option<CompressedCpiContext>,
) -> InstructionDataInvokeCpi {
    InstructionDataInvokeCpi {
        proof: Some(proof),
        new_address_params: vec![new_address_params],
        output_compressed_accounts: vec![compressed_pda],
        cpi_context,
        // No inputs, relay fee, or (de)compression when creating an account.
        ..Default::default()
    }
}
/// Helper function to create data for updating a single compressed account
/// (old state consumed, new state produced).
pub fn create_cpi_inputs_for_account_update(
    proof: CompressedProof,
    old_compressed_pda: PackedCompressedAccountWithMerkleContext,
    new_compressed_pda: OutputCompressedAccountWithPackedContext,
    cpi_context: Option<CompressedCpiContext>,
) -> InstructionDataInvokeCpi {
    InstructionDataInvokeCpi {
        proof: Some(proof),
        input_compressed_accounts_with_merkle_context: vec![old_compressed_pda],
        output_compressed_accounts: vec![new_compressed_pda],
        cpi_context,
        // No new addresses, relay fee, or (de)compression for an update.
        ..Default::default()
    }
}
/// Helper function to create data for deleting a single compressed account
/// (input consumed, no output produced).
pub fn create_cpi_inputs_for_account_deletion(
    proof: CompressedProof,
    compressed_pda: PackedCompressedAccountWithMerkleContext,
    cpi_context: Option<CompressedCpiContext>,
) -> InstructionDataInvokeCpi {
    InstructionDataInvokeCpi {
        proof: Some(proof),
        input_compressed_accounts_with_merkle_context: vec![compressed_pda],
        cpi_context,
        // No new addresses, outputs, relay fee, or (de)compression.
        ..Default::default()
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/sdk
|
solana_public_repos/Lightprotocol/light-protocol/sdk/src/context.rs
|
use std::ops::{Deref, DerefMut};
use anchor_lang::{context::Context, Bumps, Result};
use crate::{
account::LightAccounts,
account_info::LightAccountInfo,
traits::{
InvokeAccounts, InvokeCpiAccounts, InvokeCpiContextAccount, LightSystemAccount,
SignerAccounts,
},
};
/// Provides non-argument inputs to the program, including light accounts and
/// regular accounts.
///
/// # Example
/// ```ignore
/// pub fn set_data(ctx: Context<SetData>, age: u64, other_data: u32) -> Result<()> {
/// // Set account data like this
/// (*ctx.accounts.my_account).age = age;
/// (*ctx.accounts.my_account).other_data = other_data;
/// // or like this
/// let my_account = &mut ctx.account.my_account;
/// my_account.age = age;
/// my_account.other_data = other_data;
/// Ok(())
/// }
/// ```
pub struct LightContext<'a, 'b, 'c, 'info, T, U>
where
    T: Bumps,
    U: LightAccounts<'a>,
{
    /// Context provided by Anchor.
    pub anchor_context: Context<'a, 'b, 'c, 'info, T>,
    /// Light (compressed) accounts parsed for this instruction.
    pub light_accounts: U,
}
// Deref to the wrapped Anchor context so `ctx.accounts`, `ctx.program_id`
// etc. work directly on a `LightContext`.
impl<'a, 'b, 'c, 'info, T, U> Deref for LightContext<'a, 'b, 'c, 'info, T, U>
where
    T: Bumps,
    U: LightAccounts<'a>,
{
    type Target = Context<'a, 'b, 'c, 'info, T>;
    fn deref(&self) -> &Self::Target {
        &self.anchor_context
    }
}
// Mutable counterpart of the `Deref` impl above.
impl<'a, 'b, 'c, 'info, T, U> DerefMut for LightContext<'a, 'b, 'c, 'info, T, U>
where
    T: Bumps,
    U: LightAccounts<'a>,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.anchor_context
    }
}
impl<'a, 'b, 'c, 'info, T, U> LightContext<'a, 'b, 'c, 'info, T, U>
where
    T: Bumps
        + InvokeAccounts<'info>
        + InvokeCpiAccounts<'info>
        + InvokeCpiContextAccount<'info>
        + LightSystemAccount<'info>
        + SignerAccounts<'info>,
    U: LightAccounts<'a>,
{
    /// Builds a `LightContext` from the Anchor context and the instruction's
    /// light account infos.
    pub fn new(
        anchor_context: Context<'a, 'b, 'c, 'info, T>,
        account_infos: &'a mut [LightAccountInfo],
    ) -> Result<Self> {
        U::try_light_accounts(account_infos).map(|light_accounts| Self {
            anchor_context,
            light_accounts,
        })
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/.prettierignore
|
.anchor
.DS_Store
target
node_modules
dist
build
test-ledger
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/README.md
|
# Name Service program example
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/Anchor.toml
|
[toolchain]
[features]
seeds = false
skip-lint = false
[programs.localnet]
name_service = "7yucc7fL3JGbyMwg4neUaenNSdySS39hbAk89Ao3t1Hz"
[registry]
url = "https://api.apr.dev"
[provider]
cluster = "Localnet"
wallet = "~/.config/solana/id.json"
[scripts]
test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/**/*.ts"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/package.json
|
{
"scripts": {
"lint:fix": "prettier \"*/**/*{.js,.ts}\" -w",
"lint": "prettier \"*/**/*{.js,.ts}\" --check",
"test": "cargo test-sbf -p name-service -- --test-threads 1"
},
"dependencies": {
"@coral-xyz/anchor": "^0.29.0"
},
"devDependencies": {
"@lightprotocol/zk-compression-cli": "workspace:*",
"chai": "^5.1.2",
"mocha": "^10.7.3",
"ts-mocha": "^10.0.0",
"@types/bn.js": "^5.1.0",
"@types/chai": "^5.0.0",
"@types/mocha": "^10.0.7",
"typescript": "^5.5.4",
"prettier": "^3.4.2"
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/tsconfig.json
|
{
"compilerOptions": {
"types": ["mocha", "chai"],
"typeRoots": ["./node_modules/@types"],
"lib": ["es2015"],
"module": "commonjs",
"target": "es6",
"esModuleInterop": true
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/migrations/deploy.ts
|
// Migrations are an early feature. Currently, they're nothing more than this
// single deploy script that's invoked from the CLI, injecting a provider
// configured from the workspace's Anchor.toml.
const anchor = require("@coral-xyz/anchor");
// Entry point invoked by `anchor migrate` with the workspace provider.
module.exports = async function (provider) {
  // Configure client to use the provider.
  anchor.setProvider(provider);
  // Add your deploy script here.
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service-without-macros/Cargo.toml
|
[package]
name = "name-service-without-macros"
version = "0.7.0"
description = "Created with Anchor"
edition = "2021"
rust-version = "1.75.0"
license = "Apache-2.0"
[lib]
crate-type = ["cdylib", "lib"]
name = "name_service_without_macros"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
default = ["idl-build"]
test-sbf = []
bench-sbf = []
idl-build = ["anchor-lang/idl-build", "light-sdk/idl-build"]
[dependencies]
anchor-lang = { workspace=true}
borsh = { workspace = true }
light-hasher = { workspace = true, features = ["solana"] }
light-macros = { workspace = true }
light-sdk = { workspace = true }
light-sdk-macros = { workspace = true }
light-utils = { workspace = true }
light-verifier = { workspace = true }
[target.'cfg(not(target_os = "solana"))'.dependencies]
solana-sdk = { workspace = true }
[dev-dependencies]
light-client = { workspace = true , features = ["devenv"]}
light-test-utils = { path = "../../../../test-utils", version = "1.2.0", features = ["devenv"] }
light-program-test = { workspace = true, features = ["devenv"] }
solana-program-test = { workspace = true }
tokio = "1.36.0"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service-without-macros/Xargo.toml
|
[target.bpfel-unknown-unknown.dependencies.std]
features = []
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service-without-macros
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service-without-macros/tests/test.rs
|
#![cfg(feature = "test-sbf")]
use std::net::{Ipv4Addr, Ipv6Addr};
use anchor_lang::{AnchorDeserialize, InstructionData, ToAccountMetas};
use light_client::indexer::{AddressMerkleTreeAccounts, Indexer, StateMerkleTreeAccounts};
use light_client::rpc::merkle_tree::MerkleTreeExt;
use light_program_test::test_env::{setup_test_programs_with_accounts_v2, EnvAccounts};
use light_program_test::test_indexer::TestIndexer;
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_sdk::account_meta::LightAccountMeta;
use light_sdk::address::derive_address;
use light_sdk::compressed_account::CompressedAccountWithMerkleContext;
use light_sdk::error::LightSdkError;
use light_sdk::instruction_data::LightInstructionData;
use light_sdk::merkle_context::{AddressMerkleContext, RemainingAccounts};
use light_sdk::utils::get_cpi_authority_pda;
use light_sdk::verify::find_cpi_signer;
use light_sdk::{PROGRAM_ID_ACCOUNT_COMPRESSION, PROGRAM_ID_LIGHT_SYSTEM, PROGRAM_ID_NOOP};
use light_test_utils::{RpcConnection, RpcError};
use name_service_without_macros::{CustomError, NameRecord, RData};
use solana_sdk::instruction::{Instruction, InstructionError};
use solana_sdk::native_token::LAMPORTS_PER_SOL;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::transaction::{Transaction, TransactionError};
#[tokio::test]
async fn test_name_service() {
let (mut rpc, env) = setup_test_programs_with_accounts_v2(Some(vec![(
String::from("name_service_without_macros"),
name_service_without_macros::ID,
)]))
.await;
let payer = rpc.get_payer().insecure_clone();
let mut test_indexer: TestIndexer<ProgramTestRpcConnection> = TestIndexer::new(
&[StateMerkleTreeAccounts {
merkle_tree: env.merkle_tree_pubkey,
nullifier_queue: env.nullifier_queue_pubkey,
cpi_context: env.cpi_context_account_pubkey,
}],
&[AddressMerkleTreeAccounts {
merkle_tree: env.address_merkle_tree_pubkey,
queue: env.address_merkle_tree_queue_pubkey,
}],
true,
true,
)
.await;
let name = "example.io";
let mut remaining_accounts = RemainingAccounts::default();
let address_merkle_context = AddressMerkleContext {
address_merkle_tree_pubkey: env.address_merkle_tree_pubkey,
address_queue_pubkey: env.address_merkle_tree_queue_pubkey,
};
let (address, _) = derive_address(
&[b"name-service", name.as_bytes()],
&address_merkle_context,
&name_service_without_macros::ID,
);
let account_compression_authority = get_cpi_authority_pda(&PROGRAM_ID_LIGHT_SYSTEM);
let registered_program_pda = Pubkey::find_program_address(
&[PROGRAM_ID_LIGHT_SYSTEM.to_bytes().as_slice()],
&PROGRAM_ID_ACCOUNT_COMPRESSION,
)
.0;
// Create the example.io -> 10.0.1.25 record.
let rdata_1 = RData::A(Ipv4Addr::new(10, 0, 1, 25));
create_record(
&name,
&rdata_1,
&mut rpc,
&mut test_indexer,
&env,
&mut remaining_accounts,
&payer,
&address,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await
.unwrap();
// Create with invalid light-system-program ID, should not succeed.
{
let result = create_record(
&name,
&rdata_1,
&mut rpc,
&mut test_indexer,
&env,
&mut remaining_accounts,
&payer,
&address,
&account_compression_authority,
®istered_program_pda,
&Pubkey::new_unique(),
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(LightSdkError::InvalidLightSystemProgram)
));
}
// Check that it was created correctly.
let compressed_accounts =
test_indexer.get_compressed_accounts_by_owner(&name_service_without_macros::ID);
assert_eq!(compressed_accounts.len(), 1);
let compressed_account = &compressed_accounts[0];
let record = &compressed_account
.compressed_account
.data
.as_ref()
.unwrap()
.data;
let record = NameRecord::deserialize(&mut &record[..]).unwrap();
assert_eq!(record.name, "example.io");
assert_eq!(record.rdata, rdata_1);
// Update the record to example.io -> 2001:db8::1.
let rdata_2 = RData::AAAA(Ipv6Addr::new(8193, 3512, 0, 0, 0, 0, 0, 1));
update_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&rdata_2,
&payer,
compressed_account,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await
.unwrap();
// Update with invalid owner, should not succeed.
{
let invalid_signer = Keypair::new();
rpc.airdrop_lamports(&invalid_signer.pubkey(), LAMPORTS_PER_SOL * 1)
.await
.unwrap();
let result = update_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&rdata_2,
&invalid_signer,
compressed_account,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(CustomError::Unauthorized)
));
}
// Update with invalid light-system-program ID, should not succeed.
{
let result = update_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&rdata_2,
&payer,
compressed_account,
&account_compression_authority,
®istered_program_pda,
&Pubkey::new_unique(),
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(LightSdkError::InvalidLightSystemProgram)
));
}
// Check that it was updated correctly.
let compressed_accounts =
test_indexer.get_compressed_accounts_by_owner(&name_service_without_macros::ID);
assert_eq!(compressed_accounts.len(), 1);
let compressed_account = &compressed_accounts[0];
let record = &compressed_account
.compressed_account
.data
.as_ref()
.unwrap()
.data;
let record = NameRecord::deserialize(&mut &record[..]).unwrap();
assert_eq!(record.name, "example.io");
assert_eq!(record.rdata, rdata_2);
// Delete with invalid owner, should not succeed.
{
let invalid_signer = Keypair::new();
rpc.airdrop_lamports(&invalid_signer.pubkey(), LAMPORTS_PER_SOL * 1)
.await
.unwrap();
let result = delete_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&invalid_signer,
compressed_account,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(CustomError::Unauthorized)
));
}
// Delete with invalid light-system-program ID, should not succeed.
{
let result = delete_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&payer,
compressed_account,
&account_compression_authority,
®istered_program_pda,
&Pubkey::new_unique(),
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(LightSdkError::InvalidLightSystemProgram)
));
}
// Delete the example.io record.
delete_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&payer,
compressed_account,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await
.unwrap();
}
/// Builds and submits a `CreateRecord` instruction for the name-service
/// program, first producing a non-inclusion proof for the derived address.
///
/// On success the emitted event is fed back into `test_indexer` so the new
/// compressed account is visible to subsequent lookups.
async fn create_record<R>(
    name: &str,
    rdata: &RData,
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    env: &EnvAccounts,
    remaining_accounts: &mut RemainingAccounts,
    payer: &Keypair,
    address: &[u8; 32],
    account_compression_authority: &Pubkey,
    registered_program_pda: &Pubkey,
    light_system_program: &Pubkey,
) -> Result<(), RpcError>
where
    R: RpcConnection + MerkleTreeExt,
{
    // Prove that the address does not yet exist in the address tree.
    let proof_result = test_indexer
        .create_proof_for_compressed_accounts(
            None,
            None,
            Some(&[*address]),
            Some(vec![env.address_merkle_tree_pubkey]),
            rpc,
        )
        .await;
    let address_ctx = AddressMerkleContext {
        address_merkle_tree_pubkey: env.address_merkle_tree_pubkey,
        address_queue_pubkey: env.address_merkle_tree_queue_pubkey,
    };
    // Registers the state tree and address tree/queue in `remaining_accounts`.
    let new_account = LightAccountMeta::new_init(
        &env.merkle_tree_pubkey,
        Some(&address_ctx),
        Some(proof_result.address_root_indices[0]),
        remaining_accounts,
    )
    .unwrap();
    let serialized_inputs = LightInstructionData {
        proof: Some(proof_result),
        accounts: Some(vec![new_account]),
    }
    .serialize()
    .unwrap();
    let cpi_signer = find_cpi_signer(&name_service_without_macros::ID);
    let ix_accounts = name_service_without_macros::accounts::CreateRecord {
        signer: payer.pubkey(),
        light_system_program: *light_system_program,
        account_compression_program: PROGRAM_ID_ACCOUNT_COMPRESSION,
        account_compression_authority: *account_compression_authority,
        registered_program_pda: *registered_program_pda,
        noop_program: PROGRAM_ID_NOOP,
        self_program: name_service_without_macros::ID,
        cpi_signer,
        system_program: solana_sdk::system_program::id(),
    };
    // Static accounts first, then the packed remaining (tree/queue) accounts.
    let mut metas = ix_accounts.to_account_metas(Some(true));
    metas.extend(remaining_accounts.to_account_metas());
    let ix = Instruction {
        program_id: name_service_without_macros::ID,
        accounts: metas,
        data: name_service_without_macros::instruction::CreateRecord {
            inputs: serialized_inputs,
            name: name.to_string(),
            rdata: rdata.clone(),
        }
        .data(),
    };
    let event = rpc
        .create_and_send_transaction_with_event(&[ix], &payer.pubkey(), &[payer], None)
        .await?;
    test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0);
    Ok(())
}
/// Builds and submits an `UpdateRecord` instruction that mutates an existing
/// compressed `NameRecord`, proving inclusion of the current account state.
///
/// On success the emitted event is fed back into `test_indexer` so later
/// lookups see the updated account.
async fn update_record<R>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    remaining_accounts: &mut RemainingAccounts,
    new_rdata: &RData,
    payer: &Keypair,
    compressed_account: &CompressedAccountWithMerkleContext,
    account_compression_authority: &Pubkey,
    registered_program_pda: &Pubkey,
    light_system_program: &Pubkey,
) -> Result<(), RpcError>
where
    R: RpcConnection + MerkleTreeExt,
{
    // Inclusion proof for the account being mutated.
    let hash = compressed_account.hash().unwrap();
    let merkle_tree_pubkey = compressed_account.merkle_context.merkle_tree_pubkey;
    let rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&[hash]),
            Some(&[merkle_tree_pubkey]),
            None,
            None,
            rpc,
        )
        .await;
    // Mark the account as a mutable input; this also registers its tree and
    // queue in `remaining_accounts`.
    let compressed_account = LightAccountMeta::new_mut(
        compressed_account,
        rpc_result.root_indices[0],
        &merkle_tree_pubkey,
        remaining_accounts,
    );
    let inputs = LightInstructionData {
        proof: Some(rpc_result),
        accounts: Some(vec![compressed_account]),
    };
    let inputs = inputs.serialize().unwrap();
    let instruction_data = name_service_without_macros::instruction::UpdateRecord {
        inputs,
        new_rdata: new_rdata.clone(),
    };
    let cpi_signer = find_cpi_signer(&name_service_without_macros::ID);
    let accounts = name_service_without_macros::accounts::UpdateRecord {
        signer: payer.pubkey(),
        light_system_program: *light_system_program,
        account_compression_program: PROGRAM_ID_ACCOUNT_COMPRESSION,
        account_compression_authority: *account_compression_authority,
        registered_program_pda: *registered_program_pda,
        noop_program: PROGRAM_ID_NOOP,
        self_program: name_service_without_macros::ID,
        cpi_signer,
        system_program: solana_sdk::system_program::id(),
    };
    // Static accounts first, then the packed remaining (tree/queue) accounts.
    let remaining_accounts = remaining_accounts.to_account_metas();
    let instruction = Instruction {
        program_id: name_service_without_macros::ID,
        accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
        data: instruction_data.data(),
    };
    let event = rpc
        .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None)
        .await?;
    test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0);
    Ok(())
}
/// Builds and submits a `DeleteRecord` instruction that closes an existing
/// compressed `NameRecord`, proving inclusion of the current account state.
///
/// NOTE(review): unlike `create_record`/`update_record`, the transaction is
/// sent without capturing the event, so `test_indexer` is not updated here —
/// presumably fine because deletion is the final step of the test; confirm.
async fn delete_record<R>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    remaining_accounts: &mut RemainingAccounts,
    payer: &Keypair,
    compressed_account: &CompressedAccountWithMerkleContext,
    account_compression_authority: &Pubkey,
    registered_program_pda: &Pubkey,
    light_system_program: &Pubkey,
) -> Result<(), RpcError>
where
    R: RpcConnection + MerkleTreeExt,
{
    // Inclusion proof for the account being closed.
    let hash = compressed_account.hash().unwrap();
    let merkle_tree_pubkey = compressed_account.merkle_context.merkle_tree_pubkey;
    let rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&[hash]),
            Some(&[merkle_tree_pubkey]),
            None,
            None,
            rpc,
        )
        .await;
    // Mark the account for closing (consumed with no output state).
    let compressed_account = LightAccountMeta::new_close(
        compressed_account,
        rpc_result.root_indices[0],
        remaining_accounts,
    );
    let inputs = LightInstructionData {
        proof: Some(rpc_result),
        accounts: Some(vec![compressed_account]),
    };
    let inputs = inputs.serialize().unwrap();
    let instruction_data = name_service_without_macros::instruction::DeleteRecord { inputs };
    let cpi_signer = find_cpi_signer(&name_service_without_macros::ID);
    let accounts = name_service_without_macros::accounts::DeleteRecord {
        signer: payer.pubkey(),
        light_system_program: *light_system_program,
        account_compression_program: PROGRAM_ID_ACCOUNT_COMPRESSION,
        account_compression_authority: *account_compression_authority,
        registered_program_pda: *registered_program_pda,
        noop_program: PROGRAM_ID_NOOP,
        self_program: name_service_without_macros::ID,
        cpi_signer,
        system_program: solana_sdk::system_program::id(),
    };
    // Static accounts first, then the packed remaining (tree/queue) accounts.
    let remaining_accounts = remaining_accounts.to_account_metas();
    let instruction = Instruction {
        program_id: name_service_without_macros::ID,
        accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
        data: instruction_data.data(),
    };
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&payer.pubkey()),
        &[&payer],
        rpc.get_latest_blockhash().await.unwrap(),
    );
    rpc.process_transaction(transaction).await?;
    Ok(())
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service-without-macros
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service-without-macros/src/lib.rs
|
use std::net::{Ipv4Addr, Ipv6Addr};
use anchor_lang::prelude::*;
use borsh::{BorshDeserialize, BorshSerialize};
use light_hasher::bytes::AsByteVec;
use light_sdk::{
account::LightAccount, instruction_data::LightInstructionData, light_system_accounts,
verify::verify_light_accounts, LightDiscriminator, LightHasher, LightTraits,
};
declare_id!("7yucc7fL3JGbyMwg4neUaenNSdySS39hbAk89Ao3t1Hz");
#[program]
pub mod name_service {
    use light_hasher::Discriminator;
    use light_sdk::{
        address::derive_address, error::LightSdkError,
        program_merkle_context::unpack_address_merkle_context,
    };

    use super::*;

    /// Creates a compressed `NameRecord` at an address derived from
    /// `[b"name-service", name]`, owned by the transaction signer.
    pub fn create_record<'info>(
        ctx: Context<'_, '_, '_, 'info, CreateRecord<'info>>,
        inputs: Vec<u8>,
        name: String,
        rdata: RData,
    ) -> Result<()> {
        let inputs = LightInstructionData::deserialize(&inputs)?;
        let accounts = inputs
            .accounts
            .as_ref()
            .ok_or(LightSdkError::ExpectedAccounts)?;
        // Resolve the address tree/queue pubkeys from the remaining accounts.
        let address_merkle_context = accounts[0]
            .address_merkle_context
            .ok_or(LightSdkError::ExpectedAddressMerkleContext)?;
        let address_merkle_context =
            unpack_address_merkle_context(address_merkle_context, ctx.remaining_accounts);
        let (address, address_seed) = derive_address(
            &[b"name-service", name.as_bytes()],
            &address_merkle_context,
            &crate::ID,
        );
        let mut record: LightAccount<'_, NameRecord> = LightAccount::from_meta_init(
            &accounts[0],
            NameRecord::discriminator(),
            address,
            address_seed,
            &crate::ID,
        )?;
        record.owner = ctx.accounts.signer.key();
        record.name = name;
        record.rdata = rdata;
        // Verifies the validity proof and performs the CPI into the
        // light-system-program to emit the new account state.
        verify_light_accounts(&ctx, inputs.proof, &[record], None, false, None)?;
        Ok(())
    }

    /// Replaces the `rdata` of an existing record; only the record owner may
    /// update.
    pub fn update_record<'info>(
        ctx: Context<'_, '_, '_, 'info, UpdateRecord<'info>>,
        inputs: Vec<u8>,
        new_rdata: RData,
    ) -> Result<()> {
        // Deserialize the Light Protocol related data.
        let inputs = LightInstructionData::deserialize(&inputs)?;
        // Require accounts to be provided.
        let accounts = inputs
            .accounts
            .as_ref()
            .ok_or(LightSdkError::ExpectedAccounts)?;
        // Convert `LightAccountMeta` to `LightAccount`.
        let mut record: LightAccount<'_, NameRecord> =
            LightAccount::from_meta_mut(&accounts[0], NameRecord::discriminator(), &crate::ID)?;
        // Check the ownership of the `record`.
        if record.owner != ctx.accounts.signer.key() {
            return err!(CustomError::Unauthorized);
        }
        record.rdata = new_rdata;
        verify_light_accounts(&ctx, inputs.proof, &[record], None, false, None)?;
        Ok(())
    }

    /// Closes an existing record; only the record owner may delete.
    pub fn delete_record<'info>(
        ctx: Context<'_, '_, '_, 'info, DeleteRecord<'info>>,
        inputs: Vec<u8>,
    ) -> Result<()> {
        let inputs = LightInstructionData::deserialize(&inputs)?;
        let accounts = inputs
            .accounts
            .as_ref()
            .ok_or(LightSdkError::ExpectedAccounts)?;
        let record: LightAccount<'_, NameRecord> =
            LightAccount::from_meta_close(&accounts[0], NameRecord::discriminator(), &crate::ID)?;
        if record.owner != ctx.accounts.signer.key() {
            return err!(CustomError::Unauthorized);
        }
        verify_light_accounts(&ctx, inputs.proof, &[record], None, false, None)?;
        Ok(())
    }
}
/// DNS-style record data: an IPv4 address, an IPv6 address, or a CNAME.
#[derive(Clone, Debug, Eq, PartialEq, BorshDeserialize, BorshSerialize)]
pub enum RData {
    A(Ipv4Addr),
    AAAA(Ipv6Addr),
    CName(String),
}

// No-op IDL build impl so Anchor can accept `RData` in instruction args.
impl anchor_lang::IdlBuild for RData {}

impl AsByteVec for RData {
    /// Byte representation used when hashing the containing account.
    /// NOTE(review): only the payload bytes are emitted, not the variant tag —
    /// presumably `A(x)` and `AAAA`/`CName` payloads can never collide by
    /// length; confirm this is intentional.
    fn as_byte_vec(&self) -> Vec<Vec<u8>> {
        match self {
            Self::A(ipv4_addr) => vec![ipv4_addr.octets().to_vec()],
            Self::AAAA(ipv6_addr) => vec![ipv6_addr.octets().to_vec()],
            Self::CName(cname) => cname.as_byte_vec(),
        }
    }
}

impl Default for RData {
    /// Defaults to the IPv4 loopback address.
    fn default() -> Self {
        Self::A(Ipv4Addr::new(127, 0, 0, 1))
    }
}
/// Compressed account payload: a name mapped to record data, owned by a
/// wallet. `#[truncate]` marks fields whose hash input is truncated to fit
/// the field size expected by `LightHasher` — TODO confirm exact semantics
/// against the light-sdk docs.
#[derive(
    Clone, Debug, Default, AnchorDeserialize, AnchorSerialize, LightDiscriminator, LightHasher,
)]
pub struct NameRecord {
    // Wallet allowed to update/delete this record.
    #[truncate]
    pub owner: Pubkey,
    // The record's name, e.g. "example.io".
    #[truncate]
    pub name: String,
    pub rdata: RData,
}

/// Errors surfaced to clients by the instruction handlers.
#[error_code]
pub enum CustomError {
    #[msg("No authority to perform this action")]
    Unauthorized,
}
/// Accounts for `create_record`. `#[light_system_accounts]` appends the Light
/// Protocol system accounts (light_system_program, registered_program_pda,
/// noop_program, account_compression_authority, account_compression_program,
/// system_program); `LightTraits` wires the annotated fields into the CPI
/// helpers.
#[light_system_accounts]
#[derive(Accounts, LightTraits)]
pub struct CreateRecord<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::NameService>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
}

/// Accounts for `update_record`; same layout as `CreateRecord`.
#[light_system_accounts]
#[derive(Accounts, LightTraits)]
pub struct UpdateRecord<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::NameService>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
}

/// Accounts for `delete_record`; same layout as `CreateRecord`.
#[light_system_accounts]
#[derive(Accounts, LightTraits)]
pub struct DeleteRecord<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::NameService>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service/Cargo.toml
|
[package]
name = "name-service"
version = "0.7.0"
description = "Created with Anchor"
edition = "2021"
rust-version = "1.75.0"
license = "Apache-2.0"
[lib]
crate-type = ["cdylib", "lib"]
name = "name_service"
[features]
no-entrypoint = []
no-idl = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
default = ["idl-build"]
test-sbf = []
bench-sbf = []
idl-build = ["anchor-lang/idl-build", "light-sdk/idl-build"]
[dependencies]
anchor-lang = { workspace=true}
borsh = { workspace = true }
light-hasher = { workspace = true, features = ["solana"] }
light-macros = { workspace = true }
light-sdk = { workspace = true }
light-sdk-macros = { workspace = true }
light-utils = { workspace = true }
light-verifier = { workspace = true }
[target.'cfg(not(target_os = "solana"))'.dependencies]
solana-sdk = { workspace = true }
[dev-dependencies]
light-client = { workspace = true , features = ["devenv"]}
light-test-utils = { path = "../../../../test-utils", version = "1.2.0", features = ["devenv"] }
light-program-test = { workspace = true }
solana-program-test = { workspace = true }
tokio = "1.36.0"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service/Xargo.toml
|
[target.bpfel-unknown-unknown.dependencies.std]
features = []
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service/tests/test.rs
|
#![cfg(feature = "test-sbf")]
use std::net::{Ipv4Addr, Ipv6Addr};
use anchor_lang::{AnchorDeserialize, InstructionData, ToAccountMetas};
use light_client::indexer::{AddressMerkleTreeAccounts, Indexer, StateMerkleTreeAccounts};
use light_client::rpc::merkle_tree::MerkleTreeExt;
use light_program_test::test_env::{setup_test_programs_with_accounts_v2, EnvAccounts};
use light_program_test::test_indexer::TestIndexer;
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_sdk::address::{derive_address, derive_address_seed};
use light_sdk::compressed_account::CompressedAccountWithMerkleContext;
use light_sdk::error::LightSdkError;
use light_sdk::merkle_context::{
pack_address_merkle_context, pack_merkle_context, AddressMerkleContext, MerkleContext,
PackedAddressMerkleContext, PackedMerkleContext, RemainingAccounts,
};
use light_sdk::utils::get_cpi_authority_pda;
use light_sdk::verify::find_cpi_signer;
use light_sdk::{PROGRAM_ID_ACCOUNT_COMPRESSION, PROGRAM_ID_LIGHT_SYSTEM, PROGRAM_ID_NOOP};
use light_test_utils::{RpcConnection, RpcError};
use name_service::{CustomError, NameRecord, RData};
use solana_sdk::instruction::{Instruction, InstructionError};
use solana_sdk::native_token::LAMPORTS_PER_SOL;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::transaction::{Transaction, TransactionError};
#[tokio::test]
async fn test_name_service() {
let (mut rpc, env) = setup_test_programs_with_accounts_v2(Some(vec![(
String::from("name_service"),
name_service::ID,
)]))
.await;
let payer = rpc.get_payer().insecure_clone();
let mut test_indexer: TestIndexer<ProgramTestRpcConnection> = TestIndexer::new(
&[StateMerkleTreeAccounts {
merkle_tree: env.merkle_tree_pubkey,
nullifier_queue: env.nullifier_queue_pubkey,
cpi_context: env.cpi_context_account_pubkey,
}],
&[AddressMerkleTreeAccounts {
merkle_tree: env.address_merkle_tree_pubkey,
queue: env.address_merkle_tree_queue_pubkey,
}],
true,
true,
)
.await;
let name = "example.io";
let mut remaining_accounts = RemainingAccounts::default();
let merkle_context = MerkleContext {
merkle_tree_pubkey: env.merkle_tree_pubkey,
nullifier_queue_pubkey: env.nullifier_queue_pubkey,
leaf_index: 0,
queue_index: None,
};
let merkle_context = pack_merkle_context(merkle_context, &mut remaining_accounts);
let address_merkle_context = AddressMerkleContext {
address_merkle_tree_pubkey: env.address_merkle_tree_pubkey,
address_queue_pubkey: env.address_merkle_tree_queue_pubkey,
};
let address_seed = derive_address_seed(&[b"name-service", name.as_bytes()], &name_service::ID);
let address = derive_address(&address_seed, &address_merkle_context);
let address_merkle_context =
pack_address_merkle_context(address_merkle_context, &mut remaining_accounts);
let account_compression_authority = get_cpi_authority_pda(&PROGRAM_ID_LIGHT_SYSTEM);
let registered_program_pda = Pubkey::find_program_address(
&[PROGRAM_ID_LIGHT_SYSTEM.to_bytes().as_slice()],
&PROGRAM_ID_ACCOUNT_COMPRESSION,
)
.0;
// Create the example.io -> 10.0.1.25 record.
let rdata_1 = RData::A(Ipv4Addr::new(10, 0, 1, 25));
create_record(
&name,
&rdata_1,
&mut rpc,
&mut test_indexer,
&env,
&mut remaining_accounts,
&payer,
&address,
&merkle_context,
&address_merkle_context,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await
.unwrap();
// Create with invalid light-system-program ID, should not succeed.
{
let result = create_record(
&name,
&rdata_1,
&mut rpc,
&mut test_indexer,
&env,
&mut remaining_accounts,
&payer,
&address,
&merkle_context,
&address_merkle_context,
&account_compression_authority,
®istered_program_pda,
&Pubkey::new_unique(),
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(LightSdkError::InvalidLightSystemProgram)
));
}
// Check that it was created correctly.
let compressed_accounts = test_indexer.get_compressed_accounts_by_owner(&name_service::ID);
assert_eq!(compressed_accounts.len(), 1);
let compressed_account = &compressed_accounts[0];
let record = &compressed_account
.compressed_account
.data
.as_ref()
.unwrap()
.data;
let record = NameRecord::deserialize(&mut &record[..]).unwrap();
assert_eq!(record.name, "example.io");
assert_eq!(record.rdata, rdata_1);
// Update the record to example.io -> 2001:db8::1.
let rdata_2 = RData::AAAA(Ipv6Addr::new(8193, 3512, 0, 0, 0, 0, 0, 1));
update_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&rdata_2,
&payer,
compressed_account,
&address_merkle_context,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await
.unwrap();
// Update with invalid owner, should not succeed.
{
let invalid_signer = Keypair::new();
rpc.airdrop_lamports(&invalid_signer.pubkey(), LAMPORTS_PER_SOL * 1)
.await
.unwrap();
let result = update_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&rdata_2,
&invalid_signer,
compressed_account,
&address_merkle_context,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(CustomError::Unauthorized)
));
}
// Update with invalid light-system-program ID, should not succeed.
{
let result = update_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&rdata_2,
&payer,
compressed_account,
&address_merkle_context,
&account_compression_authority,
®istered_program_pda,
&Pubkey::new_unique(),
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(LightSdkError::InvalidLightSystemProgram)
));
}
// Check that it was updated correctly.
let compressed_accounts = test_indexer.get_compressed_accounts_by_owner(&name_service::ID);
assert_eq!(compressed_accounts.len(), 1);
let compressed_account = &compressed_accounts[0];
let record = &compressed_account
.compressed_account
.data
.as_ref()
.unwrap()
.data;
let record = NameRecord::deserialize(&mut &record[..]).unwrap();
assert_eq!(record.name, "example.io");
assert_eq!(record.rdata, rdata_2);
// Delete with invalid owner, should not succeed.
{
let invalid_signer = Keypair::new();
rpc.airdrop_lamports(&invalid_signer.pubkey(), LAMPORTS_PER_SOL * 1)
.await
.unwrap();
let result = delete_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&invalid_signer,
compressed_account,
&address_merkle_context,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(CustomError::Unauthorized)
));
}
// Delete with invalid light-system-program ID, should not succeed.
{
let result = delete_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&payer,
compressed_account,
&address_merkle_context,
&account_compression_authority,
®istered_program_pda,
&Pubkey::new_unique(),
)
.await;
assert!(matches!(
result,
Err(RpcError::TransactionError(
TransactionError::InstructionError(0, InstructionError::Custom(error))
))if error == u32::from(LightSdkError::InvalidLightSystemProgram)
));
}
// Delete the example.io record.
delete_record(
&mut rpc,
&mut test_indexer,
&mut remaining_accounts,
&payer,
compressed_account,
&address_merkle_context,
&account_compression_authority,
®istered_program_pda,
&PROGRAM_ID_LIGHT_SYSTEM,
)
.await
.unwrap();
}
/// Builds and submits a `CreateRecord` instruction for the macro-based
/// name-service program, proving non-inclusion of the derived address.
///
/// On success the emitted event is fed back into `test_indexer` so the new
/// compressed account is visible to subsequent lookups.
async fn create_record<R>(
    name: &str,
    rdata: &RData,
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    env: &EnvAccounts,
    remaining_accounts: &mut RemainingAccounts,
    payer: &Keypair,
    address: &[u8; 32],
    merkle_context: &PackedMerkleContext,
    address_merkle_context: &PackedAddressMerkleContext,
    account_compression_authority: &Pubkey,
    registered_program_pda: &Pubkey,
    light_system_program: &Pubkey,
) -> Result<(), RpcError>
where
    R: RpcConnection + MerkleTreeExt,
{
    // Non-inclusion proof for the address being created.
    let rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            None,
            None,
            Some(&[*address]),
            Some(vec![env.address_merkle_tree_pubkey]),
            rpc,
        )
        .await;
    let instruction_data = name_service::instruction::CreateRecord {
        // No input compressed accounts on creation, so `inputs` is empty and
        // `merkle_tree_root_index` is presumably ignored — confirm against the
        // `#[light_program]` macro expansion.
        inputs: Vec::new(),
        proof: rpc_result.proof,
        merkle_context: *merkle_context,
        merkle_tree_root_index: 0,
        address_merkle_context: *address_merkle_context,
        address_merkle_tree_root_index: rpc_result.address_root_indices[0],
        name: name.to_string(),
        rdata: rdata.clone(),
    };
    let cpi_signer = find_cpi_signer(&name_service::ID);
    let accounts = name_service::accounts::CreateRecord {
        signer: payer.pubkey(),
        light_system_program: *light_system_program,
        account_compression_program: PROGRAM_ID_ACCOUNT_COMPRESSION,
        account_compression_authority: *account_compression_authority,
        registered_program_pda: *registered_program_pda,
        noop_program: PROGRAM_ID_NOOP,
        self_program: name_service::ID,
        cpi_signer,
        system_program: solana_sdk::system_program::id(),
    };
    // Static accounts first, then the packed remaining (tree/queue) accounts.
    let remaining_accounts = remaining_accounts.to_account_metas();
    let instruction = Instruction {
        program_id: name_service::ID,
        accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
        data: instruction_data.data(),
    };
    let event = rpc
        .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None)
        .await?;
    test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0);
    Ok(())
}
/// Builds and submits an `UpdateRecord` instruction for the macro-based
/// program, proving inclusion of the current compressed account state.
///
/// On success the emitted event is fed back into `test_indexer` so later
/// lookups see the updated account.
async fn update_record<R>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    remaining_accounts: &mut RemainingAccounts,
    new_rdata: &RData,
    payer: &Keypair,
    compressed_account: &CompressedAccountWithMerkleContext,
    address_merkle_context: &PackedAddressMerkleContext,
    account_compression_authority: &Pubkey,
    registered_program_pda: &Pubkey,
    light_system_program: &Pubkey,
) -> Result<(), RpcError>
where
    R: RpcConnection + MerkleTreeExt,
{
    // Inclusion proof for the account being mutated.
    let hash = compressed_account.hash().unwrap();
    let merkle_tree_pubkey = compressed_account.merkle_context.merkle_tree_pubkey;
    let rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&[hash]),
            Some(&[merkle_tree_pubkey]),
            None,
            None,
            rpc,
        )
        .await;
    let merkle_context = pack_merkle_context(compressed_account.merkle_context, remaining_accounts);
    // `inputs` carries the serialized current `NameRecord` so the program can
    // reconstruct the account being updated.
    let inputs = vec![
        compressed_account
            .compressed_account
            .data
            .clone()
            .unwrap()
            .data,
    ];
    let instruction_data = name_service::instruction::UpdateRecord {
        inputs,
        proof: rpc_result.proof,
        merkle_context,
        merkle_tree_root_index: rpc_result.root_indices[0],
        address_merkle_context: *address_merkle_context,
        // No new address is created, so the address root index is presumably
        // unused here — confirm against the macro expansion.
        address_merkle_tree_root_index: 0,
        new_rdata: new_rdata.clone(),
    };
    let cpi_signer = find_cpi_signer(&name_service::ID);
    let accounts = name_service::accounts::UpdateRecord {
        signer: payer.pubkey(),
        light_system_program: *light_system_program,
        account_compression_program: PROGRAM_ID_ACCOUNT_COMPRESSION,
        account_compression_authority: *account_compression_authority,
        registered_program_pda: *registered_program_pda,
        noop_program: PROGRAM_ID_NOOP,
        self_program: name_service::ID,
        cpi_signer,
        system_program: solana_sdk::system_program::id(),
    };
    // Static accounts first, then the packed remaining (tree/queue) accounts.
    let remaining_accounts = remaining_accounts.to_account_metas();
    let instruction = Instruction {
        program_id: name_service::ID,
        accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
        data: instruction_data.data(),
    };
    let event = rpc
        .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None)
        .await?;
    test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0);
    Ok(())
}
/// Builds and submits a `DeleteRecord` instruction for the macro-based
/// program, proving inclusion of the current compressed account state.
///
/// NOTE(review): the transaction is sent without capturing the event, so
/// `test_indexer` is not updated — presumably fine because deletion is the
/// final step of the test; confirm.
async fn delete_record<R>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    remaining_accounts: &mut RemainingAccounts,
    payer: &Keypair,
    compressed_account: &CompressedAccountWithMerkleContext,
    address_merkle_context: &PackedAddressMerkleContext,
    account_compression_authority: &Pubkey,
    registered_program_pda: &Pubkey,
    light_system_program: &Pubkey,
) -> Result<(), RpcError>
where
    R: RpcConnection + MerkleTreeExt,
{
    // Inclusion proof for the account being closed.
    let hash = compressed_account.hash().unwrap();
    let merkle_tree_pubkey = compressed_account.merkle_context.merkle_tree_pubkey;
    let rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&[hash]),
            Some(&[merkle_tree_pubkey]),
            None,
            None,
            rpc,
        )
        .await;
    let merkle_context = pack_merkle_context(compressed_account.merkle_context, remaining_accounts);
    // `inputs` carries the serialized current `NameRecord` being closed.
    let inputs = vec![
        compressed_account
            .compressed_account
            .data
            .clone()
            .unwrap()
            .data,
    ];
    let instruction_data = name_service::instruction::DeleteRecord {
        inputs,
        proof: rpc_result.proof,
        merkle_context,
        merkle_tree_root_index: rpc_result.root_indices[0],
        address_merkle_context: *address_merkle_context,
        // No new address is created, so the address root index is presumably
        // unused here — confirm against the macro expansion.
        address_merkle_tree_root_index: 0,
    };
    let cpi_signer = find_cpi_signer(&name_service::ID);
    let accounts = name_service::accounts::DeleteRecord {
        signer: payer.pubkey(),
        light_system_program: *light_system_program,
        account_compression_program: PROGRAM_ID_ACCOUNT_COMPRESSION,
        account_compression_authority: *account_compression_authority,
        registered_program_pda: *registered_program_pda,
        noop_program: PROGRAM_ID_NOOP,
        self_program: name_service::ID,
        cpi_signer,
        system_program: solana_sdk::system_program::id(),
    };
    // Static accounts first, then the packed remaining (tree/queue) accounts.
    let remaining_accounts = remaining_accounts.to_account_metas();
    let instruction = Instruction {
        program_id: name_service::ID,
        accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
        data: instruction_data.data(),
    };
    let transaction = Transaction::new_signed_with_payer(
        &[instruction],
        Some(&payer.pubkey()),
        &[&payer],
        rpc.get_latest_blockhash().await.unwrap(),
    );
    rpc.process_transaction(transaction).await?;
    Ok(())
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service
|
solana_public_repos/Lightprotocol/light-protocol/examples/name-service/programs/name-service/src/lib.rs
|
use std::net::{Ipv4Addr, Ipv6Addr};
use anchor_lang::prelude::*;
use borsh::{BorshDeserialize, BorshSerialize};
use light_hasher::bytes::AsByteVec;
use light_sdk::{
account::LightAccount, light_account, light_accounts, light_program,
merkle_context::PackedAddressMerkleContext,
};
declare_id!("7yucc7fL3JGbyMwg4neUaenNSdySS39hbAk89Ao3t1Hz");
// `#[light_program]` generates the compressed-account plumbing (proof
// verification, account init/mut/close, CPI into light-system-program) from
// the `#[light_accounts]` structs, so the handlers only contain business
// logic.
#[light_program]
#[program]
pub mod name_service {
    use super::*;

    /// Creates a `NameRecord`; the account and its address seeds are set up
    /// by the macro from the `CreateRecord` struct.
    pub fn create_record<'info>(
        ctx: LightContext<'_, '_, '_, 'info, CreateRecord<'info>>,
        name: String,
        rdata: RData,
    ) -> Result<()> {
        ctx.light_accounts.record.owner = ctx.accounts.signer.key();
        ctx.light_accounts.record.name = name;
        ctx.light_accounts.record.rdata = rdata;
        Ok(())
    }

    /// Replaces `rdata`; the owner check lives in the `UpdateRecord`
    /// `constraint` attribute.
    pub fn update_record<'info>(
        ctx: LightContext<'_, '_, '_, 'info, UpdateRecord<'info>>,
        new_rdata: RData,
    ) -> Result<()> {
        ctx.light_accounts.record.rdata = new_rdata;
        Ok(())
    }

    /// Closes the record; all work (owner check, close) is macro-generated
    /// from the `DeleteRecord` struct, so the body is empty.
    pub fn delete_record<'info>(
        ctx: LightContext<'_, '_, '_, 'info, DeleteRecord<'info>>,
    ) -> Result<()> {
        Ok(())
    }
}
/// DNS-style record data: an IPv4 address, an IPv6 address, or a CNAME.
#[derive(Clone, Debug, Eq, PartialEq, BorshDeserialize, BorshSerialize)]
pub enum RData {
    A(Ipv4Addr),
    AAAA(Ipv6Addr),
    CName(String),
}

// No-op IDL build impl so Anchor can accept `RData` in instruction args.
impl anchor_lang::IdlBuild for RData {}

impl AsByteVec for RData {
    /// Byte representation used when hashing the containing account.
    /// NOTE(review): only the payload bytes are emitted, not the variant tag —
    /// confirm that cross-variant collisions are impossible or acceptable.
    fn as_byte_vec(&self) -> Vec<Vec<u8>> {
        match self {
            Self::A(ipv4_addr) => vec![ipv4_addr.octets().to_vec()],
            Self::AAAA(ipv6_addr) => vec![ipv6_addr.octets().to_vec()],
            Self::CName(cname) => cname.as_byte_vec(),
        }
    }
}

impl Default for RData {
    /// Defaults to the IPv4 loopback address.
    fn default() -> Self {
        Self::A(Ipv4Addr::new(127, 0, 0, 1))
    }
}

/// Compressed account payload: a name mapped to record data, owned by a
/// wallet. `#[light_account]` derives the discriminator/hashing plumbing.
#[light_account]
#[derive(Clone, Debug, Default)]
pub struct NameRecord {
    // Wallet allowed to update/delete this record.
    #[truncate]
    pub owner: Pubkey,
    // The record's name, e.g. "example.io".
    #[truncate]
    pub name: String,
    pub rdata: RData,
}

/// Errors surfaced to clients via `constraint = ... @ CustomError::...`.
#[error_code]
pub enum CustomError {
    #[msg("No authority to perform this action")]
    Unauthorized,
}
/// Accounts for `create_record`. `#[light_accounts]` appends the Light
/// Protocol system accounts and generates init logic for `record` from the
/// `#[light_account(init, ...)]` attribute; the address is seeded from the
/// instruction's `name` argument.
#[light_accounts]
#[instruction(name: String)]
pub struct CreateRecord<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::NameService>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
    #[light_account(init, seeds = [b"name-service", name.as_bytes()])]
    pub record: LightAccount<NameRecord>,
}

/// Accounts for `update_record`; the `constraint` enforces that only the
/// record owner may update.
#[light_accounts]
pub struct UpdateRecord<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::NameService>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
    #[light_account(
        mut,
        seeds = [b"name-service", record.name.as_bytes()],
        constraint = record.owner == signer.key() @ CustomError::Unauthorized
    )]
    pub record: LightAccount<NameRecord>,
}

/// Accounts for `delete_record`; the `constraint` enforces that only the
/// record owner may delete, and `close` consumes the account.
#[light_accounts]
pub struct DeleteRecord<'info> {
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    #[self_program]
    pub self_program: Program<'info, crate::program::NameService>,
    /// CHECK: Checked in light-system-program.
    #[authority]
    pub cpi_signer: AccountInfo<'info>,
    #[light_account(
        close,
        seeds = [b"name-service", record.name.as_bytes()],
        constraint = record.owner == signer.key() @ CustomError::Unauthorized
    )]
    pub record: LightAccount<NameRecord>,
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/next.config.mjs
|
/** @type {import('next').NextConfig} */
const nextConfig = {
webpack: (config, { isServer }) => {
// Fix for Node.js modules in Webpack 5+
if (!isServer) {
config.resolve.fallback = {
...config.resolve.fallback,
fs: false,
path: false,
os: false,
};
}
return config;
},
};
export default nextConfig;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/README.md
|
This project demonstrates how to use `@lightprotocol/stateless.js` to interact
with the ZK Compression API in a browser environment.
0. Build the Monorepo.
```bash
cd ../../../ &&
. ./scripts/devenv.sh &&
./scripts/install.sh &&
./scripts/build.sh
```
1. Start a light test-validator using the CLI
```bash
cd cli &&
light test-validator
```
2. Start the app
```bash
cd ../examples/browser/nextjs &&
pnpm dev
```
This will serve and mount the app at http://localhost:1234 and run the code
defined in `page.tsx`.
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/package.json
|
{
"name": "nextjs",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev -p 1234",
"build": "next build",
"build:browser": "next build",
"start": "next start -p 1234",
"test-validator": "./../../../cli/test_bin/run test-validator",
"lint": "next lint"
},
"dependencies": {
"bs58": "^6.0.0",
"@coral-xyz/anchor": "^0.30.0",
"@lightprotocol/stateless.js": "workspace:*",
"@lightprotocol/compressed-token": "workspace:*",
"@solana/wallet-adapter-base": "^0.9.23",
"@solana/wallet-adapter-react": "^0.15.35",
"@solana/wallet-adapter-react-ui": "^0.9.35",
"@solana/wallet-adapter-unsafe-burner": "^0.1.7",
"@solana/web3.js": "^1.95.3",
"next": "15.0.4",
"react": "^19",
"react-dom": "^19"
},
"devDependencies": {
"@types/node": "^22.4.1",
"@types/react": "^19",
"@types/react-dom": "^19",
"eslint": "8.57.0",
"eslint-config-next": "15.1.0",
"typescript": "^5.5.4"
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/.prettierrc
|
{
"semi": true,
"trailingComma": "all",
"singleQuote": true,
"printWidth": 80,
"useTabs": false,
"tabWidth": 4,
"bracketSpacing": true,
"arrowParens": "avoid"
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/tsconfig.json
|
{
"compilerOptions": {
"lib": [
"dom",
"dom.iterable",
"esnext"
],
"allowJs": true,
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve",
"incremental": true,
"plugins": [
{
"name": "next"
}
],
"paths": {
"@/*": [
"./src/*"
]
},
"target": "ES2017"
},
"include": [
"next-env.d.ts",
"**/*.ts",
"**/*.tsx",
".next/types/**/*.ts"
],
"exclude": [
"node_modules"
]
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/.eslintrc.json
|
{
"extends": "next/core-web-vitals"
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/public/vercel.svg
|
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 283 64"><path fill="black" d="M141 16c-11 0-19 7-19 18s9 18 20 18c7 0 13-3 16-7l-7-5c-2 3-6 4-9 4-5 0-9-3-10-7h28v-3c0-11-8-18-19-18zm-9 15c1-4 4-7 9-7s8 3 9 7h-18zm117-15c-11 0-19 7-19 18s9 18 20 18c6 0 12-3 16-7l-8-5c-2 3-5 4-8 4-5 0-9-3-11-7h28l1-3c0-11-8-18-19-18zm-10 15c2-4 5-7 10-7s8 3 9 7h-19zm-39 3c0 6 4 10 10 10 4 0 7-2 9-5l8 5c-3 5-9 8-17 8-11 0-19-7-19-18s8-18 19-18c8 0 14 3 17 8l-8 5c-2-3-5-5-9-5-6 0-10 4-10 10zm83-29v46h-9V5h9zM37 0l37 64H0L37 0zm92 5-27 48L74 5h10l18 30 17-30h10zm59 12v10l-3-1c-6 0-10 4-10 10v15h-9V17h9v9c0-5 6-9 13-9z"/></svg>
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/public/next.svg
|
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 394 80"><path fill="#000" d="M262 0h68.5v12.7h-27.2v66.6h-13.6V12.7H262V0ZM149 0v12.7H94v20.4h44.3v12.6H94v21h55v12.6H80.5V0h68.7zm34.3 0h-17.8l63.8 79.4h17.9l-32-39.7 32-39.6h-17.9l-23 28.6-23-28.6zm18.3 56.7-9-11-27.1 33.7h17.8l18.3-22.7z"/><path fill="#000" d="M81 79.3 17 0H0v79.3h13.6V17l50.2 62.3H81Zm252.6-.4c-1 0-1.8-.4-2.5-1s-1.1-1.6-1.1-2.6.3-1.8 1-2.5 1.6-1 2.6-1 1.8.3 2.5 1a3.4 3.4 0 0 1 .6 4.3 3.7 3.7 0 0 1-3 1.8zm23.2-33.5h6v23.3c0 2.1-.4 4-1.3 5.5a9.1 9.1 0 0 1-3.8 3.5c-1.6.8-3.5 1.3-5.7 1.3-2 0-3.7-.4-5.3-1s-2.8-1.8-3.7-3.2c-.9-1.3-1.4-3-1.4-5h6c.1.8.3 1.6.7 2.2s1 1.2 1.6 1.5c.7.4 1.5.5 2.4.5 1 0 1.8-.2 2.4-.6a4 4 0 0 0 1.6-1.8c.3-.8.5-1.8.5-3V45.5zm30.9 9.1a4.4 4.4 0 0 0-2-3.3 7.5 7.5 0 0 0-4.3-1.1c-1.3 0-2.4.2-3.3.5-.9.4-1.6 1-2 1.6a3.5 3.5 0 0 0-.3 4c.3.5.7.9 1.3 1.2l1.8 1 2 .5 3.2.8c1.3.3 2.5.7 3.7 1.2a13 13 0 0 1 3.2 1.8 8.1 8.1 0 0 1 3 6.5c0 2-.5 3.7-1.5 5.1a10 10 0 0 1-4.4 3.5c-1.8.8-4.1 1.2-6.8 1.2-2.6 0-4.9-.4-6.8-1.2-2-.8-3.4-2-4.5-3.5a10 10 0 0 1-1.7-5.6h6a5 5 0 0 0 3.5 4.6c1 .4 2.2.6 3.4.6 1.3 0 2.5-.2 3.5-.6 1-.4 1.8-1 2.4-1.7a4 4 0 0 0 .8-2.4c0-.9-.2-1.6-.7-2.2a11 11 0 0 0-2.1-1.4l-3.2-1-3.8-1c-2.8-.7-5-1.7-6.6-3.2a7.2 7.2 0 0 1-2.4-5.7 8 8 0 0 1 1.7-5 10 10 0 0 1 4.3-3.5c2-.8 4-1.2 6.4-1.2 2.3 0 4.4.4 6.2 1.2 1.8.8 3.2 2 4.3 3.4 1 1.4 1.5 3 1.5 5h-5.8z"/></svg>
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/src
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/src/app/page.module.css
|
.main {
display: flex;
flex-direction: column;
justify-content: space-between;
align-items: center;
padding: 6rem;
min-height: 100vh;
}
.description {
display: inherit;
justify-content: inherit;
align-items: inherit;
font-size: 0.85rem;
max-width: var(--max-width);
width: 100%;
z-index: 2;
font-family: var(--font-mono);
}
.description a {
display: flex;
justify-content: center;
align-items: center;
gap: 0.5rem;
}
.description p {
position: relative;
margin: 0;
padding: 1rem;
background-color: rgba(var(--callout-rgb), 0.5);
border: 1px solid rgba(var(--callout-border-rgb), 0.3);
border-radius: var(--border-radius);
}
.code {
font-weight: 700;
font-family: var(--font-mono);
}
.grid {
display: grid;
grid-template-columns: repeat(4, minmax(25%, auto));
max-width: 100%;
width: var(--max-width);
}
.card {
padding: 1rem 1.2rem;
border-radius: var(--border-radius);
background: rgba(var(--card-rgb), 0);
border: 1px solid rgba(var(--card-border-rgb), 0);
transition: background 200ms, border 200ms;
}
.card span {
display: inline-block;
transition: transform 200ms;
}
.card h2 {
font-weight: 600;
margin-bottom: 0.7rem;
}
.card p {
margin: 0;
opacity: 0.6;
font-size: 0.9rem;
line-height: 1.5;
max-width: 30ch;
text-wrap: balance;
}
.center {
display: flex;
justify-content: center;
align-items: center;
position: relative;
padding: 4rem 0;
}
.center::before {
background: var(--secondary-glow);
border-radius: 50%;
width: 480px;
height: 360px;
margin-left: -400px;
}
.center::after {
background: var(--primary-glow);
width: 240px;
height: 180px;
z-index: -1;
}
.center::before,
.center::after {
content: "";
left: 50%;
position: absolute;
filter: blur(45px);
transform: translateZ(0);
}
.logo {
position: relative;
}
/* Enable hover only on non-touch devices */
@media (hover: hover) and (pointer: fine) {
.card:hover {
background: rgba(var(--card-rgb), 0.1);
border: 1px solid rgba(var(--card-border-rgb), 0.15);
}
.card:hover span {
transform: translateX(4px);
}
}
@media (prefers-reduced-motion) {
.card:hover span {
transform: none;
}
}
/* Mobile */
@media (max-width: 700px) {
.content {
padding: 4rem;
}
.grid {
grid-template-columns: 1fr;
margin-bottom: 120px;
max-width: 320px;
text-align: center;
}
.card {
padding: 1rem 2.5rem;
}
.card h2 {
margin-bottom: 0.5rem;
}
.center {
padding: 8rem 0 6rem;
}
.center::before {
transform: none;
height: 300px;
}
.description {
font-size: 0.8rem;
}
.description a {
padding: 1rem;
}
.description p,
.description div {
display: flex;
justify-content: center;
position: fixed;
width: 100%;
}
.description p {
align-items: center;
inset: 0 0 auto;
padding: 2rem 1rem 1.4rem;
border-radius: 0;
border: none;
border-bottom: 1px solid rgba(var(--callout-border-rgb), 0.25);
background: linear-gradient(
to bottom,
rgba(var(--background-start-rgb), 1),
rgba(var(--callout-rgb), 0.5)
);
background-clip: padding-box;
backdrop-filter: blur(24px);
}
.description div {
align-items: flex-end;
pointer-events: none;
inset: auto 0 0;
padding: 2rem;
height: 200px;
background: linear-gradient(
to bottom,
transparent 0%,
rgb(var(--background-end-rgb)) 40%
);
z-index: 1;
}
}
/* Tablet and Smaller Desktop */
@media (min-width: 701px) and (max-width: 1120px) {
.grid {
grid-template-columns: repeat(2, 50%);
}
}
@media (prefers-color-scheme: dark) {
.vercelLogo {
filter: invert(1);
}
.logo {
filter: invert(1) drop-shadow(0 0 0.3rem #ffffff70);
}
}
@keyframes rotate {
from {
transform: rotate(360deg);
}
to {
transform: rotate(0deg);
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/src
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/src/app/layout.tsx
|
import type { Metadata } from 'next';
import { Inter } from 'next/font/google';
import './globals.css';
const inter = Inter({ subsets: ['latin'] });
export const metadata: Metadata = {
title: 'Create Next App',
description: 'Generated by create next app',
};
export default function RootLayout({
children,
}: Readonly<{
children: React.ReactNode;
}>) {
return (
<html lang="en">
<body className={inter.className}>{children}</body>
</html>
);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/src
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/src/app/page.tsx
|
'use client';
import React, { FC, useCallback, useMemo } from 'react';
import {
ComputeBudgetProgram,
Keypair,
TransactionMessage,
VersionedTransaction,
} from '@solana/web3.js';
import {
ConnectionProvider,
WalletProvider,
useWallet,
} from '@solana/wallet-adapter-react';
import { WalletNotConnectedError } from '@solana/wallet-adapter-base';
import { UnsafeBurnerWalletAdapter } from '@solana/wallet-adapter-unsafe-burner';
import {
WalletModalProvider,
WalletDisconnectButton,
WalletMultiButton,
} from '@solana/wallet-adapter-react-ui';
import {
LightSystemProgram,
bn,
buildTx,
confirmTx,
defaultTestStateTreeAccounts,
selectMinCompressedSolAccountsForTransfer,
createRpc,
} from '@lightprotocol/stateless.js';
// Default styles that can be overridden by your app
require('@solana/wallet-adapter-react-ui/styles.css');
const SendButton: FC = () => {
const { publicKey, sendTransaction } = useWallet();
const onClick = useCallback(async () => {
const connection = await createRpc();
if (!publicKey) throw new WalletNotConnectedError();
/// airdrop
await confirmTx(
connection,
await connection.requestAirdrop(publicKey, 1e9),
);
/// compress to self
const compressInstruction = await LightSystemProgram.compress({
payer: publicKey,
toAddress: publicKey,
lamports: 1e8,
outputStateTree: defaultTestStateTreeAccounts().merkleTree,
});
const compressInstructions = [
ComputeBudgetProgram.setComputeUnitLimit({ units: 1_000_000 }),
compressInstruction,
];
const {
context: { slot: minContextSlot },
value: blockhashCtx,
} = await connection.getLatestBlockhashAndContext();
const tx = buildTx(
compressInstructions,
publicKey,
blockhashCtx.blockhash,
);
const signature = await sendTransaction(tx, connection, {
minContextSlot,
});
await connection.confirmTransaction({
blockhash: blockhashCtx.blockhash,
lastValidBlockHeight: blockhashCtx.lastValidBlockHeight,
signature,
});
console.log(
`Compressed ${1e8} lamports! txId: https://explorer.solana.com/tx/${signature}?cluster=custom`,
);
/// Send compressed SOL to a random address
const recipient = Keypair.generate().publicKey;
/// 1. We need to fetch our sol balance
const accounts =
await connection.getCompressedAccountsByOwner(publicKey);
console.log('accounts', accounts);
const [selectedAccounts, _] = selectMinCompressedSolAccountsForTransfer(
accounts.items,
1e7,
);
console.log('selectedAccounts', selectedAccounts);
/// 2. Retrieve validity proof for our selected balance
const { compressedProof, rootIndices } =
await connection.getValidityProof(
selectedAccounts.map(account => bn(account.hash)),
);
/// 3. Create and send compressed transfer
const sendInstruction = await LightSystemProgram.transfer({
payer: publicKey,
toAddress: recipient,
lamports: 1e7,
inputCompressedAccounts: selectedAccounts,
outputStateTrees: [defaultTestStateTreeAccounts().merkleTree],
recentValidityProof: compressedProof,
recentInputStateRootIndices: rootIndices,
});
const sendInstructions = [
ComputeBudgetProgram.setComputeUnitLimit({ units: 1_000_000 }),
sendInstruction,
];
const {
context: { slot: minContextSlotSend },
value: {
blockhash: blockhashSend,
lastValidBlockHeight: lastValidBlockHeightSend,
},
} = await connection.getLatestBlockhashAndContext();
const messageV0Send = new TransactionMessage({
payerKey: publicKey,
recentBlockhash: blockhashSend,
instructions: sendInstructions,
}).compileToV0Message();
const transactionSend = new VersionedTransaction(messageV0Send);
const signatureSend = await sendTransaction(
transactionSend,
connection,
{
minContextSlot: minContextSlotSend,
},
);
await connection.confirmTransaction({
blockhash: blockhashSend,
lastValidBlockHeight: lastValidBlockHeightSend,
signature: signatureSend,
});
console.log(
`Sent ${1e7} lamports to ${recipient.toBase58()} ! txId: https://explorer.solana.com/tx/${signatureSend}?cluster=custom`,
);
}, [publicKey, sendTransaction]);
return (
<button
style={{
fontSize: '1rem',
padding: '1rem',
backgroundColor: '#0066ff',
cursor: 'pointer',
}}
onClick={onClick}
disabled={!publicKey}
>
Get airdrop, compress and send SOL to a random address!
</button>
);
};
export default function Home() {
const endpoint = useMemo(() => 'http://127.0.0.1:8899', []);
const wallets = useMemo(() => [new UnsafeBurnerWalletAdapter()], []);
return (
<ConnectionProvider endpoint={endpoint}>
<WalletProvider wallets={wallets} autoConnect>
<WalletModalProvider>
<WalletMultiButton />
<WalletDisconnectButton />
<div>
<label style={{ fontSize: '1.5rem' }}>
Welcome to this very simple example using
Compression in a browser :)
</label>
</div>
<div>
<label>Check the terminal for tx signatures!</label>
</div>
<SendButton />
</WalletModalProvider>
</WalletProvider>
</ConnectionProvider>
);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/src
|
solana_public_repos/Lightprotocol/light-protocol/examples/browser/nextjs/src/app/globals.css
|
:root {
--max-width: 1100px;
--border-radius: 12px;
--font-mono: ui-monospace, Menlo, Monaco, "Cascadia Mono", "Segoe UI Mono",
"Roboto Mono", "Oxygen Mono", "Ubuntu Monospace", "Source Code Pro",
"Fira Mono", "Droid Sans Mono", "Courier New", monospace;
--foreground-rgb: 0, 0, 0;
--background-start-rgb: 214, 219, 220;
--background-end-rgb: 255, 255, 255;
--primary-glow: conic-gradient(
from 180deg at 50% 50%,
#16abff33 0deg,
#0885ff33 55deg,
#54d6ff33 120deg,
#0071ff33 160deg,
transparent 360deg
);
--secondary-glow: radial-gradient(
rgba(255, 255, 255, 1),
rgba(255, 255, 255, 0)
);
--tile-start-rgb: 239, 245, 249;
--tile-end-rgb: 228, 232, 233;
--tile-border: conic-gradient(
#00000080,
#00000040,
#00000030,
#00000020,
#00000010,
#00000010,
#00000080
);
--callout-rgb: 238, 240, 241;
--callout-border-rgb: 172, 175, 176;
--card-rgb: 180, 185, 188;
--card-border-rgb: 131, 134, 135;
}
@media (prefers-color-scheme: dark) {
:root {
--foreground-rgb: 255, 255, 255;
--background-start-rgb: 0, 0, 0;
--background-end-rgb: 0, 0, 0;
--primary-glow: radial-gradient(rgba(1, 65, 255, 0.4), rgba(1, 65, 255, 0));
--secondary-glow: linear-gradient(
to bottom right,
rgba(1, 65, 255, 0),
rgba(1, 65, 255, 0),
rgba(1, 65, 255, 0.3)
);
--tile-start-rgb: 2, 13, 46;
--tile-end-rgb: 2, 5, 19;
--tile-border: conic-gradient(
#ffffff80,
#ffffff40,
#ffffff30,
#ffffff20,
#ffffff10,
#ffffff10,
#ffffff80
);
--callout-rgb: 20, 20, 20;
--callout-border-rgb: 108, 108, 108;
--card-rgb: 100, 100, 100;
--card-border-rgb: 200, 200, 200;
}
}
* {
box-sizing: border-box;
padding: 0;
margin: 0;
}
html,
body {
max-width: 100vw;
overflow-x: hidden;
}
body {
color: rgb(var(--foreground-rgb));
background: linear-gradient(
to bottom,
transparent,
rgb(var(--background-end-rgb))
)
rgb(var(--background-start-rgb));
}
a {
color: inherit;
text-decoration: none;
}
@media (prefers-color-scheme: dark) {
html {
color-scheme: dark;
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/.prettierignore
|
.anchor
.DS_Store
target
node_modules
dist
build
test-ledger
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/README.md
|
# Token Escrow program example
This example program escrows compressed tokens into (1) a regular Solana program account and (2) a compressed-pda account.
**Note:** Breaking changes to interfaces can occur. To ensure compatibility with the latest release, please check out the [latest release branch](https://github.com/Lightprotocol/light-protocol/tree/light-v0.3.0/examples/token-escrow).
### Run the tests
In the monorepo root, run the build.sh script
```bash
source ./scripts/devenv.sh
./scripts/build.sh
mkdir -p ./target/deploy
cp ./third-party/solana-program-library/spl_noop.so ./target/deploy/spl_noop.so
anchor build
```
Then navigate to the token-escrow directory and run the rust tests:
```bash
cd examples/token-escrow/programs/token-escrow
cargo test-sbf -- --test-threads=1
```
## This program is unsafe; don't use it in production.
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/Anchor.toml
|
[toolchain]
[features]
seeds = false
skip-lint = false
[programs.localnet]
token_escrow = "GRLu2hKaAiMbxpkAM1HeXzks9YeGuz18SEgXEizVvPqX"
[programs.testnet]
token_escrow = "GRLu2hKaAiMbxpkAM1HeXzks9YeGuz18SEgXEizVvPqX"
[registry]
url = "https://api.apr.dev"
[provider]
cluster = "testnet"
wallet = "$HOME/.config/solana/id.json"
[scripts]
test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/**/*.ts"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/package.json
|
{
"scripts": {
"lint:fix": "prettier \"*/**/*{.js,.ts}\" -w",
"lint": "prettier \"*/**/*{.js,.ts}\" --check",
"test": "cargo test-sbf -p token-escrow -- --test-threads=1"
},
"dependencies": {
"@coral-xyz/anchor": "0.29.0"
},
"devDependencies": {
"chai": "^5.1.2",
"mocha": "^10.7.3",
"ts-mocha": "^10.0.0",
"@types/bn.js": "^5.1.5",
"@types/chai": "^5.0.0",
"@types/mocha": "^10.0.7",
"typescript": "^5.5.4",
"prettier": "^3.4.2"
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/tsconfig.json
|
{
"compilerOptions": {
"types": ["mocha", "chai"],
"typeRoots": ["./node_modules/@types"],
"lib": ["es2015"],
"module": "commonjs",
"target": "es6",
"esModuleInterop": true
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/migrations/deploy.ts
|
// Migrations are an early feature. Currently, they're nothing more than this
// single deploy script that's invoked from the CLI, injecting a provider
// configured from the workspace's Anchor.toml.
const anchor = require("@coral-xyz/anchor");
module.exports = async function (provider) {
// Configure client to use the provider.
anchor.setProvider(provider);
// Add your deploy script here.
};
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/tests/token-escrow.ts
|
import * as anchor from "@coral-xyz/anchor";
import { Program } from "@coral-xyz/anchor";
import { TokenEscrow } from "../target/types/token_escrow";
describe("token-escrow", () => {
// Configure the client to use the local cluster.
anchor.setProvider(anchor.AnchorProvider.env());
const program = anchor.workspace.TokenEscrow as Program<TokenEscrow>;
it("Is initialized!", async () => {
// Add your test here.
const tx = await program.methods.initialize().rpc();
console.log("Your transaction signature", tx);
});
});
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/Cargo.toml
|
[package]
name = "token-escrow"
version = "0.9.0"
description = "Solana escrow implementation using account compression"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[lib]
crate-type = ["cdylib", "lib"]
name = "token_escrow"
[features]
no-entrypoint = []
no-log-ix-name = []
cpi = ["no-entrypoint"]
custom-heap = []
default = ["custom-heap", "idl-build"]
test-sbf = []
idl-build = ["anchor-lang/idl-build", "anchor-spl/idl-build"]
[dependencies]
anchor-lang = { workspace = true, features = ["init-if-needed"] }
light-compressed-token = { workspace = true }
light-system-program = { workspace = true }
account-compression = { workspace = true }
light-hasher = { path = "../../../../merkle-tree/hasher", version = "1.1.0" }
light-verifier = { path = "../../../../circuit-lib/verifier", version = "1.1.0" }
light-sdk = { workspace = true, features = ["legacy"] }
[target.'cfg(not(target_os = "solana"))'.dependencies]
solana-sdk = { workspace = true }
[dev-dependencies]
solana-program-test = { workspace = true }
light-test-utils = { version = "1.2.0", path = "../../../../test-utils", features = ["devenv"] }
light-program-test = { workspace = true, features = ["devenv"] }
reqwest = "0.12"
tokio = { workspace = true }
light-prover-client = { path = "../../../../circuit-lib/light-prover-client", version = "1.2.0" }
num-bigint = "0.4.6"
num-traits = "0.2.19"
spl-token = { workspace = true }
anchor-spl = { workspace = true }
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/Xargo.toml
|
[target.bpfel-unknown-unknown.dependencies.std]
features = []
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/tests/test.rs
|
#![cfg(feature = "test-sbf")]
// TODO: extend this example with a swap function
// TODO: implement a version with delegate and approve
// 1. escrow tokens with pda
// create test env
// create mint and mint tokens
// escrow compressed tokens - with normal pda
// - transfer tokens to compressed token account owned by pda
// - create escrow pda and just prove that utxo exists -> read utxo from compressed token account
// release compressed tokens
use light_hasher::Poseidon;
use light_program_test::test_env::{setup_test_programs_with_accounts, EnvAccounts};
use light_prover_client::gnark::helpers::{ProofType, ProverConfig};
use light_system_program::sdk::{compressed_account::MerkleContext, event::PublicTransactionEvent};
use light_test_utils::indexer::TestIndexer;
use light_test_utils::spl::{create_mint_helper, mint_tokens_helper};
use light_test_utils::{
airdrop_lamports, assert_rpc_error, FeeConfig, Indexer, RpcConnection, RpcError,
TransactionParams,
};
use light_verifier::VerifierError;
use solana_sdk::instruction::Instruction;
use solana_sdk::signature::Keypair;
use solana_sdk::{pubkey::Pubkey, signer::Signer, transaction::Transaction};
use token_escrow::escrow_with_compressed_pda::sdk::get_token_owner_pda;
use token_escrow::escrow_with_pda::sdk::{
create_escrow_instruction, create_withdrawal_escrow_instruction, get_timelock_pda,
CreateEscrowInstructionInputs,
};
use token_escrow::{EscrowError, EscrowTimeLock};
/// Tests:
/// 1. create test env
/// 2. create mint and mint tokens
/// 3. escrow compressed tokens
/// 4. withdraw compressed tokens
/// 5. mint tokens to second payer
/// 6. escrow compressed tokens with lockup time
/// 7. try to withdraw before lockup time
/// 8. try to withdraw with invalid signer
/// 9. withdraw after lockup time
#[tokio::test]
async fn test_escrow_pda() {
let (mut rpc, env) = setup_test_programs_with_accounts(Some(vec![(
String::from("token_escrow"),
token_escrow::ID,
)]))
.await;
let payer = rpc.get_payer().insecure_clone();
let payer_pubkey = payer.pubkey();
let merkle_tree_pubkey = env.merkle_tree_pubkey;
let test_indexer = TestIndexer::init_from_env(
&payer,
&env,
Some(ProverConfig {
run_mode: None,
circuits: vec![ProofType::Inclusion],
}),
);
let mint = create_mint_helper(&mut rpc, &payer).await;
let mut test_indexer = test_indexer.await;
let amount = 10000u64;
mint_tokens_helper(
&mut rpc,
&mut test_indexer,
&merkle_tree_pubkey,
&payer,
&mint,
vec![amount],
vec![payer.pubkey()],
)
.await;
let escrow_amount = 100u64;
let lockup_time = 0u64;
perform_escrow_with_event(
&mut rpc,
&mut test_indexer,
&env,
&payer,
&escrow_amount,
&lockup_time,
)
.await
.unwrap();
assert_escrow(
&mut rpc,
&test_indexer,
&payer_pubkey,
amount,
escrow_amount,
&lockup_time,
)
.await;
println!("withdrawal _----------------------------------------------------------------");
let withdrawal_amount = 50u64;
perform_withdrawal_with_event(
&mut rpc,
&mut test_indexer,
&env,
&payer,
&withdrawal_amount,
None,
)
.await
.unwrap();
assert_withdrawal(
&test_indexer,
&payer_pubkey,
withdrawal_amount,
escrow_amount,
);
let second_payer = Keypair::new();
let second_payer_pubkey = second_payer.pubkey();
println!("second payer pub key {:?}", second_payer_pubkey);
let second_payer_token_balance = 1_000_000_000;
airdrop_lamports(&mut rpc, &second_payer_pubkey, 1_000_000_000)
.await
.unwrap();
mint_tokens_helper(
&mut rpc,
&mut test_indexer,
&merkle_tree_pubkey,
&payer,
&mint,
vec![second_payer_token_balance],
vec![second_payer_pubkey],
)
.await;
let escrow_amount = 100u64;
let lockup_time = 100u64;
perform_escrow_with_event(
&mut rpc,
&mut test_indexer,
&env,
&second_payer,
&escrow_amount,
&lockup_time,
)
.await
.unwrap();
assert_escrow(
&mut rpc,
&test_indexer,
&second_payer_pubkey,
second_payer_token_balance,
escrow_amount,
&lockup_time,
)
.await;
// try withdrawal before lockup time
let withdrawal_amount = 50u64;
let result = perform_withdrawal_failing(
&mut rpc,
&mut test_indexer,
&env,
&second_payer,
&withdrawal_amount,
None,
)
.await;
assert_rpc_error(result, 0, EscrowError::EscrowLocked.into()).unwrap();
rpc.warp_to_slot(1000).await.unwrap();
// try withdrawal with invalid signer
let result = perform_withdrawal_failing(
&mut rpc,
&mut test_indexer,
&env,
&second_payer,
&withdrawal_amount,
Some(payer_pubkey),
)
.await;
assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap();
perform_withdrawal_with_event(
&mut rpc,
&mut test_indexer,
&env,
&second_payer,
&withdrawal_amount,
None,
)
.await
.unwrap();
assert_withdrawal(
&test_indexer,
&second_payer_pubkey,
withdrawal_amount,
escrow_amount,
);
}
pub async fn perform_escrow<R: RpcConnection>(
rpc: &mut R,
test_indexer: &mut TestIndexer<R>,
env: &EnvAccounts,
payer: &Keypair,
escrow_amount: &u64,
lock_up_time: &u64,
) -> Instruction {
let input_compressed_token_account_data = test_indexer
.token_compressed_accounts
.iter()
.find(|x| {
println!("searching token account: {:?}", x.token_data);
println!("escrow amount: {:?}", escrow_amount);
println!("payer pub key: {:?}", payer.pubkey());
x.token_data.owner == payer.pubkey() && x.token_data.amount >= *escrow_amount
})
.expect("no account with enough tokens")
.clone();
let payer_pubkey = payer.pubkey();
let compressed_input_account_with_context = input_compressed_token_account_data
.compressed_account
.clone();
let input_compressed_account_hash = compressed_input_account_with_context
.compressed_account
.hash::<Poseidon>(
&env.merkle_tree_pubkey,
&compressed_input_account_with_context
.merkle_context
.leaf_index,
)
.unwrap();
let rpc_result = test_indexer
.create_proof_for_compressed_accounts(
Some(&[input_compressed_account_hash]),
Some(&[compressed_input_account_with_context
.merkle_context
.merkle_tree_pubkey]),
None,
None,
rpc,
)
.await;
let create_ix_inputs = CreateEscrowInstructionInputs {
input_token_data: &[input_compressed_token_account_data.token_data.clone()],
lock_up_time: *lock_up_time,
signer: &payer_pubkey,
input_merkle_context: &[MerkleContext {
leaf_index: compressed_input_account_with_context
.merkle_context
.leaf_index,
merkle_tree_pubkey: env.merkle_tree_pubkey,
nullifier_queue_pubkey: env.nullifier_queue_pubkey,
queue_index: None,
}],
output_compressed_account_merkle_tree_pubkeys: &[
env.merkle_tree_pubkey,
env.merkle_tree_pubkey,
],
output_compressed_accounts: &Vec::new(),
root_indices: &rpc_result.root_indices,
proof: &Some(rpc_result.proof),
mint: &input_compressed_token_account_data.token_data.mint,
input_compressed_accounts: &[compressed_input_account_with_context.compressed_account],
};
create_escrow_instruction(create_ix_inputs, *escrow_amount)
}
pub async fn perform_escrow_with_event<R: RpcConnection>(
rpc: &mut R,
test_indexer: &mut TestIndexer<R>,
env: &EnvAccounts,
payer: &Keypair,
escrow_amount: &u64,
lock_up_time: &u64,
) -> Result<(), RpcError> {
let instruction =
perform_escrow(rpc, test_indexer, env, payer, escrow_amount, lock_up_time).await;
let rent = rpc
.get_minimum_balance_for_rent_exemption(16)
.await
.unwrap();
let event = rpc
.create_and_send_transaction_with_event::<PublicTransactionEvent>(
&[instruction],
&payer.pubkey(),
&[payer],
Some(TransactionParams {
num_input_compressed_accounts: 1,
num_output_compressed_accounts: 2,
num_new_addresses: 0,
compress: rent as i64,
fee_config: FeeConfig::default(),
}),
)
.await?
.unwrap();
test_indexer.add_compressed_accounts_with_token_data(&event.0);
Ok(())
}
pub async fn perform_escrow_failing<R: RpcConnection>(
rpc: &mut R,
test_indexer: &mut TestIndexer<R>,
env: &EnvAccounts,
payer: &Keypair,
escrow_amount: &u64,
lock_up_time: &u64,
) -> Result<solana_sdk::signature::Signature, RpcError> {
let instruction =
perform_escrow(rpc, test_indexer, env, payer, escrow_amount, lock_up_time).await;
let transaction = Transaction::new_signed_with_payer(
&[instruction],
Some(&payer.pubkey()),
&[&payer],
rpc.get_latest_blockhash().await.unwrap(),
);
rpc.process_transaction(transaction).await
}
pub async fn assert_escrow<R: RpcConnection>(
rpc: &mut R,
test_indexer: &TestIndexer<R>,
payer_pubkey: &Pubkey,
amount: u64,
escrow_amount: u64,
lock_up_time: &u64,
) {
let token_owner_pda = get_token_owner_pda(payer_pubkey).0;
let token_data_escrow = test_indexer
.token_compressed_accounts
.iter()
.find(|x| x.token_data.owner == token_owner_pda)
.unwrap()
.token_data
.clone();
assert_eq!(token_data_escrow.amount, escrow_amount);
assert_eq!(token_data_escrow.owner, token_owner_pda);
let token_data_change_compressed_token_account =
test_indexer.token_compressed_accounts[0].token_data.clone();
assert_eq!(
token_data_change_compressed_token_account.amount,
amount - escrow_amount
);
assert_eq!(
token_data_change_compressed_token_account.owner,
*payer_pubkey
);
let time_lock_pubkey = get_timelock_pda(payer_pubkey);
let timelock_account = rpc
.get_anchor_account::<EscrowTimeLock>(&time_lock_pubkey)
.await
.unwrap()
.unwrap();
let current_slot = rpc.get_slot().await.unwrap();
assert_eq!(timelock_account.slot, *lock_up_time + current_slot);
}
/// Builds (but does not send) a withdrawal instruction for
/// `withdrawal_amount` out of the escrow owned by the payer's token-owner PDA.
///
/// `invalid_signer` substitutes a different pubkey when deriving the
/// token-owner PDA so tests can exercise failure paths; `None` uses the payer.
pub async fn perform_withdrawal<R: RpcConnection>(
context: &mut R,
test_indexer: &mut TestIndexer<R>,
env: &EnvAccounts,
payer: &Keypair,
withdrawal_amount: &u64,
invalid_signer: Option<Pubkey>,
) -> Instruction {
let payer_pubkey = payer.pubkey();
let token_owner_pda = get_token_owner_pda(&invalid_signer.unwrap_or(payer_pubkey)).0;
// Pick the first escrowed account with enough balance for the withdrawal.
let escrow_token_data_with_context = test_indexer
.token_compressed_accounts
.iter()
.find(|x| {
x.token_data.owner == token_owner_pda && x.token_data.amount >= *withdrawal_amount
})
.expect("no account with enough tokens")
.clone();
let compressed_input_account_with_context =
escrow_token_data_with_context.compressed_account.clone();
// The leaf hash identifies the input account in the state merkle tree.
let input_compressed_account_hash = compressed_input_account_with_context
.compressed_account
.hash::<Poseidon>(
&env.merkle_tree_pubkey,
&compressed_input_account_with_context
.merkle_context
.leaf_index,
)
.unwrap();
// Inclusion proof only; no new addresses are created on withdrawal.
let rpc_result = test_indexer
.create_proof_for_compressed_accounts(
Some(&[input_compressed_account_hash]),
Some(&[compressed_input_account_with_context
.merkle_context
.merkle_tree_pubkey]),
None,
None,
context,
)
.await;
let create_ix_inputs = CreateEscrowInstructionInputs {
input_token_data: &[escrow_token_data_with_context.token_data.clone()],
// lock_up_time set to 0 here — presumably unused by the withdrawal
// instruction builder; TODO confirm in the SDK.
lock_up_time: 0,
signer: &payer_pubkey,
input_merkle_context: &[MerkleContext {
leaf_index: compressed_input_account_with_context
.merkle_context
.leaf_index,
merkle_tree_pubkey: env.merkle_tree_pubkey,
nullifier_queue_pubkey: env.nullifier_queue_pubkey,
queue_index: None,
}],
// Two output trees: one for the withdrawn account, one for escrow change.
output_compressed_account_merkle_tree_pubkeys: &[
env.merkle_tree_pubkey,
env.merkle_tree_pubkey,
],
output_compressed_accounts: &Vec::new(),
root_indices: &rpc_result.root_indices,
proof: &Some(rpc_result.proof),
mint: &escrow_token_data_with_context.token_data.mint,
input_compressed_accounts: &[compressed_input_account_with_context.compressed_account],
};
create_withdrawal_escrow_instruction(create_ix_inputs, *withdrawal_amount)
}
/// Executes a withdrawal transaction and feeds the emitted
/// PublicTransactionEvent back into the test indexer so its view of
/// compressed accounts stays current.
pub async fn perform_withdrawal_with_event<R: RpcConnection>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    env: &EnvAccounts,
    payer: &Keypair,
    withdrawal_amount: &u64,
    invalid_signer: Option<Pubkey>,
) -> Result<(), RpcError> {
    let ix =
        perform_withdrawal(rpc, test_indexer, env, payer, withdrawal_amount, invalid_signer).await;
    let authority = payer.pubkey();
    // Send and capture the transaction event; a missing event is a test bug.
    let parsed_event = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[ix],
            &authority,
            &[payer],
            None,
        )
        .await?
        .unwrap();
    test_indexer.add_compressed_accounts_with_token_data(&parsed_event.0);
    Ok(())
}
/// Builds a withdrawal instruction and submits it as a raw transaction,
/// returning the signature result so tests can assert on expected failures.
pub async fn perform_withdrawal_failing<R: RpcConnection>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    env: &EnvAccounts,
    payer: &Keypair,
    withdrawal_amount: &u64,
    invalid_signer: Option<Pubkey>,
) -> Result<solana_sdk::signature::Signature, RpcError> {
    let ix =
        perform_withdrawal(rpc, test_indexer, env, payer, withdrawal_amount, invalid_signer).await;
    let recent_blockhash = rpc.get_latest_blockhash().await.unwrap();
    // No event parsing here: the caller inspects the returned Result directly.
    let tx = Transaction::new_signed_with_payer(
        &[ix],
        Some(&payer.pubkey()),
        &[payer],
        recent_blockhash,
    );
    rpc.process_transaction(tx).await
}
/// Asserts the indexer state after a withdrawal:
/// 1. a compressed token account owned directly by the payer holds the
///    withdrawn amount,
/// 2. a change escrow account owned by the token-owner PDA holds the
///    remaining `escrow_amount - withdrawal_amount`.
///
/// Fix: the original assertion messages were garbled
/// ("has incorrect amount {} expected amount"); reworded for readability.
pub fn assert_withdrawal<R: RpcConnection>(
    test_indexer: &TestIndexer<R>,
    payer_pubkey: &Pubkey,
    withdrawal_amount: u64,
    escrow_amount: u64,
) {
    let token_owner_pda = get_token_owner_pda(payer_pubkey).0;
    // The withdrawn tokens must now be owned by the payer itself.
    let token_data_withdrawal = test_indexer
        .token_compressed_accounts
        .iter()
        .any(|x| x.token_data.owner == *payer_pubkey && x.token_data.amount == withdrawal_amount);
    assert!(
        token_data_withdrawal,
        "Withdrawal compressed account doesn't exist or has an incorrect amount; expected amount {}",
        withdrawal_amount
    );
    // The escrow change account keeps whatever was not withdrawn.
    let token_data_escrow_change = test_indexer.token_compressed_accounts.iter().any(|x| {
        x.token_data.owner == token_owner_pda
            && x.token_data.amount == escrow_amount - withdrawal_amount
    });
    assert!(
        token_data_escrow_change,
        "Escrow change compressed account doesn't exist or has an incorrect amount; expected amount {}",
        escrow_amount - withdrawal_amount
    );
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs
|
#![cfg(feature = "test-sbf")]
// 2. escrow tokens with compressed pda
// create test env
// create mint and mint tokens
// escrow compressed tokens - with compressed pda
// release compressed tokens
// TODO: 3. escrow tokens by decompression with compressed pda
// this design pattern can be used to use compressed accounts with an AMMM
// create test env
// create mint and mint tokens
// decompress compressed tokens into program owned token account - with compressed pda
// release compressed tokens
use anchor_lang::AnchorDeserialize;
use light_hasher::{Hasher, Poseidon};
use light_program_test::test_env::{setup_test_programs_with_accounts, EnvAccounts};
use light_prover_client::gnark::helpers::{ProverConfig, ProverMode};
use light_system_program::sdk::address::derive_address;
use light_system_program::sdk::compressed_account::MerkleContext;
use light_system_program::sdk::event::PublicTransactionEvent;
use light_system_program::NewAddressParams;
use light_test_utils::indexer::TestIndexer;
use light_test_utils::spl::{create_mint_helper, mint_tokens_helper};
use light_test_utils::{FeeConfig, Indexer, RpcConnection, RpcError, TransactionParams};
use solana_sdk::instruction::{Instruction, InstructionError};
use solana_sdk::signature::Keypair;
use solana_sdk::{signer::Signer, transaction::Transaction};
use token_escrow::escrow_with_compressed_pda::sdk::{
create_escrow_instruction, create_withdrawal_instruction, get_token_owner_pda,
CreateCompressedPdaEscrowInstructionInputs, CreateCompressedPdaWithdrawalInstructionInputs,
};
use token_escrow::{EscrowError, EscrowTimeLock};
/// End-to-end flow for the compressed-PDA escrow:
/// 1. set up test programs, mint tokens to the payer,
/// 2. escrow tokens together with a compressed timelock PDA,
/// 3. attempt a withdrawal before the lockup expires (must fail with
///    EscrowLocked),
/// 4. warp past the lockup, withdraw successfully, and assert final state.
#[tokio::test]
async fn test_escrow_with_compressed_pda() {
let (mut rpc, env) = setup_test_programs_with_accounts(Some(vec![(
String::from("token_escrow"),
token_escrow::ID,
)]))
.await;
let payer = rpc.get_payer().insecure_clone();
let test_indexer = TestIndexer::init_from_env(
&payer,
&env,
Some(ProverConfig {
run_mode: Some(ProverMode::Rpc),
circuits: vec![],
}),
);
let mint = create_mint_helper(&mut rpc, &payer).await;
let mut test_indexer = test_indexer.await;
// Mint the full balance to the payer before escrowing part of it.
let amount = 10000u64;
mint_tokens_helper(
&mut rpc,
&mut test_indexer,
&env.merkle_tree_pubkey,
&payer,
&mint,
vec![amount],
vec![payer.pubkey()],
)
.await;
// Fixed seed so the compressed PDA address is deterministic for asserts.
let seed = [1u8; 32];
let escrow_amount = 100u64;
let lock_up_time = 1000u64;
perform_escrow_with_event(
&mut test_indexer,
&mut rpc,
&env,
&payer,
lock_up_time,
escrow_amount,
seed,
)
.await
.unwrap();
// Lockup end is measured in slots from the current slot.
let current_slot = rpc.get_slot().await.unwrap();
let lockup_end = lock_up_time + current_slot;
assert_escrow(
&mut test_indexer,
&env,
&payer,
&escrow_amount,
&amount,
&seed,
&lockup_end,
)
.await;
println!("withdrawal _----------------------------------------------------------------");
let withdrawal_amount = escrow_amount;
let new_lock_up_time = 2000u64;
// Withdrawing before the lockup expires must fail with EscrowLocked.
let result = perform_withdrawal_failing(
&mut rpc,
&mut test_indexer,
&env,
&payer,
lock_up_time,
new_lock_up_time,
withdrawal_amount,
)
.await;
let instruction_error = InstructionError::Custom(EscrowError::EscrowLocked.into());
let transaction_error =
solana_sdk::transaction::TransactionError::InstructionError(0, instruction_error);
let rpc_error = RpcError::TransactionError(transaction_error);
assert!(matches!(result, Err(error) if error.to_string() == rpc_error.to_string()));
// Advance past the lockup so the withdrawal can succeed.
rpc.warp_to_slot(lockup_end + 1).await.unwrap();
perform_withdrawal_with_event(
&mut rpc,
&mut test_indexer,
&env,
&payer,
lockup_end,
new_lock_up_time,
withdrawal_amount,
)
.await
.unwrap();
assert_withdrawal(
&mut rpc,
&mut test_indexer,
&env,
&payer,
&withdrawal_amount,
&escrow_amount,
&seed,
new_lock_up_time,
)
.await;
}
/// Builds the compressed-PDA escrow instruction and submits it as a raw
/// transaction, returning the result so tests can assert on expected
/// failures.
pub async fn perform_escrow_failing<R: RpcConnection>(
    test_indexer: &mut TestIndexer<R>,
    rpc: &mut R,
    env: &EnvAccounts,
    payer: &Keypair,
    lock_up_time: u64,
    escrow_amount: u64,
    seed: [u8; 32],
) -> Result<solana_sdk::signature::Signature, RpcError> {
    let (fee_payer, ix) =
        create_escrow_ix(payer, test_indexer, env, seed, rpc, lock_up_time, escrow_amount).await;
    let blockhash = rpc.get_latest_blockhash().await.unwrap();
    // Sign and submit without event parsing; errors surface via the Result.
    let tx = Transaction::new_signed_with_payer(&[ix], Some(&fee_payer), &[payer], blockhash);
    rpc.process_transaction(tx).await
}
/// Escrows tokens together with the compressed timelock PDA and applies the
/// emitted PublicTransactionEvent to the test indexer. Passes the expected
/// account/address counts via TransactionParams.
pub async fn perform_escrow_with_event<R: RpcConnection>(
    test_indexer: &mut TestIndexer<R>,
    rpc: &mut R,
    env: &EnvAccounts,
    payer: &Keypair,
    lock_up_time: u64,
    escrow_amount: u64,
    seed: [u8; 32],
) -> Result<(), RpcError> {
    let (_, ix) =
        create_escrow_ix(payer, test_indexer, env, seed, rpc, lock_up_time, escrow_amount).await;
    // One token input; escrow, change, and PDA outputs; one new address.
    let tx_params = TransactionParams {
        num_input_compressed_accounts: 1,
        num_output_compressed_accounts: 3,
        num_new_addresses: 1,
        compress: 0,
        fee_config: FeeConfig::default(),
    };
    let event = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[ix],
            &payer.pubkey(),
            &[payer],
            Some(tx_params),
        )
        .await?;
    test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0);
    Ok(())
}
/// Assembles the escrow instruction for the compressed-PDA variant:
/// proves inclusion of the payer's token account, proves non-inclusion of the
/// new PDA address derived from `seed`, and packs both into the instruction.
/// Returns the payer pubkey alongside the instruction for convenience.
async fn create_escrow_ix<R: RpcConnection>(
payer: &Keypair,
test_indexer: &mut TestIndexer<R>,
env: &EnvAccounts,
seed: [u8; 32],
context: &mut R,
lock_up_time: u64,
escrow_amount: u64,
) -> (anchor_lang::prelude::Pubkey, Instruction) {
let payer_pubkey = payer.pubkey();
// NOTE(review): assumes the payer's token account is at index 0 of the
// indexer's list — confirm against indexer insertion order.
let input_compressed_token_account_data = test_indexer.token_compressed_accounts[0].clone();
let compressed_input_account_with_context = input_compressed_token_account_data
.compressed_account
.clone();
// Leaf hash of the input account for the inclusion proof.
let input_compressed_account_hash = compressed_input_account_with_context
.compressed_account
.hash::<Poseidon>(
&env.merkle_tree_pubkey,
&compressed_input_account_with_context
.merkle_context
.leaf_index,
)
.unwrap();
// Address of the compressed PDA that the escrow instruction will create.
let address = derive_address(&env.address_merkle_tree_pubkey, &seed).unwrap();
// Combined proof: inclusion for the token input, non-inclusion for the
// new address.
let rpc_result = test_indexer
.create_proof_for_compressed_accounts(
Some(&[input_compressed_account_hash]),
Some(&[compressed_input_account_with_context
.merkle_context
.merkle_tree_pubkey]),
Some(&[address]),
Some(vec![env.address_merkle_tree_pubkey]),
context,
)
.await;
let new_address_params = NewAddressParams {
seed,
address_merkle_tree_pubkey: env.address_merkle_tree_pubkey,
address_queue_pubkey: env.address_merkle_tree_queue_pubkey,
address_merkle_tree_root_index: rpc_result.address_root_indices[0],
};
let create_ix_inputs = CreateCompressedPdaEscrowInstructionInputs {
input_token_data: &[input_compressed_token_account_data.token_data.clone()],
lock_up_time,
signer: &payer_pubkey,
input_merkle_context: &[MerkleContext {
leaf_index: compressed_input_account_with_context
.merkle_context
.leaf_index,
merkle_tree_pubkey: env.merkle_tree_pubkey,
nullifier_queue_pubkey: env.nullifier_queue_pubkey,
queue_index: None,
}],
// Two output trees: escrow account and change account.
output_compressed_account_merkle_tree_pubkeys: &[
env.merkle_tree_pubkey,
env.merkle_tree_pubkey,
],
output_compressed_accounts: &Vec::new(),
root_indices: &rpc_result.root_indices,
proof: &Some(rpc_result.proof),
mint: &input_compressed_token_account_data.token_data.mint,
new_address_params,
cpi_context_account: &env.cpi_context_account_pubkey,
input_compressed_accounts: &[compressed_input_account_with_context.compressed_account],
};
let instruction = create_escrow_instruction(create_ix_inputs.clone(), escrow_amount);
(payer_pubkey, instruction)
}
/// Asserts the indexer state after the compressed-PDA escrow:
/// - an escrow token account owned by the token-owner PDA holds
///   `escrow_amount`,
/// - a change token account owned by the payer holds `amount - escrow_amount`,
/// - a compressed PDA owned by this program exists at the derived address,
///   with the expected discriminator, slot payload, and Poseidon data hash.
pub async fn assert_escrow<R: RpcConnection>(
test_indexer: &mut TestIndexer<R>,
env: &EnvAccounts,
payer: &Keypair,
escrow_amount: &u64,
amount: &u64,
seed: &[u8; 32],
lock_up_time: &u64,
) {
let payer_pubkey = payer.pubkey();
let token_owner_pda = get_token_owner_pda(&payer_pubkey).0;
// Escrowed tokens belong to the token-owner PDA.
let token_data_escrow = test_indexer
.token_compressed_accounts
.iter()
.find(|x| x.token_data.owner == token_owner_pda)
.unwrap()
.token_data
.clone();
assert_eq!(token_data_escrow.amount, *escrow_amount);
assert_eq!(token_data_escrow.owner, token_owner_pda);
// Change account stays with the payer and holds the remainder.
let token_data_change_compressed_token_account_exist =
test_indexer.token_compressed_accounts.iter().any(|x| {
x.token_data.owner == payer.pubkey() && x.token_data.amount == amount - escrow_amount
});
assert!(token_data_change_compressed_token_account_exist);
// The compressed timelock PDA is the only account owned by this program.
let compressed_escrow_pda = test_indexer
.compressed_accounts
.iter()
.find(|x| x.compressed_account.owner == token_escrow::ID)
.unwrap()
.clone();
let address = derive_address(&env.address_merkle_tree_pubkey, seed).unwrap();
assert_eq!(
compressed_escrow_pda.compressed_account.address.unwrap(),
address
);
assert_eq!(
compressed_escrow_pda.compressed_account.owner,
token_escrow::ID
);
let compressed_escrow_pda_deserialized = compressed_escrow_pda
.compressed_account
.data
.as_ref()
.unwrap();
let compressed_escrow_pda_data =
EscrowTimeLock::deserialize_reader(&mut &compressed_escrow_pda_deserialized.data[..])
.unwrap();
println!(
"compressed_escrow_pda_data {:?}",
compressed_escrow_pda_data
);
// The PDA stores the absolute unlock slot.
assert_eq!(compressed_escrow_pda_data.slot, *lock_up_time);
// Discriminator 1 marks the timelock account type in this program.
assert_eq!(
compressed_escrow_pda_deserialized.discriminator,
1u64.to_le_bytes(),
);
// The data hash must be the Poseidon hash of the slot payload.
assert_eq!(
compressed_escrow_pda_deserialized.data_hash,
Poseidon::hash(&compressed_escrow_pda_data.slot.to_le_bytes()).unwrap(),
);
}
/// Withdraws from the compressed-PDA escrow and feeds the emitted transaction
/// event into the test indexer so its compressed-account state stays in sync.
pub async fn perform_withdrawal_with_event<R: RpcConnection>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    env: &EnvAccounts,
    payer: &Keypair,
    old_lock_up_time: u64,
    new_lock_up_time: u64,
    escrow_amount: u64,
) -> Result<(), RpcError> {
    let ix = perform_withdrawal(
        rpc,
        test_indexer,
        env,
        payer,
        old_lock_up_time,
        new_lock_up_time,
        escrow_amount,
    )
    .await;
    let authority = payer.pubkey();
    let maybe_event = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[ix],
            &authority,
            &[payer],
            None,
        )
        .await?;
    // A missing event here is a test bug, so unwrapping is intentional.
    test_indexer.add_compressed_accounts_with_token_data(&maybe_event.unwrap().0);
    Ok(())
}
/// Builds a compressed-PDA withdrawal instruction and submits it as a raw
/// transaction, returning the result so tests can assert on expected errors
/// (e.g. EscrowLocked before the lockup expires).
pub async fn perform_withdrawal_failing<R: RpcConnection>(
    rpc: &mut R,
    test_indexer: &mut TestIndexer<R>,
    env: &EnvAccounts,
    payer: &Keypair,
    old_lock_up_time: u64,
    new_lock_up_time: u64,
    escrow_amount: u64,
) -> Result<solana_sdk::signature::Signature, RpcError> {
    let ix = perform_withdrawal(
        rpc,
        test_indexer,
        env,
        payer,
        old_lock_up_time,
        new_lock_up_time,
        escrow_amount,
    )
    .await;
    let blockhash = rpc.get_latest_blockhash().await.unwrap();
    let tx =
        Transaction::new_signed_with_payer(&[ix], Some(&payer.pubkey()), &[payer], blockhash);
    rpc.process_transaction(tx).await
}
/// Builds (but does not send) the compressed-PDA withdrawal instruction.
/// Looks up the timelock PDA and the escrowed token account in the indexer,
/// proves inclusion of both, and packs them into the instruction together
/// with the old/new lockup times.
pub async fn perform_withdrawal<R: RpcConnection>(
rpc: &mut R,
test_indexer: &mut TestIndexer<R>,
env: &EnvAccounts,
payer: &Keypair,
old_lock_up_time: u64,
new_lock_up_time: u64,
escrow_amount: u64,
) -> Instruction {
let payer_pubkey = payer.pubkey();
// The compressed timelock PDA is the only account owned by this program.
let compressed_escrow_pda = test_indexer
.compressed_accounts
.iter()
.find(|x| x.compressed_account.owner == token_escrow::ID)
.unwrap()
.clone();
println!("compressed_escrow_pda {:?}", compressed_escrow_pda);
let token_owner_pda = get_token_owner_pda(&payer_pubkey).0;
// Escrowed tokens are owned by the token-owner PDA.
let token_escrow = test_indexer
.token_compressed_accounts
.iter()
.find(|x| x.token_data.owner == token_owner_pda)
.unwrap()
.clone();
let token_escrow_account = token_escrow.compressed_account.clone();
let token_escrow_account_hash = token_escrow_account
.compressed_account
.hash::<Poseidon>(
&env.merkle_tree_pubkey,
&token_escrow_account.merkle_context.leaf_index,
)
.unwrap();
println!("token_data_escrow {:?}", token_escrow);
println!("token escrow_account {:?}", token_escrow_account);
let compressed_pda_hash = compressed_escrow_pda
.compressed_account
.hash::<Poseidon>(
&env.merkle_tree_pubkey,
&compressed_escrow_pda.merkle_context.leaf_index,
)
.unwrap();
println!("compressed_pda_hash {:?}", compressed_pda_hash);
println!("token_escrow_account_hash {:?}", token_escrow_account_hash);
// compressed pda will go first into the proof because in the program
// the compressed pda program executes the transaction
let rpc_result = test_indexer
.create_proof_for_compressed_accounts(
Some(&[compressed_pda_hash, token_escrow_account_hash]),
Some(&[
compressed_escrow_pda.merkle_context.merkle_tree_pubkey,
token_escrow_account.merkle_context.merkle_tree_pubkey,
]),
None,
None,
rpc,
)
.await;
let create_withdrawal_ix_inputs = CreateCompressedPdaWithdrawalInstructionInputs {
input_token_data: &[token_escrow.token_data.clone()],
signer: &payer_pubkey,
input_token_escrow_merkle_context: MerkleContext {
leaf_index: token_escrow_account.merkle_context.leaf_index,
merkle_tree_pubkey: env.merkle_tree_pubkey,
nullifier_queue_pubkey: env.nullifier_queue_pubkey,
queue_index: None,
},
input_cpda_merkle_context: MerkleContext {
leaf_index: compressed_escrow_pda.merkle_context.leaf_index,
merkle_tree_pubkey: env.merkle_tree_pubkey,
nullifier_queue_pubkey: env.nullifier_queue_pubkey,
queue_index: None,
},
// Two output trees: withdrawn account and escrow change.
output_compressed_account_merkle_tree_pubkeys: &[
env.merkle_tree_pubkey,
env.merkle_tree_pubkey,
],
output_compressed_accounts: &Vec::new(),
root_indices: &rpc_result.root_indices,
proof: &Some(rpc_result.proof),
mint: &token_escrow.token_data.mint,
cpi_context_account: &env.cpi_context_account_pubkey,
old_lock_up_time,
new_lock_up_time,
address: compressed_escrow_pda.compressed_account.address.unwrap(),
input_compressed_accounts: &[compressed_escrow_pda.compressed_account],
};
create_withdrawal_instruction(create_withdrawal_ix_inputs.clone(), escrow_amount)
}
/// Asserts the indexer and PDA state after a compressed-PDA withdrawal:
/// 1. Change escrow compressed account exists
/// 2. Withdrawal token account exists
/// 3. Compressed pda with update lock-up time exists
#[allow(clippy::too_many_arguments)]
pub async fn assert_withdrawal<R: RpcConnection>(
rpc: &mut R,
test_indexer: &mut TestIndexer<R>,
env: &EnvAccounts,
payer: &Keypair,
withdrawal_amount: &u64,
escrow_amount: &u64,
seed: &[u8; 32],
lock_up_time: u64,
) {
let escrow_change_amount = escrow_amount - withdrawal_amount;
let payer_pubkey = payer.pubkey();
let token_owner_pda = get_token_owner_pda(&payer_pubkey).0;
// 1. Escrow change account holds the remainder, owned by the PDA.
let token_data_escrow = test_indexer.token_compressed_accounts.iter().any(|x| {
x.token_data.owner == token_owner_pda && x.token_data.amount == escrow_change_amount
});
assert!(
token_data_escrow,
"change escrow token account does not exist or has incorrect amount",
);
// 2. Withdrawn tokens are owned directly by the payer.
let withdrawal_account_exits = test_indexer
.token_compressed_accounts
.iter()
.any(|x| x.token_data.owner == payer.pubkey() && x.token_data.amount == *withdrawal_amount);
assert!(withdrawal_account_exits);
// 3. The compressed timelock PDA still exists at the derived address
// with an updated unlock slot.
let compressed_escrow_pda = test_indexer
.compressed_accounts
.iter()
.find(|x| x.compressed_account.owner == token_escrow::ID)
.unwrap()
.clone();
let address = derive_address(&env.address_merkle_tree_pubkey, seed).unwrap();
assert_eq!(
compressed_escrow_pda.compressed_account.address.unwrap(),
address
);
assert_eq!(
compressed_escrow_pda.compressed_account.owner,
token_escrow::ID
);
let compressed_escrow_pda_deserialized = compressed_escrow_pda
.compressed_account
.data
.as_ref()
.unwrap();
let compressed_escrow_pda_data =
EscrowTimeLock::deserialize_reader(&mut &compressed_escrow_pda_deserialized.data[..])
.unwrap();
let current_slot = rpc.get_slot().await.unwrap();
// NOTE(review): comparing against lock_up_time + *current* slot assumes no
// slot elapsed since the withdrawal transaction — flaky if slots advance.
assert_eq!(compressed_escrow_pda_data.slot, lock_up_time + current_slot);
// Discriminator 1 marks the timelock account type in this program.
assert_eq!(
compressed_escrow_pda_deserialized.discriminator,
1u64.to_le_bytes(),
);
// The data hash must be the Poseidon hash of the slot payload.
assert_eq!(
compressed_escrow_pda_deserialized.data_hash,
Poseidon::hash(&compressed_escrow_pda_data.slot.to_le_bytes()).unwrap(),
);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src/lib.rs
|
#![allow(clippy::too_many_arguments)]
use anchor_lang::prelude::*;
use anchor_lang::solana_program::pubkey::Pubkey;
use light_compressed_token::process_transfer::InputTokenDataWithContext;
use light_compressed_token::process_transfer::PackedTokenTransferOutputData;
use light_system_program::invoke::processor::CompressedProof;
pub mod escrow_with_compressed_pda;
pub mod escrow_with_pda;
pub use escrow_with_compressed_pda::escrow::*;
pub use escrow_with_pda::escrow::*;
use light_system_program::sdk::CompressedCpiContext;
use light_system_program::NewAddressParamsPacked;
/// Program errors returned by the token-escrow example.
#[error_code]
pub enum EscrowError {
/// Withdrawal attempted before the lockup slot has passed.
#[msg("Escrow is locked")]
EscrowLocked,
/// The CPI context account was not found among the remaining accounts.
#[msg("CpiContextAccountIndexNotFound")]
CpiContextAccountIndexNotFound,
}
declare_id!("GRLu2hKaAiMbxpkAM1HeXzks9YeGuz18SEgXEizVvPqX");
/// Anchor program entry points for the token-escrow example. Two variants:
/// a regular-PDA timelock and a compressed-PDA timelock, each with an
/// escrow and a withdraw instruction.
#[program]
pub mod token_escrow {
use self::{
escrow_with_compressed_pda::withdrawal::process_withdraw_compressed_tokens_with_compressed_pda,
escrow_with_pda::withdrawal::process_withdraw_compressed_escrow_tokens_with_pda,
};
use super::*;
/// Escrows compressed tokens, for a certain number of slots.
/// Transfers compressed tokens to compressed token account owned by cpi_signer.
/// Tokens are locked for lock_up_time slots.
pub fn escrow_compressed_tokens_with_pda<'info>(
ctx: Context<'_, '_, '_, 'info, EscrowCompressedTokensWithPda<'info>>,
lock_up_time: u64,
escrow_amount: u64,
proof: CompressedProof,
mint: Pubkey,
signer_is_delegate: bool,
input_token_data_with_context: Vec<InputTokenDataWithContext>,
output_state_merkle_tree_account_indices: Vec<u8>,
) -> Result<()> {
process_escrow_compressed_tokens_with_pda(
ctx,
lock_up_time,
escrow_amount,
proof,
mint,
signer_is_delegate,
input_token_data_with_context,
output_state_merkle_tree_account_indices,
)
}
/// Allows the owner to withdraw compressed tokens from the escrow account,
/// provided the lockup time has expired.
pub fn withdraw_compressed_escrow_tokens_with_pda<'info>(
ctx: Context<'_, '_, '_, 'info, EscrowCompressedTokensWithPda<'info>>,
bump: u8,
withdrawal_amount: u64,
proof: CompressedProof,
mint: Pubkey,
signer_is_delegate: bool,
input_token_data_with_context: Vec<InputTokenDataWithContext>,
output_state_merkle_tree_account_indices: Vec<u8>,
) -> Result<()> {
process_withdraw_compressed_escrow_tokens_with_pda(
ctx,
bump,
withdrawal_amount,
proof,
mint,
signer_is_delegate,
input_token_data_with_context,
output_state_merkle_tree_account_indices,
)
}
/// Escrows compressed tokens, for a certain number of slots.
/// Transfers compressed tokens to compressed token account owned by cpi_signer.
/// Tokens are locked for lock_up_time slots.
pub fn escrow_compressed_tokens_with_compressed_pda<'info>(
ctx: Context<'_, '_, '_, 'info, EscrowCompressedTokensWithCompressedPda<'info>>,
lock_up_time: u64,
escrow_amount: u64,
proof: CompressedProof,
mint: Pubkey,
signer_is_delegate: bool,
input_token_data_with_context: Vec<InputTokenDataWithContext>,
output_state_merkle_tree_account_indices: Vec<u8>,
new_address_params: NewAddressParamsPacked,
cpi_context: CompressedCpiContext,
) -> Result<()> {
process_escrow_compressed_tokens_with_compressed_pda(
ctx,
lock_up_time,
escrow_amount,
proof,
mint,
signer_is_delegate,
input_token_data_with_context,
output_state_merkle_tree_account_indices,
new_address_params,
cpi_context,
)
}
/// Withdraws compressed tokens from the compressed-PDA escrow and updates
/// the compressed timelock PDA with a new lock-up time.
/// (The previous doc comment was copy-pasted from the escrow instruction.)
pub fn withdraw_compressed_tokens_with_compressed_pda<'info>(
ctx: Context<'_, '_, '_, 'info, EscrowCompressedTokensWithCompressedPda<'info>>,
withdrawal_amount: u64,
proof: CompressedProof,
mint: Pubkey,
signer_is_delegate: bool,
input_token_data_with_context: Vec<InputTokenDataWithContext>,
output_state_merkle_tree_account_indices: Vec<u8>,
cpi_context: CompressedCpiContext,
input_compressed_pda: PackedInputCompressedPda,
bump: u8,
) -> Result<()> {
process_withdraw_compressed_tokens_with_compressed_pda(
ctx,
withdrawal_amount,
proof,
mint,
signer_is_delegate,
input_token_data_with_context,
output_state_merkle_tree_account_indices,
cpi_context,
input_compressed_pda,
bump,
)
}
}
// TODO: add to light_sdk
/// A helper function that creates a new compressed account with the change output.
/// Input sum - Output sum = Change amount
/// Outputs compressed account with the change amount, and owner of the compressed input accounts.
///
/// Panics if the outputs sum to more than the inputs. The original used a
/// plain `-`, which silently wraps in release builds when overflow checks are
/// disabled; `checked_sub` makes the invariant explicit in all build modes.
fn create_change_output_compressed_token_account(
    input_token_data_with_context: &[InputTokenDataWithContext],
    output_compressed_accounts: &[PackedTokenTransferOutputData],
    owner: &Pubkey,
    merkle_tree_index: u8,
) -> PackedTokenTransferOutputData {
    let input_sum = input_token_data_with_context
        .iter()
        .map(|account| account.amount)
        .sum::<u64>();
    let output_sum = output_compressed_accounts
        .iter()
        .map(|account| account.amount)
        .sum::<u64>();
    // Invariant: inputs cover outputs; a shortfall is a caller bug.
    let change_amount = input_sum
        .checked_sub(output_sum)
        .expect("output amounts exceed input amounts");
    PackedTokenTransferOutputData {
        amount: change_amount,
        owner: *owner,
        lamports: None,
        merkle_tree_index,
        tlv: None,
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs
|
#![cfg(not(target_os = "solana"))]
use crate::escrow_with_compressed_pda::escrow::PackedInputCompressedPda;
use anchor_lang::{InstructionData, ToAccountMetas};
use light_compressed_token::process_transfer::{
get_cpi_authority_pda,
transfer_sdk::{create_inputs_and_remaining_accounts_checked, to_account_metas},
TokenTransferOutputData,
};
use light_system_program::{
invoke::processor::CompressedProof,
sdk::{
address::{add_and_get_remaining_account_indices, pack_new_address_params},
compressed_account::{pack_merkle_context, CompressedAccount, MerkleContext},
CompressedCpiContext,
},
NewAddressParams,
};
use solana_sdk::{instruction::Instruction, pubkey::Pubkey};
/// Inputs for building the compressed-PDA escrow instruction off-chain.
/// Slices are borrowed from the caller's indexer/proof state; all borrows
/// share the lifetime `'a`.
#[derive(Debug, Clone)]
pub struct CreateCompressedPdaEscrowInstructionInputs<'a> {
/// Number of slots the escrowed tokens stay locked.
pub lock_up_time: u64,
/// The fee payer and token owner authorizing the escrow.
pub signer: &'a Pubkey,
/// Merkle context for each input compressed token account.
pub input_merkle_context: &'a [MerkleContext],
/// Output state trees, one per output compressed account.
pub output_compressed_account_merkle_tree_pubkeys: &'a [Pubkey],
pub output_compressed_accounts: &'a [TokenTransferOutputData],
/// Root indices matching the validity proof.
pub root_indices: &'a [u16],
pub proof: &'a Option<CompressedProof>,
pub input_token_data: &'a [light_compressed_token::token_data::TokenData],
pub input_compressed_accounts: &'a [CompressedAccount],
pub mint: &'a Pubkey,
/// Parameters for the new compressed PDA address to create.
pub new_address_params: NewAddressParams,
/// The CPI context account used to combine the two program CPIs.
pub cpi_context_account: &'a Pubkey,
}
/// Builds the `EscrowCompressedTokensWithCompressedPda` instruction:
/// packs token inputs, output tree indices, the new PDA address params, and
/// the CPI context account into the remaining-accounts map, then assembles
/// the full account list and instruction data.
pub fn create_escrow_instruction(
input_params: CreateCompressedPdaEscrowInstructionInputs,
escrow_amount: u64,
) -> Instruction {
let token_owner_pda = get_token_owner_pda(input_params.signer);
// Builds the packed token-transfer inputs and seeds the map of
// remaining accounts (pubkey -> index).
let (mut remaining_accounts, inputs) = create_inputs_and_remaining_accounts_checked(
input_params.input_token_data,
input_params.input_compressed_accounts,
input_params.input_merkle_context,
None,
input_params.output_compressed_accounts,
input_params.root_indices,
input_params.proof,
*input_params.mint,
input_params.signer,
false,
None,
None,
None,
)
.unwrap();
let merkle_tree_indices = add_and_get_remaining_account_indices(
input_params.output_compressed_account_merkle_tree_pubkeys,
&mut remaining_accounts,
);
let new_address_params =
pack_new_address_params(&[input_params.new_address_params], &mut remaining_accounts);
// Reuse the CPI context account's index if already packed; otherwise
// append it at the end of the remaining-accounts map.
let cpi_context_account_index: u8 = match remaining_accounts
.get(input_params.cpi_context_account)
{
Some(entry) => (*entry).try_into().unwrap(),
None => {
remaining_accounts.insert(*input_params.cpi_context_account, remaining_accounts.len());
(remaining_accounts.len() - 1) as u8
}
};
let instruction_data = crate::instruction::EscrowCompressedTokensWithCompressedPda {
lock_up_time: input_params.lock_up_time,
escrow_amount,
proof: input_params.proof.clone().unwrap(),
mint: *input_params.mint,
signer_is_delegate: false,
input_token_data_with_context: inputs.input_token_data_with_context,
output_state_merkle_tree_account_indices: merkle_tree_indices,
new_address_params: new_address_params[0],
cpi_context: CompressedCpiContext {
set_context: false,
first_set_context: true,
cpi_context_account_index,
},
};
let registered_program_pda = Pubkey::find_program_address(
&[light_system_program::ID.to_bytes().as_slice()],
&account_compression::ID,
)
.0;
let compressed_token_cpi_authority_pda = get_cpi_authority_pda().0;
let account_compression_authority =
light_system_program::utils::get_cpi_authority_pda(&light_system_program::ID);
// NOTE(review): derived via light_sdk here but via
// light_system_program::utils in create_withdrawal_instruction — confirm
// both derive the same PDA.
let cpi_authority_pda = light_sdk::utils::get_cpi_authority_pda(&crate::ID);
let accounts = crate::accounts::EscrowCompressedTokensWithCompressedPda {
signer: *input_params.signer,
noop_program: Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY),
compressed_token_program: light_compressed_token::ID,
light_system_program: light_system_program::ID,
account_compression_program: account_compression::ID,
registered_program_pda,
compressed_token_cpi_authority_pda,
account_compression_authority,
self_program: crate::ID,
token_owner_pda: token_owner_pda.0,
system_program: solana_sdk::system_program::id(),
cpi_context_account: *input_params.cpi_context_account,
cpi_authority_pda,
};
// Fixed accounts first, then the packed remaining accounts in map order.
let remaining_accounts = to_account_metas(remaining_accounts);
Instruction {
program_id: crate::ID,
accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
data: instruction_data.data(),
}
}
/// Derives the escrow token-owner PDA for `signer`, seeded with the literal
/// "escrow" prefix followed by the signer's public key bytes.
pub fn get_token_owner_pda(signer: &Pubkey) -> (Pubkey, u8) {
    let signer_bytes = signer.to_bytes();
    Pubkey::find_program_address(&[b"escrow".as_ref(), signer_bytes.as_ref()], &crate::id())
}
/// Inputs for building the compressed-PDA withdrawal instruction off-chain.
#[derive(Debug, Clone)]
pub struct CreateCompressedPdaWithdrawalInstructionInputs<'a> {
/// The token owner requesting the withdrawal.
pub signer: &'a Pubkey,
/// Merkle context of the escrowed token account being spent.
pub input_token_escrow_merkle_context: MerkleContext,
/// Merkle context of the compressed timelock PDA being updated.
pub input_cpda_merkle_context: MerkleContext,
pub output_compressed_account_merkle_tree_pubkeys: &'a [Pubkey],
pub output_compressed_accounts: &'a [TokenTransferOutputData],
/// Root indices matching the validity proof; index 0 belongs to the PDA.
pub root_indices: &'a [u16],
pub proof: &'a Option<CompressedProof>,
pub input_token_data: &'a [light_compressed_token::token_data::TokenData],
pub input_compressed_accounts: &'a [CompressedAccount],
pub mint: &'a Pubkey,
/// Current lockup value stored in the PDA (checked by the program).
pub old_lock_up_time: u64,
/// Replacement lockup value written to the updated PDA.
pub new_lock_up_time: u64,
/// Address of the compressed timelock PDA.
pub address: [u8; 32],
/// The CPI context account used to combine the two program CPIs.
pub cpi_context_account: &'a Pubkey,
}
/// Builds the `WithdrawCompressedTokensWithCompressedPda` instruction:
/// packs the escrowed token input (signed by the token-owner PDA), the
/// timelock PDA's merkle context, output tree indices, and the CPI context
/// account, then assembles the full account list and instruction data.
pub fn create_withdrawal_instruction(
input_params: CreateCompressedPdaWithdrawalInstructionInputs,
withdrawal_amount: u64,
) -> Instruction {
// The bump is passed on so the program can re-derive and sign as the PDA.
let (token_owner_pda, bump) = get_token_owner_pda(input_params.signer);
let (mut remaining_accounts, inputs) = create_inputs_and_remaining_accounts_checked(
input_params.input_token_data,
input_params.input_compressed_accounts,
&[input_params.input_token_escrow_merkle_context],
None,
input_params.output_compressed_accounts,
input_params.root_indices,
input_params.proof,
*input_params.mint,
&token_owner_pda,
false,
None,
None,
None,
)
.unwrap();
let merkle_tree_indices = add_and_get_remaining_account_indices(
input_params.output_compressed_account_merkle_tree_pubkeys,
&mut remaining_accounts,
);
// Packed order: [0] = compressed PDA context, [1] = token escrow context.
let merkle_context_packed = pack_merkle_context(
&[
input_params.input_cpda_merkle_context,
input_params.input_token_escrow_merkle_context,
],
&mut remaining_accounts,
);
// Reuse the CPI context account's index if already packed; otherwise
// append it at the end of the remaining-accounts map.
let cpi_context_account_index: u8 = match remaining_accounts
.get(input_params.cpi_context_account)
{
Some(entry) => (*entry).try_into().unwrap(),
None => {
remaining_accounts.insert(*input_params.cpi_context_account, remaining_accounts.len());
(remaining_accounts.len() - 1) as u8
}
};
let cpi_context = CompressedCpiContext {
set_context: false,
first_set_context: true,
cpi_context_account_index,
};
let input_compressed_pda = PackedInputCompressedPda {
old_lock_up_time: input_params.old_lock_up_time,
new_lock_up_time: input_params.new_lock_up_time,
address: input_params.address,
merkle_context: merkle_context_packed[0],
root_index: input_params.root_indices[0],
};
let instruction_data = crate::instruction::WithdrawCompressedTokensWithCompressedPda {
proof: input_params.proof.clone().unwrap(),
mint: *input_params.mint,
signer_is_delegate: false,
input_token_data_with_context: inputs.input_token_data_with_context,
output_state_merkle_tree_account_indices: merkle_tree_indices,
cpi_context,
input_compressed_pda,
withdrawal_amount,
bump,
};
let registered_program_pda = Pubkey::find_program_address(
&[light_system_program::ID.to_bytes().as_slice()],
&account_compression::ID,
)
.0;
let compressed_token_cpi_authority_pda = get_cpi_authority_pda().0;
let account_compression_authority =
light_system_program::utils::get_cpi_authority_pda(&light_system_program::ID);
// NOTE(review): derived via light_system_program::utils here but via
// light_sdk::utils in create_escrow_instruction — confirm both derive
// the same PDA.
let cpi_authority_pda = light_system_program::utils::get_cpi_authority_pda(&crate::ID);
let accounts = crate::accounts::EscrowCompressedTokensWithCompressedPda {
signer: *input_params.signer,
noop_program: Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY),
compressed_token_program: light_compressed_token::ID,
light_system_program: light_system_program::ID,
account_compression_program: account_compression::ID,
registered_program_pda,
compressed_token_cpi_authority_pda,
account_compression_authority,
self_program: crate::ID,
token_owner_pda,
system_program: solana_sdk::system_program::id(),
cpi_context_account: *input_params.cpi_context_account,
cpi_authority_pda,
};
// Fixed accounts first, then the packed remaining accounts in map order.
let remaining_accounts = to_account_metas(remaining_accounts);
Instruction {
program_id: crate::ID,
accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(),
data: instruction_data.data(),
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/withdrawal.rs
|
use account_compression::utils::constants::CPI_AUTHORITY_PDA_SEED;
use anchor_lang::prelude::*;
use light_compressed_token::process_transfer::{
CompressedTokenInstructionDataTransfer, InputTokenDataWithContext,
PackedTokenTransferOutputData,
};
use light_hasher::{DataHasher, Poseidon};
use light_sdk::verify::verify;
use light_system_program::{
invoke::processor::CompressedProof,
sdk::{
compressed_account::{
CompressedAccount, CompressedAccountData, PackedCompressedAccountWithMerkleContext,
},
CompressedCpiContext,
},
InstructionDataInvokeCpi, OutputCompressedAccountWithPackedContext,
};
use crate::{
create_change_output_compressed_token_account, EscrowCompressedTokensWithCompressedPda,
EscrowError, EscrowTimeLock, PackedInputCompressedPda,
};
/// Withdraws `withdrawal_amount` escrowed compressed tokens back to the signer
/// and re-locks the remaining escrow under a new lock-up time.
///
/// Flow (two CPIs sharing one `cpi_context` account and a single `proof`):
/// 1. Fails with `EscrowError::EscrowLocked` while the current slot is before
///    `input_compressed_pda.old_lock_up_time`.
/// 2. Builds the old/new compressed timelock PDA states from the lock-up diff.
/// 3. Token CPI (`cpi_compressed_token_withdrawal`) caches the token transfer
///    in the CPI context account: withdrawal output to the signer, change
///    output back to the escrow token-owner PDA.
/// 4. System-program CPI (`cpi_compressed_pda_withdrawal`) consumes the cached
///    context and executes the combined state transition.
///
/// # Errors
/// - `EscrowError::EscrowLocked` if the lock-up has not expired.
/// - Propagates errors from the two CPI helpers.
pub fn process_withdraw_compressed_tokens_with_compressed_pda<'info>(
    ctx: Context<'_, '_, '_, 'info, EscrowCompressedTokensWithCompressedPda<'info>>,
    withdrawal_amount: u64,
    proof: CompressedProof,
    mint: Pubkey,
    signer_is_delegate: bool,
    input_token_data_with_context: Vec<InputTokenDataWithContext>,
    output_state_merkle_tree_account_indices: Vec<u8>,
    cpi_context: CompressedCpiContext,
    input_compressed_pda: PackedInputCompressedPda,
    bump: u8,
) -> Result<()> {
    let current_slot = Clock::get()?.slot;
    // Escrow may only be withdrawn once its lock-up slot has passed.
    if current_slot < input_compressed_pda.old_lock_up_time {
        return err!(EscrowError::EscrowLocked);
    }
    let (old_state, new_state) = create_compressed_pda_data_based_on_diff(&input_compressed_pda)?;
    // Output 0: the withdrawn amount, owned by the signer.
    let withdrawal_token_data = PackedTokenTransferOutputData {
        amount: withdrawal_amount,
        owner: ctx.accounts.signer.key(),
        lamports: None,
        merkle_tree_index: output_state_merkle_tree_account_indices[0],
        tlv: None,
    };
    // Output 1: whatever is left of the escrowed inputs, kept under the
    // escrow token-owner PDA.
    let escrow_change_token_data = create_change_output_compressed_token_account(
        &input_token_data_with_context,
        &[withdrawal_token_data.clone()],
        &ctx.accounts.token_owner_pda.key(),
        output_state_merkle_tree_account_indices[1],
    );
    let output_compressed_accounts = vec![withdrawal_token_data, escrow_change_token_data];
    // Order matters: the token CPI must set the CPI context before the PDA
    // CPI consumes it.
    cpi_compressed_token_withdrawal(
        &ctx,
        mint,
        signer_is_delegate,
        input_token_data_with_context,
        output_compressed_accounts,
        proof.clone(),
        bump,
        cpi_context,
    )?;
    cpi_compressed_pda_withdrawal(ctx, proof, old_state, new_state, cpi_context)?;
    Ok(())
}
fn create_compressed_pda_data_based_on_diff(
input_compressed_pda: &PackedInputCompressedPda,
) -> Result<(
PackedCompressedAccountWithMerkleContext,
OutputCompressedAccountWithPackedContext,
)> {
let current_slot = Clock::get()?.slot;
let old_timelock_compressed_pda = EscrowTimeLock {
slot: input_compressed_pda.old_lock_up_time,
};
let old_compressed_account_data = CompressedAccountData {
discriminator: 1u64.to_le_bytes(),
data: old_timelock_compressed_pda.try_to_vec().unwrap(),
data_hash: old_timelock_compressed_pda
.hash::<Poseidon>()
.map_err(ProgramError::from)?,
};
let old_compressed_account = OutputCompressedAccountWithPackedContext {
compressed_account: CompressedAccount {
owner: crate::ID,
lamports: 0,
address: Some(input_compressed_pda.address),
data: Some(old_compressed_account_data),
},
merkle_tree_index: input_compressed_pda.merkle_context.merkle_tree_pubkey_index,
};
let old_compressed_account_with_context = PackedCompressedAccountWithMerkleContext {
compressed_account: old_compressed_account.compressed_account,
merkle_context: input_compressed_pda.merkle_context,
root_index: input_compressed_pda.root_index,
read_only: false,
};
let new_timelock_compressed_pda = EscrowTimeLock {
slot: current_slot
.checked_add(input_compressed_pda.new_lock_up_time)
.unwrap(),
};
let new_compressed_account_data = CompressedAccountData {
discriminator: 1u64.to_le_bytes(),
data: new_timelock_compressed_pda.try_to_vec().unwrap(),
data_hash: new_timelock_compressed_pda
.hash::<Poseidon>()
.map_err(ProgramError::from)?,
};
let new_state = OutputCompressedAccountWithPackedContext {
compressed_account: CompressedAccount {
owner: crate::ID,
lamports: 0,
address: Some(input_compressed_pda.address),
data: Some(new_compressed_account_data),
},
merkle_tree_index: input_compressed_pda.merkle_context.merkle_tree_pubkey_index,
};
Ok((old_compressed_account_with_context, new_state))
}
/// Second CPI of the withdrawal flow: invokes the light system program to
/// nullify the old timelock PDA state and create the re-locked one.
///
/// Runs after `cpi_compressed_token_withdrawal` has written the token part of
/// the transaction into the CPI context account, so `first_set_context` is
/// cleared here and the combined transition is verified with a single `proof`.
///
/// # Errors
/// Propagates verification failures from the light system program.
fn cpi_compressed_pda_withdrawal<'info>(
    ctx: Context<'_, '_, '_, 'info, EscrowCompressedTokensWithCompressedPda<'info>>,
    proof: CompressedProof,
    old_state: PackedCompressedAccountWithMerkleContext,
    compressed_pda: OutputCompressedAccountWithPackedContext,
    mut cpi_context: CompressedCpiContext,
) -> Result<()> {
    // Create CPI signer seeds. Use the same seed constant as the
    // `cpi_authority_pda` account constraint so the derived bump always
    // matches the PDA that actually signs (previously a duplicated literal).
    let bump = Pubkey::find_program_address(&[CPI_AUTHORITY_PDA_SEED], &crate::ID).1;
    let bump = [bump];
    let signer_seeds = [CPI_AUTHORITY_PDA_SEED, &bump];
    // The token CPI already initialized the context; this call consumes it.
    cpi_context.first_set_context = false;
    // Create CPI inputs: one input (old state) and one output (new state),
    // no new addresses, no lamport (de)compression.
    let inputs_struct = InstructionDataInvokeCpi {
        relay_fee: None,
        input_compressed_accounts_with_merkle_context: vec![old_state],
        output_compressed_accounts: vec![compressed_pda],
        proof: Some(proof),
        new_address_params: Vec::new(),
        compress_or_decompress_lamports: None,
        is_compress: false,
        cpi_context: Some(cpi_context),
    };
    verify(&ctx, &inputs_struct, &[&signer_seeds])?;
    Ok(())
}
// TODO: test with delegate (is disabled right now)
/// First CPI of the withdrawal flow: a compressed-token transfer signed by the
/// escrow's `token_owner_pda` (seeds: `["escrow", signer pubkey, bump]`).
///
/// With `cpi_context.set_context = true` the light system program caches this
/// transfer in the CPI context account instead of executing it; the follow-up
/// `cpi_compressed_pda_withdrawal` then executes both parts against the shared
/// `proof`.
#[inline(never)]
pub fn cpi_compressed_token_withdrawal<'info>(
    ctx: &Context<'_, '_, '_, 'info, EscrowCompressedTokensWithCompressedPda<'info>>,
    mint: Pubkey,
    _signer_is_delegate: bool,
    input_token_data_with_context: Vec<InputTokenDataWithContext>,
    output_compressed_accounts: Vec<PackedTokenTransferOutputData>,
    proof: CompressedProof,
    bump: u8,
    mut cpi_context: CompressedCpiContext,
) -> Result<()> {
    // Signer seeds for the token-owner PDA that owns the escrowed tokens.
    let bump = &[bump];
    let signer_bytes = ctx.accounts.signer.key.to_bytes();
    let seeds: [&[u8]; 3] = [b"escrow".as_slice(), signer_bytes.as_slice(), bump];
    // Cache rather than execute: the following PDA CPI completes the
    // combined transaction.
    cpi_context.set_context = true;
    let inputs_struct = CompressedTokenInstructionDataTransfer {
        proof: Some(proof),
        mint,
        delegated_transfer: None,
        input_token_data_with_context,
        output_compressed_accounts,
        is_compress: false,
        compress_or_decompress_amount: None,
        cpi_context: Some(cpi_context),
        lamports_change_account_merkle_tree_index: None,
    };
    let mut inputs = Vec::new();
    CompressedTokenInstructionDataTransfer::serialize(&inputs_struct, &mut inputs).unwrap();
    let cpi_accounts = light_compressed_token::cpi::accounts::TransferInstruction {
        fee_payer: ctx.accounts.signer.to_account_info(),
        // The escrow PDA is the token authority for the escrowed inputs.
        authority: ctx.accounts.token_owner_pda.to_account_info(),
        registered_program_pda: ctx.accounts.registered_program_pda.to_account_info(),
        noop_program: ctx.accounts.noop_program.to_account_info(),
        account_compression_authority: ctx.accounts.account_compression_authority.to_account_info(),
        account_compression_program: ctx.accounts.account_compression_program.to_account_info(),
        self_program: ctx.accounts.compressed_token_program.to_account_info(),
        cpi_authority_pda: ctx
            .accounts
            .compressed_token_cpi_authority_pda
            .to_account_info(),
        light_system_program: ctx.accounts.light_system_program.to_account_info(),
        // Pure compressed-to-compressed move: no token pool / SPL accounts.
        token_pool_pda: None,
        compress_or_decompress_token_account: None,
        token_program: None,
        system_program: ctx.accounts.system_program.to_account_info(),
    };
    let signer_seeds: [&[&[u8]]; 1] = [&seeds[..]];
    let mut cpi_ctx = CpiContext::new_with_signer(
        ctx.accounts.compressed_token_program.to_account_info(),
        cpi_accounts,
        &signer_seeds,
    );
    // Merkle tree / queue accounts are forwarded untouched.
    cpi_ctx.remaining_accounts = ctx.remaining_accounts.to_vec();
    light_compressed_token::cpi::transfer(cpi_ctx, inputs)?;
    Ok(())
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/mod.rs
|
pub mod escrow;
pub mod sdk;
pub mod withdrawal;
pub use escrow::*;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src
|
solana_public_repos/Lightprotocol/light-protocol/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs
|
use crate::{create_change_output_compressed_token_account, program::TokenEscrow, EscrowTimeLock};
use account_compression::utils::constants::CPI_AUTHORITY_PDA_SEED;
use anchor_lang::prelude::*;
use light_compressed_token::{
process_transfer::{
CompressedTokenInstructionDataTransfer, InputTokenDataWithContext,
PackedTokenTransferOutputData,
},
program::LightCompressedToken,
};
use light_hasher::{errors::HasherError, DataHasher, Hasher, Poseidon};
use light_sdk::{
legacy::create_cpi_inputs_for_new_account, light_system_accounts, verify::verify, LightTraits,
};
use light_system_program::{
invoke::processor::CompressedProof,
sdk::{
address::derive_address,
compressed_account::{CompressedAccount, CompressedAccountData, PackedMerkleContext},
CompressedCpiContext,
},
NewAddressParamsPacked, OutputCompressedAccountWithPackedContext,
};
/// Accounts for escrowing / withdrawing compressed tokens together with a
/// compressed timelock PDA.
///
/// NOTE(review): `#[light_system_accounts]` appears to append the accounts
/// needed for light system program CPIs (e.g. `registered_program_pda`,
/// `noop_program`, `account_compression_authority`, `system_program`, which
/// the CPI helpers reference) — confirm against the macro definition.
#[light_system_accounts]
#[derive(Accounts, LightTraits)]
pub struct EscrowCompressedTokensWithCompressedPda<'info> {
    /// Fee payer and owner of the escrowed funds; must sign.
    #[account(mut)]
    #[fee_payer]
    pub signer: Signer<'info>,
    /// CHECK: PDA derived from the signer, so each user gets a distinct
    /// escrow token authority; only used as a seeds-verified signer.
    #[account(seeds = [b"escrow".as_slice(), signer.key.to_bytes().as_slice()], bump)]
    pub token_owner_pda: AccountInfo<'info>,
    pub compressed_token_program: Program<'info, LightCompressedToken>,
    /// CHECK: CPI authority PDA of the compressed-token program; presumably
    /// validated by that program during the CPI — confirm.
    pub compressed_token_cpi_authority_pda: AccountInfo<'info>,
    #[self_program]
    pub self_program: Program<'info, TokenEscrow>,
    /// CHECK: CPI context account used to combine the token and PDA CPIs into
    /// one verified state transition.
    #[cpi_context]
    #[account(mut)]
    pub cpi_context_account: AccountInfo<'info>,
    /// CHECK: this program's CPI authority PDA (seed: `CPI_AUTHORITY_PDA_SEED`).
    #[authority]
    #[account(seeds = [CPI_AUTHORITY_PDA_SEED], bump)]
    pub cpi_authority_pda: AccountInfo<'info>,
}
/// Client-supplied description of the existing compressed timelock PDA, with
/// accounts referenced by index into the instruction's remaining accounts
/// ("packed" form).
#[derive(Debug, Clone, AnchorSerialize, AnchorDeserialize)]
pub struct PackedInputCompressedPda {
    /// Lock-up slot currently stored on chain; withdrawal fails before it.
    pub old_lock_up_time: u64,
    /// Additional lock-up in slots, applied relative to the current slot when
    /// the escrow state is re-created.
    pub new_lock_up_time: u64,
    /// Address of the compressed PDA (kept across the state transition).
    pub address: [u8; 32],
    /// Packed merkle context of the existing compressed account.
    pub merkle_context: PackedMerkleContext,
    /// Root index used to verify the inclusion proof.
    pub root_index: u16,
}
/// Escrows `escrow_amount` compressed tokens under the program's token-owner
/// PDA and records a compressed timelock PDA that unlocks `lock_up_time`
/// slots from now.
///
/// Two CPIs share one `cpi_context` account and a single validity `proof`:
/// the compressed-token transfer is cached first, then the light system
/// program call creates the timelock PDA and executes the combined
/// transaction.
pub fn process_escrow_compressed_tokens_with_compressed_pda<'info>(
    ctx: Context<'_, '_, '_, 'info, EscrowCompressedTokensWithCompressedPda<'info>>,
    lock_up_time: u64,
    escrow_amount: u64,
    proof: CompressedProof,
    mint: Pubkey,
    signer_is_delegate: bool,
    input_token_data_with_context: Vec<InputTokenDataWithContext>,
    output_state_merkle_tree_account_indices: Vec<u8>,
    new_address_params: NewAddressParamsPacked,
    cpi_context: CompressedCpiContext,
) -> Result<()> {
    let timelock_pda = create_compressed_pda_data(lock_up_time, &ctx, &new_address_params)?;
    // Output 0: the escrowed amount, held by the program's token-owner PDA.
    let escrowed_output = PackedTokenTransferOutputData {
        owner: ctx.accounts.token_owner_pda.key(),
        amount: escrow_amount,
        lamports: None,
        merkle_tree_index: output_state_merkle_tree_account_indices[0],
        tlv: None,
    };
    // Output 1: the remainder of the inputs, returned to the signer.
    let change_output = create_change_output_compressed_token_account(
        &input_token_data_with_context,
        &[escrowed_output.clone()],
        &ctx.accounts.signer.key(),
        output_state_merkle_tree_account_indices[1],
    );
    // Order matters: the token CPI must populate the CPI context before the
    // PDA CPI consumes it.
    cpi_compressed_token_transfer_pda(
        &ctx,
        mint,
        signer_is_delegate,
        input_token_data_with_context,
        vec![escrowed_output, change_output],
        proof.clone(),
        cpi_context,
    )?;
    cpi_compressed_pda_transfer(ctx, proof, new_address_params, timelock_pda, cpi_context)?;
    Ok(())
}
/// Second CPI of the escrow flow: invokes the light system program to create
/// the new compressed timelock PDA at the derived address.
///
/// Runs after `cpi_compressed_token_transfer_pda` has written the token part
/// into the CPI context account, so `first_set_context` is cleared here and
/// the combined transition is verified with a single `proof`.
///
/// # Errors
/// Propagates verification failures from the light system program.
fn cpi_compressed_pda_transfer<'info>(
    ctx: Context<'_, '_, '_, 'info, EscrowCompressedTokensWithCompressedPda<'info>>,
    proof: CompressedProof,
    new_address_params: NewAddressParamsPacked,
    compressed_pda: OutputCompressedAccountWithPackedContext,
    mut cpi_context: CompressedCpiContext,
) -> Result<()> {
    // Use the same seed constant as the `cpi_authority_pda` account
    // constraint so the derived bump always matches the PDA that actually
    // signs (previously a duplicated literal).
    let bump = Pubkey::find_program_address(&[CPI_AUTHORITY_PDA_SEED], &crate::ID).1;
    let bump = [bump];
    let signer_seeds = [CPI_AUTHORITY_PDA_SEED, &bump];
    // The token CPI already initialized the context; this call consumes it.
    cpi_context.first_set_context = false;
    // Create inputs struct for the new-account invocation.
    let inputs_struct = create_cpi_inputs_for_new_account(
        proof,
        new_address_params,
        compressed_pda,
        Some(cpi_context),
    );
    verify(&ctx, &inputs_struct, &[&signer_seeds])?;
    Ok(())
}
/// Builds the output compressed timelock PDA that unlocks `lock_up_time`
/// slots after the current slot, at an address derived from
/// `new_address_params.seed` and the referenced address merkle tree.
///
/// NOTE(review): the output is written to `merkle_tree_index: 0`, i.e. the
/// first remaining account — assumes the caller packs the state merkle tree
/// at index 0; confirm against the SDK instruction builder.
///
/// # Errors
/// - `ProgramError::NotEnoughAccountKeys` if the address merkle tree index is
///   out of range of the remaining accounts (previously an indexing panic).
/// - `ProgramError::InvalidArgument` if address derivation fails.
/// - Propagates Poseidon hashing failures.
///
/// # Panics
/// Panics if `current_slot + lock_up_time` overflows `u64`.
fn create_compressed_pda_data(
    lock_up_time: u64,
    ctx: &Context<'_, '_, '_, '_, EscrowCompressedTokensWithCompressedPda<'_>>,
    new_address_params: &NewAddressParamsPacked,
) -> Result<OutputCompressedAccountWithPackedContext> {
    let current_slot = Clock::get()?.slot;
    let timelock_compressed_pda = EscrowTimeLock {
        slot: current_slot.checked_add(lock_up_time).unwrap(),
    };
    let compressed_account_data = CompressedAccountData {
        discriminator: 1u64.to_le_bytes(),
        data: timelock_compressed_pda.try_to_vec().unwrap(),
        data_hash: timelock_compressed_pda
            .hash::<Poseidon>()
            .map_err(ProgramError::from)?,
    };
    // Resolve the address merkle tree from the remaining accounts; return an
    // error instead of panicking on an out-of-range index.
    let address_merkle_tree = ctx
        .remaining_accounts
        .get(new_address_params.address_merkle_tree_account_index as usize)
        .ok_or(ProgramError::NotEnoughAccountKeys)?;
    let derive_address = derive_address(&address_merkle_tree.key(), &new_address_params.seed)
        .map_err(|_| ProgramError::InvalidArgument)?;
    Ok(OutputCompressedAccountWithPackedContext {
        compressed_account: CompressedAccount {
            owner: crate::ID,
            lamports: 0,
            address: Some(derive_address),
            data: Some(compressed_account_data),
        },
        merkle_tree_index: 0,
    })
}
/// Data hash of the timelock PDA: the hash of the unlock slot's
/// little-endian byte encoding under the chosen `Hasher`.
impl light_hasher::DataHasher for EscrowTimeLock {
    fn hash<H: Hasher>(&self) -> std::result::Result<[u8; 32], HasherError> {
        let slot_le = self.slot.to_le_bytes();
        H::hash(&slot_le)
    }
}
/// First CPI of the escrow flow: a compressed-token transfer moving the
/// escrow amount from the signer's compressed token accounts to the escrow
/// token-owner PDA.
///
/// Sets `cpi_context.set_context = true` so the light system program caches
/// the transfer in the CPI context account; `cpi_compressed_pda_transfer`
/// then creates the timelock PDA and executes both parts under one `proof`.
/// The signer authorizes the inputs directly, so no PDA signing is needed
/// here (`CpiContext::new`, not `new_with_signer`).
#[inline(never)]
pub fn cpi_compressed_token_transfer_pda<'info>(
    ctx: &Context<'_, '_, '_, 'info, EscrowCompressedTokensWithCompressedPda<'info>>,
    mint: Pubkey,
    _signer_is_delegate: bool,
    input_token_data_with_context: Vec<InputTokenDataWithContext>,
    output_compressed_accounts: Vec<PackedTokenTransferOutputData>,
    proof: CompressedProof,
    mut cpi_context: CompressedCpiContext,
) -> Result<()> {
    // Cache rather than execute: the following PDA CPI completes the
    // combined transaction.
    cpi_context.set_context = true;
    let inputs_struct = CompressedTokenInstructionDataTransfer {
        proof: Some(proof),
        mint,
        delegated_transfer: None,
        input_token_data_with_context,
        output_compressed_accounts,
        is_compress: false,
        compress_or_decompress_amount: None,
        cpi_context: Some(cpi_context),
        lamports_change_account_merkle_tree_index: None,
    };
    let mut inputs = Vec::new();
    CompressedTokenInstructionDataTransfer::serialize(&inputs_struct, &mut inputs).unwrap();
    let cpi_accounts = light_compressed_token::cpi::accounts::TransferInstruction {
        fee_payer: ctx.accounts.signer.to_account_info(),
        // The signer owns the input token accounts, so it is the authority.
        authority: ctx.accounts.signer.to_account_info(),
        registered_program_pda: ctx.accounts.registered_program_pda.to_account_info(),
        noop_program: ctx.accounts.noop_program.to_account_info(),
        account_compression_authority: ctx.accounts.account_compression_authority.to_account_info(),
        account_compression_program: ctx.accounts.account_compression_program.to_account_info(),
        self_program: ctx.accounts.compressed_token_program.to_account_info(),
        cpi_authority_pda: ctx
            .accounts
            .compressed_token_cpi_authority_pda
            .to_account_info(),
        light_system_program: ctx.accounts.light_system_program.to_account_info(),
        // Pure compressed-to-compressed move: no token pool / SPL accounts.
        token_pool_pda: None,
        compress_or_decompress_token_account: None,
        token_program: None,
        system_program: ctx.accounts.system_program.to_account_info(),
    };
    let mut cpi_ctx = CpiContext::new(
        ctx.accounts.compressed_token_program.to_account_info(),
        cpi_accounts,
    );
    // Merkle tree / queue accounts are forwarded untouched.
    cpi_ctx.remaining_accounts = ctx.remaining_accounts.to_vec();
    light_compressed_token::cpi::transfer(cpi_ctx, inputs)?;
    Ok(())
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.