repo_id
stringclasses 563
values | file_path
stringlengths 40
166
| content
stringlengths 1
2.94M
| __index_level_0__
int64 0
0
|
|---|---|---|---|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_accounts_by_delegate_post_request_params.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedTokenAccountsByDelegatePostRequestParams {
    /// A base 58 encoded string.
    /// Double `Option` (via `double_option`) distinguishes "absent" from an explicit `null`.
    #[serde(
        rename = "cursor",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub cursor: Option<Option<String>>,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "delegate")]
    pub delegate: String,
    #[serde(
        rename = "limit",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub limit: Option<Option<i32>>,
    /// A Solana public key represented as a base58 string.
    #[serde(
        rename = "mint",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub mint: Option<Option<String>>,
}

impl GetCompressedTokenAccountsByDelegatePostRequestParams {
    /// Builds the params with the required `delegate`; the optional
    /// `cursor`, `limit`, and `mint` fields all start out unset.
    pub fn new(delegate: String) -> Self {
        Self {
            delegate,
            cursor: None,
            limit: None,
            mint: None,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_account_balance_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedAccountBalancePost200Response {
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetCompressedAccountBalancePost200ResponseResult>>,
}

impl GetCompressedAccountBalancePost200Response {
    /// Creates a response envelope with neither `result` nor `error` set.
    pub fn new(id: Id, jsonrpc: Jsonrpc) -> Self {
        Self {
            id,
            jsonrpc,
            error: None,
            result: None,
        }
    }
}

/// An ID to identify the response.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/data_slice.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataSlice {
    /// Number of bytes to return from the account data.
    #[serde(rename = "length")]
    pub length: u32,
    /// Byte offset into the account data at which the slice starts.
    #[serde(rename = "offset")]
    pub offset: u32,
}

impl DataSlice {
    /// Builds a slice selector covering `length` bytes starting at `offset`.
    pub fn new(length: u32, offset: u32) -> Self {
        Self { length, offset }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/token_balance.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct TokenBalance {
    // NOTE(review): balance is i32 per the generated OpenAPI schema — confirm
    // the indexer never reports balances beyond i32::MAX.
    #[serde(rename = "balance")]
    pub balance: i32,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "mint")]
    pub mint: String,
}

impl TokenBalance {
    /// Pairs a token `balance` with the `mint` it belongs to.
    pub fn new(balance: i32, mint: String) -> Self {
        Self { balance, mint }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/account_state.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// State of a compressed token account.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum AccountState {
    #[serde(rename = "initialized")]
    Initialized,
    #[serde(rename = "frozen")]
    Frozen,
}

impl std::fmt::Display for AccountState {
    /// Writes the wire-format (serde-rename) name of the variant.
    ///
    /// Implementing `Display` instead of `ToString` directly keeps
    /// `to_string()` available through the blanket impl while also making the
    /// type usable with `format!`/`write!` (clippy: `ToString` should not be
    /// implemented directly).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Self::Initialized => "initialized",
            Self::Frozen => "frozen",
        };
        f.write_str(name)
    }
}

impl Default for AccountState {
    fn default() -> Self {
        Self::Initialized
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_indexer_health_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetIndexerHealthPost200Response {
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// ok if healthy
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Result>,
}

impl GetIndexerHealthPost200Response {
    /// Creates a response envelope with neither `result` nor `error` set.
    pub fn new(id: Id, jsonrpc: Jsonrpc) -> Self {
        Self {
            id,
            jsonrpc,
            error: None,
            result: None,
        }
    }
}

/// An ID to identify the response.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}

/// ok if healthy
// NOTE(review): this generated enum shadows the prelude `Result` inside this
// module; fully qualify `core::result::Result` here if the std type is needed.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Result {
    #[serde(rename = "ok")]
    Ok,
}

impl Default for Result {
    fn default() -> Self {
        Result::Ok
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_accounts_by_owner_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedAccountsByOwnerPost200ResponseResult {
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    #[serde(rename = "value")]
    pub value: Box<models::PaginatedAccountList>,
}

impl GetCompressedAccountsByOwnerPost200ResponseResult {
    /// Pairs the indexer `context` with the paginated account list;
    /// both are boxed to keep the struct itself small.
    pub fn new(context: models::Context, value: models::PaginatedAccountList) -> Self {
        Self {
            context: Box::new(context),
            value: Box::new(value),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_transaction_with_compression_info_post_200_response_result_compression_info.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetTransactionWithCompressionInfoPost200ResponseResultCompressionInfo {
    /// Accounts closed by the transaction.
    #[serde(rename = "closedAccounts")]
    pub closed_accounts: Vec<models::AccountWithOptionalTokenData>,
    /// Accounts opened by the transaction.
    #[serde(rename = "openedAccounts")]
    pub opened_accounts: Vec<models::AccountWithOptionalTokenData>,
}

impl GetTransactionWithCompressionInfoPost200ResponseResultCompressionInfo {
    /// Groups the closed and opened account lists for a transaction.
    pub fn new(
        closed_accounts: Vec<models::AccountWithOptionalTokenData>,
        opened_accounts: Vec<models::AccountWithOptionalTokenData>,
    ) -> Self {
        Self {
            closed_accounts,
            opened_accounts,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/merkle_proof_with_context.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct MerkleProofWithContext {
    /// A 32-byte hash represented as a base58 string.
    #[serde(rename = "hash")]
    pub hash: String,
    #[serde(rename = "leafIndex")]
    pub leaf_index: u64,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "merkleTree")]
    pub merkle_tree: String,
    /// Sibling hashes from leaf to root, each a base58 string.
    #[serde(rename = "proof")]
    pub proof: Vec<String>,
    /// A 32-byte hash represented as a base58 string.
    #[serde(rename = "root")]
    pub root: String,
    #[serde(rename = "rootSeq")]
    pub root_seq: u64,
}

impl MerkleProofWithContext {
    /// Groups a Merkle proof with the tree, leaf, and root it proves against.
    pub fn new(
        hash: String,
        leaf_index: u64,
        merkle_tree: String,
        proof: Vec<String>,
        root: String,
        root_seq: u64,
    ) -> Self {
        Self {
            hash,
            leaf_index,
            merkle_tree,
            proof,
            root,
            root_seq,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compression_signatures_for_address_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressionSignaturesForAddressPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressionSignaturesForAddressPostRequestParams>,
}

impl GetCompressionSignaturesForAddressPostRequest {
    /// Assembles a complete JSON-RPC request; `params` is boxed to keep the
    /// struct small.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressionSignaturesForAddressPostRequestParams,
    ) -> Self {
        Self {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}

/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}

/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressionSignaturesForAddress")]
    GetCompressionSignaturesForAddress,
}

impl Default for Method {
    fn default() -> Self {
        Method::GetCompressionSignaturesForAddress
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compression_signatures_for_token_owner_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressionSignaturesForTokenOwnerPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressionSignaturesForOwnerPostRequestParams>,
}

impl GetCompressionSignaturesForTokenOwnerPostRequest {
    /// Assembles a complete JSON-RPC request; `params` is boxed to keep the
    /// struct small.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressionSignaturesForOwnerPostRequestParams,
    ) -> Self {
        Self {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}

/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}

/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressionSignaturesForTokenOwner")]
    GetCompressionSignaturesForTokenOwner,
}

impl Default for Method {
    fn default() -> Self {
        Method::GetCompressionSignaturesForTokenOwner
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_account_balance_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedAccountBalancePostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedAccountPostRequestParams>,
}

impl GetCompressedAccountBalancePostRequest {
    /// Assembles a complete JSON-RPC request; `params` is boxed to keep the
    /// struct small.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedAccountPostRequestParams,
    ) -> Self {
        Self {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}

/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}

/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressedAccountBalance")]
    GetCompressedAccountBalance,
}

impl Default for Method {
    fn default() -> Self {
        Method::GetCompressedAccountBalance
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/signature_info.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct SignatureInfo {
    /// An Unix timestamp (seconds)
    // NOTE(review): i32 seconds overflows in 2038; the width comes from the
    // generated OpenAPI schema — confirm upstream before widening.
    #[serde(rename = "blockTime")]
    pub block_time: i32,
    /// A Solana transaction signature.
    #[serde(rename = "signature")]
    pub signature: String,
    #[serde(rename = "slot")]
    pub slot: i32,
}

impl SignatureInfo {
    /// Bundles a transaction signature with its slot and block time.
    pub fn new(block_time: i32, signature: String, slot: i32) -> Self {
        Self {
            block_time,
            signature,
            slot,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_validity_proof_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetValidityProofPost200ResponseResult {
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    #[serde(rename = "value")]
    pub value: Box<models::CompressedProofWithContext>,
}

impl GetValidityProofPost200ResponseResult {
    /// Pairs the indexer `context` with the validity proof; both are boxed
    /// to keep the struct itself small.
    pub fn new(context: models::Context, value: models::CompressedProofWithContext) -> Self {
        Self {
            context: Box::new(context),
            value: Box::new(value),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_latest_non_voting_signatures_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetLatestNonVotingSignaturesPost200ResponseResult {
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    #[serde(rename = "value")]
    pub value: Box<models::SignatureInfoListWithError>,
}

impl GetLatestNonVotingSignaturesPost200ResponseResult {
    /// Pairs the indexer `context` with the signature list; both are boxed
    /// to keep the struct itself small.
    pub fn new(context: models::Context, value: models::SignatureInfoListWithError) -> Self {
        Self {
            context: Box::new(context),
            value: Box::new(value),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_account_proof_post_request_params.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedAccountProofPostRequestParams {
    /// A 32-byte hash represented as a base58 string.
    #[serde(rename = "hash")]
    pub hash: String,
}

impl GetCompressedAccountProofPostRequestParams {
    /// Wraps the account `hash` the proof is requested for.
    pub fn new(hash: String) -> Self {
        Self { hash }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compression_signatures_for_account_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressionSignaturesForAccountPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedAccountProofPostRequestParams>,
}

impl GetCompressionSignaturesForAccountPostRequest {
    /// Assembles a complete JSON-RPC request; `params` is boxed to keep the
    /// struct small.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedAccountProofPostRequestParams,
    ) -> Self {
        Self {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}

/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}

/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressionSignaturesForAccount")]
    GetCompressionSignaturesForAccount,
}

impl Default for Method {
    fn default() -> Self {
        Method::GetCompressionSignaturesForAccount
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_accounts_by_delegate_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedTokenAccountsByDelegatePost200ResponseResult {
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    #[serde(rename = "value")]
    pub value: Box<models::TokenAccountList>,
}

impl GetCompressedTokenAccountsByDelegatePost200ResponseResult {
    /// Pairs the indexer `context` with the token-account list; both are
    /// boxed to keep the struct itself small.
    pub fn new(context: models::Context, value: models::TokenAccountList) -> Self {
        Self {
            context: Box::new(context),
            value: Box::new(value),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_accounts_by_delegate_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedTokenAccountsByDelegatePostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedTokenAccountsByDelegatePostRequestParams>,
}

impl GetCompressedTokenAccountsByDelegatePostRequest {
    /// Assembles a complete JSON-RPC request; `params` is boxed to keep the
    /// struct small.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedTokenAccountsByDelegatePostRequestParams,
    ) -> Self {
        Self {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}

/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}

/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressedTokenAccountsByDelegate")]
    GetCompressedTokenAccountsByDelegate,
}

impl Default for Method {
    fn default() -> Self {
        Method::GetCompressedTokenAccountsByDelegate
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_accounts_by_owner_post_request_params.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedAccountsByOwnerPostRequestParams {
    /// A 32-byte hash represented as a base58 string.
    /// Double `Option` (via `double_option`) distinguishes "absent" from an explicit `null`.
    #[serde(
        rename = "cursor",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub cursor: Option<Option<String>>,
    #[serde(
        rename = "dataSlice",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub data_slice: Option<Option<Box<models::DataSlice>>>,
    #[serde(rename = "filters", skip_serializing_if = "Option::is_none")]
    pub filters: Option<Vec<models::FilterSelector>>,
    #[serde(
        rename = "limit",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub limit: Option<Option<i32>>,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "owner")]
    pub owner: String,
}

impl GetCompressedAccountsByOwnerPostRequestParams {
    /// Builds the params with the required `owner`; every optional filter
    /// (`cursor`, `data_slice`, `filters`, `limit`) starts out unset.
    pub fn new(owner: String) -> Self {
        Self {
            owner,
            cursor: None,
            data_slice: None,
            filters: None,
            limit: None,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_accounts_by_owner_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedAccountsByOwnerPost200Response {
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetCompressedAccountsByOwnerPost200ResponseResult>>,
}

impl GetCompressedAccountsByOwnerPost200Response {
    /// Creates a response envelope with neither `result` nor `error` set.
    pub fn new(id: Id, jsonrpc: Jsonrpc) -> Self {
        Self {
            id,
            jsonrpc,
            error: None,
            result: None,
        }
    }
}

/// An ID to identify the response.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/signature_info_list_with_error.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct SignatureInfoListWithError {
    /// Signature entries, each possibly carrying its own error.
    #[serde(rename = "items")]
    pub items: Vec<models::SignatureInfoWithError>,
}

impl SignatureInfoListWithError {
    /// Wraps the list of signature entries.
    pub fn new(items: Vec<models::SignatureInfoWithError>) -> Self {
        Self { items }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_transaction_with_compression_info_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetTransactionWithCompressionInfoPost200Response {
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetTransactionWithCompressionInfoPost200ResponseResult>>,
}

impl GetTransactionWithCompressionInfoPost200Response {
    /// Creates a response envelope with neither `result` nor `error` set.
    pub fn new(id: Id, jsonrpc: Jsonrpc) -> Self {
        Self {
            id,
            jsonrpc,
            error: None,
            result: None,
        }
    }
}

/// An ID to identify the response.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Self {
        Id::TestAccount
    }
}

/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Self {
        Jsonrpc::Variant2Period0
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_account_balance_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedTokenAccountBalancePost200ResponseResult {
#[serde(rename = "context")]
pub context: Box<models::Context>,
#[serde(rename = "value")]
pub value: Box<models::TokenAccountBalance>,
}
impl GetCompressedTokenAccountBalancePost200ResponseResult {
    /// Boxes the response context together with the token-account balance.
    pub fn new(
        context: models::Context,
        value: models::TokenAccountBalance,
    ) -> Self {
        Self {
            context: Box::new(context),
            value: Box::new(value),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_indexer_slot_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// JSON-RPC request body for the `getIndexerSlot` method (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetIndexerSlotPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
}
impl GetIndexerSlotPostRequest {
pub fn new(id: Id, jsonrpc: Jsonrpc, method: Method) -> GetIndexerSlotPostRequest {
GetIndexerSlotPostRequest {
id,
jsonrpc,
method,
}
}
}
/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    /// Serialized as the JSON string `"test-account"`.
    #[serde(rename = "test-account")]
    TestAccount,
}
// Single-variant enum: the default is the only possible value.
impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    /// Serialized as the JSON string `"2.0"`.
    #[serde(rename = "2.0")]
    Variant2Period0,
}
// Single-variant enum: the default is the only possible value.
impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    /// Serialized as the JSON string `"getIndexerSlot"`.
    #[serde(rename = "getIndexerSlot")]
    GetIndexerSlot,
}
// Single-variant enum: the default is the only possible value.
impl Default for Method {
    fn default() -> Method {
        Self::GetIndexerSlot
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compression_signatures_for_owner_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// JSON-RPC request body for the `getCompressionSignaturesForOwner` method (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressionSignaturesForOwnerPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    /// Method parameters (boxed by the generator).
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressionSignaturesForOwnerPostRequestParams>,
}
impl GetCompressionSignaturesForOwnerPostRequest {
    /// Assembles a `getCompressionSignaturesForOwner` request envelope,
    /// boxing the method parameters.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressionSignaturesForOwnerPostRequestParams,
    ) -> Self {
        Self {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    /// Serialized as the JSON string `"test-account"`.
    #[serde(rename = "test-account")]
    TestAccount,
}
// Single-variant enum: the default is the only possible value.
impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    /// Serialized as the JSON string `"2.0"`.
    #[serde(rename = "2.0")]
    Variant2Period0,
}
// Single-variant enum: the default is the only possible value.
impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    /// Serialized as the JSON string `"getCompressionSignaturesForOwner"`.
    #[serde(rename = "getCompressionSignaturesForOwner")]
    GetCompressionSignaturesForOwner,
}
// Single-variant enum: the default is the only possible value.
impl Default for Method {
    fn default() -> Method {
        Self::GetCompressionSignaturesForOwner
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compression_signatures_for_address_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// Result payload of `getCompressionSignaturesForAddress` (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressionSignaturesForAddressPost200ResponseResult {
    /// Response context metadata (see [`models::Context`]).
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    /// Paginated list of signature info entries.
    #[serde(rename = "value")]
    pub value: Box<models::PaginatedSignatureInfoList>,
}
impl GetCompressionSignaturesForAddressPost200ResponseResult {
    /// Boxes the response context together with the paginated signature list.
    pub fn new(
        context: models::Context,
        value: models::PaginatedSignatureInfoList,
    ) -> Self {
        Self {
            context: Box::new(context),
            value: Box::new(value),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/mod.rs
|
pub mod account;
pub use self::account::Account;
pub mod account_data;
pub use self::account_data::AccountData;
pub mod account_list;
pub use self::account_list::AccountList;
pub mod account_state;
pub use self::account_state::AccountState;
pub mod account_with_optional_token_data;
pub use self::account_with_optional_token_data::AccountWithOptionalTokenData;
pub mod address_with_tree;
pub use self::address_with_tree::AddressWithTree;
pub mod compressed_proof;
pub use self::compressed_proof::CompressedProof;
pub mod compressed_proof_with_context;
pub use self::compressed_proof_with_context::CompressedProofWithContext;
pub mod context;
pub use self::context::Context;
pub mod data_slice;
pub use self::data_slice::DataSlice;
pub mod filter_selector;
pub use self::filter_selector::FilterSelector;
pub mod _get_compressed_account_balance_post_200_response;
pub use self::_get_compressed_account_balance_post_200_response::GetCompressedAccountBalancePost200Response;
pub mod _get_compressed_account_balance_post_200_response_result;
pub use self::_get_compressed_account_balance_post_200_response_result::GetCompressedAccountBalancePost200ResponseResult;
pub mod _get_compressed_account_balance_post_request;
pub use self::_get_compressed_account_balance_post_request::GetCompressedAccountBalancePostRequest;
pub mod _get_compressed_account_post_200_response;
pub use self::_get_compressed_account_post_200_response::GetCompressedAccountPost200Response;
pub mod _get_compressed_account_post_200_response_error;
pub use self::_get_compressed_account_post_200_response_error::GetCompressedAccountPost200ResponseError;
pub mod _get_compressed_account_post_200_response_result;
pub use self::_get_compressed_account_post_200_response_result::GetCompressedAccountPost200ResponseResult;
pub mod _get_compressed_account_post_429_response;
pub use self::_get_compressed_account_post_429_response::GetCompressedAccountPost429Response;
pub mod _get_compressed_account_post_request;
pub use self::_get_compressed_account_post_request::GetCompressedAccountPostRequest;
pub mod _get_compressed_account_post_request_params;
pub use self::_get_compressed_account_post_request_params::GetCompressedAccountPostRequestParams;
pub mod _get_compressed_account_proof_post_200_response;
pub use self::_get_compressed_account_proof_post_200_response::GetCompressedAccountProofPost200Response;
pub mod _get_compressed_account_proof_post_200_response_result;
pub use self::_get_compressed_account_proof_post_200_response_result::GetCompressedAccountProofPost200ResponseResult;
pub mod _get_compressed_account_proof_post_request;
pub use self::_get_compressed_account_proof_post_request::GetCompressedAccountProofPostRequest;
pub mod _get_compressed_account_proof_post_request_params;
pub use self::_get_compressed_account_proof_post_request_params::GetCompressedAccountProofPostRequestParams;
pub mod _get_compressed_accounts_by_owner_post_200_response;
pub use self::_get_compressed_accounts_by_owner_post_200_response::GetCompressedAccountsByOwnerPost200Response;
pub mod _get_compressed_accounts_by_owner_post_200_response_result;
pub use self::_get_compressed_accounts_by_owner_post_200_response_result::GetCompressedAccountsByOwnerPost200ResponseResult;
pub mod _get_compressed_accounts_by_owner_post_request;
pub use self::_get_compressed_accounts_by_owner_post_request::GetCompressedAccountsByOwnerPostRequest;
pub mod _get_compressed_accounts_by_owner_post_request_params;
pub use self::_get_compressed_accounts_by_owner_post_request_params::GetCompressedAccountsByOwnerPostRequestParams;
pub mod _get_compressed_balance_by_owner_post_request;
pub use self::_get_compressed_balance_by_owner_post_request::GetCompressedBalanceByOwnerPostRequest;
pub mod _get_compressed_balance_by_owner_post_request_params;
pub use self::_get_compressed_balance_by_owner_post_request_params::GetCompressedBalanceByOwnerPostRequestParams;
pub mod _get_compressed_token_account_balance_post_200_response;
pub use self::_get_compressed_token_account_balance_post_200_response::GetCompressedTokenAccountBalancePost200Response;
pub mod _get_compressed_token_account_balance_post_200_response_result;
pub use self::_get_compressed_token_account_balance_post_200_response_result::GetCompressedTokenAccountBalancePost200ResponseResult;
pub mod _get_compressed_token_account_balance_post_request;
pub use self::_get_compressed_token_account_balance_post_request::GetCompressedTokenAccountBalancePostRequest;
pub mod _get_compressed_token_accounts_by_delegate_post_200_response;
pub use self::_get_compressed_token_accounts_by_delegate_post_200_response::GetCompressedTokenAccountsByDelegatePost200Response;
pub mod _get_compressed_token_accounts_by_delegate_post_200_response_result;
pub use self::_get_compressed_token_accounts_by_delegate_post_200_response_result::GetCompressedTokenAccountsByDelegatePost200ResponseResult;
pub mod _get_compressed_token_accounts_by_delegate_post_request;
pub use self::_get_compressed_token_accounts_by_delegate_post_request::GetCompressedTokenAccountsByDelegatePostRequest;
pub mod _get_compressed_token_accounts_by_delegate_post_request_params;
pub use self::_get_compressed_token_accounts_by_delegate_post_request_params::GetCompressedTokenAccountsByDelegatePostRequestParams;
pub mod _get_compressed_token_accounts_by_owner_post_request;
pub use self::_get_compressed_token_accounts_by_owner_post_request::GetCompressedTokenAccountsByOwnerPostRequest;
pub mod _get_compressed_token_accounts_by_owner_post_request_params;
pub use self::_get_compressed_token_accounts_by_owner_post_request_params::GetCompressedTokenAccountsByOwnerPostRequestParams;
pub mod _get_compressed_token_balances_by_owner_post_200_response;
pub use self::_get_compressed_token_balances_by_owner_post_200_response::GetCompressedTokenBalancesByOwnerPost200Response;
pub mod _get_compressed_token_balances_by_owner_post_200_response_result;
pub use self::_get_compressed_token_balances_by_owner_post_200_response_result::GetCompressedTokenBalancesByOwnerPost200ResponseResult;
pub mod _get_compressed_token_balances_by_owner_post_request;
pub use self::_get_compressed_token_balances_by_owner_post_request::GetCompressedTokenBalancesByOwnerPostRequest;
pub mod _get_compression_signatures_for_account_post_200_response;
pub use self::_get_compression_signatures_for_account_post_200_response::GetCompressionSignaturesForAccountPost200Response;
pub mod _get_compression_signatures_for_account_post_200_response_result;
pub use self::_get_compression_signatures_for_account_post_200_response_result::GetCompressionSignaturesForAccountPost200ResponseResult;
pub mod _get_compression_signatures_for_account_post_request;
pub use self::_get_compression_signatures_for_account_post_request::GetCompressionSignaturesForAccountPostRequest;
pub mod _get_compression_signatures_for_address_post_200_response;
pub use self::_get_compression_signatures_for_address_post_200_response::GetCompressionSignaturesForAddressPost200Response;
pub mod _get_compression_signatures_for_address_post_200_response_result;
pub use self::_get_compression_signatures_for_address_post_200_response_result::GetCompressionSignaturesForAddressPost200ResponseResult;
pub mod _get_compression_signatures_for_address_post_request;
pub use self::_get_compression_signatures_for_address_post_request::GetCompressionSignaturesForAddressPostRequest;
pub mod _get_compression_signatures_for_address_post_request_params;
pub use self::_get_compression_signatures_for_address_post_request_params::GetCompressionSignaturesForAddressPostRequestParams;
pub mod _get_compression_signatures_for_owner_post_request;
pub use self::_get_compression_signatures_for_owner_post_request::GetCompressionSignaturesForOwnerPostRequest;
pub mod _get_compression_signatures_for_owner_post_request_params;
pub use self::_get_compression_signatures_for_owner_post_request_params::GetCompressionSignaturesForOwnerPostRequestParams;
pub mod _get_compression_signatures_for_token_owner_post_request;
pub use self::_get_compression_signatures_for_token_owner_post_request::GetCompressionSignaturesForTokenOwnerPostRequest;
pub mod _get_indexer_health_post_200_response;
pub use self::_get_indexer_health_post_200_response::GetIndexerHealthPost200Response;
pub mod _get_indexer_health_post_request;
pub use self::_get_indexer_health_post_request::GetIndexerHealthPostRequest;
pub mod _get_indexer_slot_post_200_response;
pub use self::_get_indexer_slot_post_200_response::GetIndexerSlotPost200Response;
pub mod _get_indexer_slot_post_request;
pub use self::_get_indexer_slot_post_request::GetIndexerSlotPostRequest;
pub mod _get_latest_compression_signatures_post_request;
pub use self::_get_latest_compression_signatures_post_request::GetLatestCompressionSignaturesPostRequest;
pub mod _get_latest_compression_signatures_post_request_params;
pub use self::_get_latest_compression_signatures_post_request_params::GetLatestCompressionSignaturesPostRequestParams;
pub mod _get_latest_non_voting_signatures_post_200_response;
pub use self::_get_latest_non_voting_signatures_post_200_response::GetLatestNonVotingSignaturesPost200Response;
pub mod _get_latest_non_voting_signatures_post_200_response_result;
pub use self::_get_latest_non_voting_signatures_post_200_response_result::GetLatestNonVotingSignaturesPost200ResponseResult;
pub mod _get_latest_non_voting_signatures_post_request;
pub use self::_get_latest_non_voting_signatures_post_request::GetLatestNonVotingSignaturesPostRequest;
pub mod _get_multiple_compressed_account_proofs_post_200_response;
pub use self::_get_multiple_compressed_account_proofs_post_200_response::GetMultipleCompressedAccountProofsPost200Response;
pub mod _get_multiple_compressed_account_proofs_post_200_response_result;
pub use self::_get_multiple_compressed_account_proofs_post_200_response_result::GetMultipleCompressedAccountProofsPost200ResponseResult;
pub mod _get_multiple_compressed_account_proofs_post_request;
pub use self::_get_multiple_compressed_account_proofs_post_request::GetMultipleCompressedAccountProofsPostRequest;
pub mod _get_multiple_compressed_accounts_post_200_response;
pub use self::_get_multiple_compressed_accounts_post_200_response::GetMultipleCompressedAccountsPost200Response;
pub mod _get_multiple_compressed_accounts_post_200_response_result;
pub use self::_get_multiple_compressed_accounts_post_200_response_result::GetMultipleCompressedAccountsPost200ResponseResult;
pub mod _get_multiple_compressed_accounts_post_request;
pub use self::_get_multiple_compressed_accounts_post_request::GetMultipleCompressedAccountsPostRequest;
pub mod _get_multiple_compressed_accounts_post_request_params;
pub use self::_get_multiple_compressed_accounts_post_request_params::GetMultipleCompressedAccountsPostRequestParams;
pub mod _get_multiple_new_address_proofs_post_200_response;
pub use self::_get_multiple_new_address_proofs_post_200_response::GetMultipleNewAddressProofsPost200Response;
pub mod _get_multiple_new_address_proofs_post_200_response_result;
pub use self::_get_multiple_new_address_proofs_post_200_response_result::GetMultipleNewAddressProofsPost200ResponseResult;
pub mod _get_multiple_new_address_proofs_post_request;
pub use self::_get_multiple_new_address_proofs_post_request::GetMultipleNewAddressProofsPostRequest;
pub mod _get_multiple_new_address_proofs_v2_post_request;
pub use self::_get_multiple_new_address_proofs_v2_post_request::GetMultipleNewAddressProofsV2PostRequest;
pub mod _get_transaction_with_compression_info_post_200_response;
pub use self::_get_transaction_with_compression_info_post_200_response::GetTransactionWithCompressionInfoPost200Response;
pub mod _get_transaction_with_compression_info_post_200_response_result;
pub use self::_get_transaction_with_compression_info_post_200_response_result::GetTransactionWithCompressionInfoPost200ResponseResult;
pub mod _get_transaction_with_compression_info_post_200_response_result_compression_info;
pub use self::_get_transaction_with_compression_info_post_200_response_result_compression_info::GetTransactionWithCompressionInfoPost200ResponseResultCompressionInfo;
pub mod _get_transaction_with_compression_info_post_request;
pub use self::_get_transaction_with_compression_info_post_request::GetTransactionWithCompressionInfoPostRequest;
pub mod _get_transaction_with_compression_info_post_request_params;
pub use self::_get_transaction_with_compression_info_post_request_params::GetTransactionWithCompressionInfoPostRequestParams;
pub mod _get_validity_proof_post_200_response;
pub use self::_get_validity_proof_post_200_response::GetValidityProofPost200Response;
pub mod _get_validity_proof_post_200_response_result;
pub use self::_get_validity_proof_post_200_response_result::GetValidityProofPost200ResponseResult;
pub mod _get_validity_proof_post_request;
pub use self::_get_validity_proof_post_request::GetValidityProofPostRequest;
pub mod _get_validity_proof_post_request_params;
pub use self::_get_validity_proof_post_request_params::GetValidityProofPostRequestParams;
pub mod memcmp;
pub use self::memcmp::Memcmp;
pub mod merkle_context_with_new_address_proof;
pub use self::merkle_context_with_new_address_proof::MerkleContextWithNewAddressProof;
pub mod merkle_proof_with_context;
pub use self::merkle_proof_with_context::MerkleProofWithContext;
pub mod paginated_account_list;
pub use self::paginated_account_list::PaginatedAccountList;
pub mod paginated_signature_info_list;
pub use self::paginated_signature_info_list::PaginatedSignatureInfoList;
pub mod signature_info;
pub use self::signature_info::SignatureInfo;
pub mod signature_info_list;
pub use self::signature_info_list::SignatureInfoList;
pub mod signature_info_list_with_error;
pub use self::signature_info_list_with_error::SignatureInfoListWithError;
pub mod signature_info_with_error;
pub use self::signature_info_with_error::SignatureInfoWithError;
pub mod token_acccount;
pub use self::token_acccount::TokenAcccount;
pub mod token_account_balance;
pub use self::token_account_balance::TokenAccountBalance;
pub mod token_account_list;
pub use self::token_account_list::TokenAccountList;
pub mod token_balance;
pub use self::token_balance::TokenBalance;
pub mod token_balance_list;
pub use self::token_balance_list::TokenBalanceList;
pub mod token_data;
pub use self::token_data::TokenData;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_account_balance_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// Result payload of `getCompressedAccountBalance` (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedAccountBalancePost200ResponseResult {
    /// Response context metadata (see [`models::Context`]).
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    /// The balance value.
    // NOTE(review): modeled as i32 by the generator; confirm against the spec
    // that real balances cannot exceed i32::MAX.
    #[serde(rename = "value")]
    pub value: i32,
}
impl GetCompressedAccountBalancePost200ResponseResult {
    /// Boxes the response context together with the raw balance value.
    pub fn new(context: models::Context, value: i32) -> Self {
        Self {
            value,
            context: Box::new(context),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/account.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// A compressed account as returned by the photon indexer (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct Account {
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "address", skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,
    /// Optional account data payload; omitted from the JSON when `None`.
    #[serde(rename = "data", skip_serializing_if = "Option::is_none")]
    pub data: Option<Box<models::AccountData>>,
    /// A 32-byte hash represented as a base58 string.
    #[serde(rename = "hash")]
    pub hash: String,
    /// Account balance in lamports.
    // NOTE(review): i32 comes from the generated spec; confirm it cannot
    // overflow for real lamport balances.
    #[serde(rename = "lamports")]
    pub lamports: i32,
    /// Index of the account's leaf in its Merkle tree.
    #[serde(rename = "leafIndex")]
    pub leaf_index: i32,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "owner")]
    pub owner: String,
    /// Sequence number reported by the indexer.
    #[serde(rename = "seq")]
    pub seq: i32,
    /// Slot at which the account was created.
    #[serde(rename = "slotCreated")]
    pub slot_created: i32,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "tree")]
    pub tree: String,
}
impl Account {
pub fn new(
hash: String,
lamports: i32,
leaf_index: i32,
owner: String,
seq: i32,
slot_created: i32,
tree: String,
) -> Account {
Account {
address: None,
data: None,
hash,
lamports,
leaf_index,
owner,
seq,
slot_created,
tree,
}
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compression_signatures_for_account_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// Result payload of `getCompressionSignaturesForAccount` (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressionSignaturesForAccountPost200ResponseResult {
    /// Response context metadata (see [`models::Context`]).
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    /// The list of signature info entries.
    #[serde(rename = "value")]
    pub value: Box<models::SignatureInfoList>,
}
impl GetCompressionSignaturesForAccountPost200ResponseResult {
    /// Boxes the response context together with the signature list.
    pub fn new(
        context: models::Context,
        value: models::SignatureInfoList,
    ) -> Self {
        Self {
            context: Box::new(context),
            value: Box::new(value),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_validity_proof_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// JSON-RPC response envelope for `getValidityProof` (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetValidityProofPost200Response {
    /// Error object returned by the endpoint; omitted from the JSON when `None`.
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// Successful result payload; omitted from the JSON when `None`.
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetValidityProofPost200ResponseResult>>,
}
impl GetValidityProofPost200Response {
pub fn new(id: Id, jsonrpc: Jsonrpc) -> GetValidityProofPost200Response {
GetValidityProofPost200Response {
error: None,
id,
jsonrpc,
result: None,
}
}
}
/// An ID to identify the response.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    /// Serialized as the JSON string `"test-account"`.
    #[serde(rename = "test-account")]
    TestAccount,
}
// Single-variant enum: the default is the only possible value.
impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    /// Serialized as the JSON string `"2.0"`.
    #[serde(rename = "2.0")]
    Variant2Period0,
}
// Single-variant enum: the default is the only possible value.
impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/paginated_signature_info_list.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// A page of signature info entries plus an optional continuation cursor (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaginatedSignatureInfoList {
    /// Continuation cursor. `double_option` distinguishes a field that is
    /// absent in the JSON (`None`) from one explicitly set to `null`
    /// (`Some(None)`).
    #[serde(
        rename = "cursor",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub cursor: Option<Option<String>>,
    /// The signature entries on this page.
    #[serde(rename = "items")]
    pub items: Vec<models::SignatureInfo>,
}
impl PaginatedSignatureInfoList {
    /// Wraps `items` into a page with no continuation cursor.
    pub fn new(items: Vec<models::SignatureInfo>) -> Self {
        Self { cursor: None, items }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_balance_by_owner_post_request_params.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// Parameters for the `getCompressedBalanceByOwner` method (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedBalanceByOwnerPostRequestParams {
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "owner")]
    pub owner: String,
}
impl GetCompressedBalanceByOwnerPostRequestParams {
pub fn new(owner: String) -> GetCompressedBalanceByOwnerPostRequestParams {
GetCompressedBalanceByOwnerPostRequestParams { owner }
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/token_acccount.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// A compressed account paired with its token-layer data (generated model).
///
/// NOTE(review): the `Acccount` spelling (three c's) comes from the OpenAPI
/// schema; renaming it here would break callers and must be fixed upstream.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct TokenAcccount {
    /// The underlying compressed account.
    #[serde(rename = "account")]
    pub account: Box<models::Account>,
    /// Token data associated with the account.
    #[serde(rename = "tokenData")]
    pub token_data: Box<models::TokenData>,
}
impl TokenAcccount {
    /// Pairs a compressed account with its token data, boxing both.
    pub fn new(account: models::Account, token_data: models::TokenData) -> Self {
        Self {
            token_data: Box::new(token_data),
            account: Box::new(account),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_account_balance_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// JSON-RPC response envelope for `getCompressedTokenAccountBalance` (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedTokenAccountBalancePost200Response {
    /// Error object returned by the endpoint; omitted from the JSON when `None`.
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// Successful result payload; omitted from the JSON when `None`.
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetCompressedTokenAccountBalancePost200ResponseResult>>,
}
impl GetCompressedTokenAccountBalancePost200Response {
pub fn new(id: Id, jsonrpc: Jsonrpc) -> GetCompressedTokenAccountBalancePost200Response {
GetCompressedTokenAccountBalancePost200Response {
error: None,
id,
jsonrpc,
result: None,
}
}
}
/// An ID to identify the response.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    /// Serialized as the JSON string `"test-account"`.
    #[serde(rename = "test-account")]
    TestAccount,
}
// Single-variant enum: the default is the only possible value.
impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    /// Serialized as the JSON string `"2.0"`.
    #[serde(rename = "2.0")]
    Variant2Period0,
}
// Single-variant enum: the default is the only possible value.
impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_transaction_with_compression_info_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// GetTransactionWithCompressionInfoPost200ResponseResult : A Solana transaction with additional compression information
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetTransactionWithCompressionInfoPost200ResponseResult {
    /// Compression-specific details for the transaction; omitted when `None`.
    #[serde(rename = "compression_info", skip_serializing_if = "Option::is_none")]
    pub compression_info:
        Option<Box<models::GetTransactionWithCompressionInfoPost200ResponseResultCompressionInfo>>,
    /// An encoded confirmed transaction with status meta
    // Kept as raw JSON because the transaction shape is not modeled in the spec.
    #[serde(rename = "transaction", skip_serializing_if = "Option::is_none")]
    pub transaction: Option<serde_json::Value>,
}
impl GetTransactionWithCompressionInfoPost200ResponseResult {
/// A Solana transaction with additional compression information
pub fn new() -> GetTransactionWithCompressionInfoPost200ResponseResult {
GetTransactionWithCompressionInfoPost200ResponseResult {
compression_info: None,
transaction: None,
}
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/signature_info_with_error.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// A transaction signature record that may carry an error string (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct SignatureInfoWithError {
    /// An Unix timestamp (seconds)
    #[serde(rename = "blockTime")]
    pub block_time: i32,
    /// Transaction error, if any. `double_option` distinguishes a field
    /// absent in the JSON (`None`) from an explicit `null` (`Some(None)`).
    #[serde(
        rename = "error",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub error: Option<Option<String>>,
    /// A Solana transaction signature.
    #[serde(rename = "signature")]
    pub signature: String,
    /// Slot in which the transaction was processed.
    #[serde(rename = "slot")]
    pub slot: i32,
}
impl SignatureInfoWithError {
pub fn new(block_time: i32, signature: String, slot: i32) -> SignatureInfoWithError {
SignatureInfoWithError {
block_time,
error: None,
signature,
slot,
}
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compression_signatures_for_account_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// JSON-RPC response envelope for `getCompressionSignaturesForAccount` (generated model).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressionSignaturesForAccountPost200Response {
    /// Error object returned by the endpoint; omitted from the JSON when `None`.
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// Successful result payload; omitted from the JSON when `None`.
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetCompressionSignaturesForAccountPost200ResponseResult>>,
}
impl GetCompressionSignaturesForAccountPost200Response {
pub fn new(id: Id, jsonrpc: Jsonrpc) -> GetCompressionSignaturesForAccountPost200Response {
GetCompressionSignaturesForAccountPost200Response {
error: None,
id,
jsonrpc,
result: None,
}
}
}
/// An ID to identify the response.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
#[serde(rename = "test-account")]
TestAccount,
}
impl Default for Id {
fn default() -> Id {
Self::TestAccount
}
}
/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
#[serde(rename = "2.0")]
Variant2Period0,
}
impl Default for Jsonrpc {
fn default() -> Jsonrpc {
Self::Variant2Period0
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_balances_by_owner_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC envelope for the `getCompressedTokenBalancesByOwner` response.
/// Exactly one of `error` / `result` is expected to be populated.
pub struct GetCompressedTokenBalancesByOwnerPost200Response {
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetCompressedTokenBalancesByOwnerPost200ResponseResult>>,
}

impl GetCompressedTokenBalancesByOwnerPost200Response {
    /// Creates an empty envelope (no `error`, no `result`); fill one in
    /// before serializing.
    pub fn new(id: Id, jsonrpc: Jsonrpc) -> GetCompressedTokenBalancesByOwnerPost200Response {
        GetCompressedTokenBalancesByOwnerPost200Response {
            error: None,
            id,
            jsonrpc,
            result: None,
        }
    }
}
/// An ID to identify the response.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/merkle_context_with_new_address_proof.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// Non-inclusion proof context for a new address in an indexed Merkle tree:
/// the low/high range elements bracketing `address` plus the Merkle proof
/// of the low element (generated OpenAPI model).
pub struct MerkleContextWithNewAddressProof {
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "address")]
    pub address: String,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "higherRangeAddress")]
    pub higher_range_address: String,
    // Leaf index of the low range element whose proof is attached below.
    #[serde(rename = "lowElementLeafIndex")]
    pub low_element_leaf_index: i32,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "lowerRangeAddress")]
    pub lower_range_address: String,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "merkleTree")]
    pub merkle_tree: String,
    #[serde(rename = "nextIndex")]
    pub next_index: i32,
    // Sibling hashes (base58) forming the Merkle path to `root`.
    #[serde(rename = "proof")]
    pub proof: Vec<String>,
    /// A 32-byte hash represented as a base58 string.
    #[serde(rename = "root")]
    pub root: String,
    // Sequence number of `root`; note this is u64 unlike the i32 indices above.
    #[serde(rename = "rootSeq")]
    pub root_seq: u64,
}

impl MerkleContextWithNewAddressProof {
    /// Constructs the proof context from all of its (required) fields.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        address: String,
        higher_range_address: String,
        low_element_leaf_index: i32,
        lower_range_address: String,
        merkle_tree: String,
        next_index: i32,
        proof: Vec<String>,
        root: String,
        root_seq: u64,
    ) -> MerkleContextWithNewAddressProof {
        MerkleContextWithNewAddressProof {
            address,
            higher_range_address,
            low_element_leaf_index,
            lower_range_address,
            merkle_tree,
            next_index,
            proof,
            root,
            root_seq,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_latest_non_voting_signatures_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC envelope for the `getLatestNonVotingSignatures` response.
/// Exactly one of `error` / `result` is expected to be populated.
pub struct GetLatestNonVotingSignaturesPost200Response {
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetLatestNonVotingSignaturesPost200ResponseResult>>,
}

impl GetLatestNonVotingSignaturesPost200Response {
    /// Creates an empty envelope (no `error`, no `result`); fill one in
    /// before serializing.
    pub fn new(id: Id, jsonrpc: Jsonrpc) -> GetLatestNonVotingSignaturesPost200Response {
        GetLatestNonVotingSignaturesPost200Response {
            error: None,
            id,
            jsonrpc,
            result: None,
        }
    }
}
/// An ID to identify the response.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_validity_proof_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC request body for the `getValidityProof` method.
pub struct GetValidityProofPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    // Boxed to keep this envelope small regardless of the params size.
    #[serde(rename = "params")]
    pub params: Box<models::GetValidityProofPostRequestParams>,
}

impl GetValidityProofPostRequest {
    /// Assembles a complete request; `params` is boxed internally.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetValidityProofPostRequestParams,
    ) -> GetValidityProofPostRequest {
        GetValidityProofPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
// Serializes as the literal string "getValidityProof".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getValidityProof")]
    GetValidityProof,
}

impl Default for Method {
    fn default() -> Method {
        Self::GetValidityProof
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_accounts_by_owner_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC request body for the `getCompressedAccountsByOwner` method.
pub struct GetCompressedAccountsByOwnerPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    // Boxed to keep this envelope small regardless of the params size.
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedAccountsByOwnerPostRequestParams>,
}

impl GetCompressedAccountsByOwnerPostRequest {
    /// Assembles a complete request; `params` is boxed internally.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedAccountsByOwnerPostRequestParams,
    ) -> GetCompressedAccountsByOwnerPostRequest {
        GetCompressedAccountsByOwnerPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
// Serializes as the literal string "getCompressedAccountsByOwner".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressedAccountsByOwner")]
    GetCompressedAccountsByOwner,
}

impl Default for Method {
    fn default() -> Method {
        Self::GetCompressedAccountsByOwner
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_balances_by_owner_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// `result` payload of `getCompressedTokenBalancesByOwner`: the token
/// balance list plus the indexer context it was read at.
pub struct GetCompressedTokenBalancesByOwnerPost200ResponseResult {
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    #[serde(rename = "value")]
    pub value: Box<models::TokenBalanceList>,
}

impl GetCompressedTokenBalancesByOwnerPost200ResponseResult {
    /// Wraps both parts in `Box`es and builds the result payload.
    pub fn new(
        context: models::Context,
        value: models::TokenBalanceList,
    ) -> GetCompressedTokenBalancesByOwnerPost200ResponseResult {
        GetCompressedTokenBalancesByOwnerPost200ResponseResult {
            context: Box::new(context),
            value: Box::new(value),
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/signature_info_list.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// Wrapper object holding a list of signature infos under the JSON key
/// `items` (generated OpenAPI model).
pub struct SignatureInfoList {
    #[serde(rename = "items")]
    pub items: Vec<models::SignatureInfo>,
}

impl SignatureInfoList {
    /// Wraps the given signatures in the list container.
    pub fn new(items: Vec<models::SignatureInfo>) -> SignatureInfoList {
        SignatureInfoList { items }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compression_signatures_for_owner_post_request_params.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// Parameters for `getCompressionSignaturesForOwner`: a required owner key
/// plus optional pagination (`cursor`, `limit`).
pub struct GetCompressionSignaturesForOwnerPostRequestParams {
    // Double Option distinguishes "field absent" (outer None) from an
    // explicit JSON null (inner None) via serde_with's double_option.
    #[serde(
        rename = "cursor",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub cursor: Option<Option<String>>,
    #[serde(
        rename = "limit",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub limit: Option<Option<i32>>,
    /// A Solana public key represented as a base58 string.
    #[serde(rename = "owner")]
    pub owner: String,
}

impl GetCompressionSignaturesForOwnerPostRequestParams {
    /// Builds params with no pagination (both `cursor` and `limit` omitted).
    pub fn new(owner: String) -> GetCompressionSignaturesForOwnerPostRequestParams {
        GetCompressionSignaturesForOwnerPostRequestParams {
            cursor: None,
            limit: None,
            owner,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/account_list.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// Wrapper object holding a list of compressed accounts under the JSON key
/// `items` (generated OpenAPI model).
pub struct AccountList {
    #[serde(rename = "items")]
    pub items: Vec<models::Account>,
}

impl AccountList {
    /// Wraps the given accounts in the list container.
    pub fn new(items: Vec<models::Account>) -> AccountList {
        AccountList { items }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_accounts_by_owner_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC request body for the `getCompressedTokenAccountsByOwner` method.
pub struct GetCompressedTokenAccountsByOwnerPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    // Boxed to keep this envelope small regardless of the params size.
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedTokenAccountsByOwnerPostRequestParams>,
}

impl GetCompressedTokenAccountsByOwnerPostRequest {
    /// Assembles a complete request; `params` is boxed internally.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedTokenAccountsByOwnerPostRequestParams,
    ) -> GetCompressedTokenAccountsByOwnerPostRequest {
        GetCompressedTokenAccountsByOwnerPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
// Serializes as the literal string "getCompressedTokenAccountsByOwner".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressedTokenAccountsByOwner")]
    GetCompressedTokenAccountsByOwner,
}

impl Default for Method {
    fn default() -> Method {
        Self::GetCompressedTokenAccountsByOwner
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_balance_by_owner_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC request body for the `getCompressedBalanceByOwner` method.
pub struct GetCompressedBalanceByOwnerPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    // Boxed to keep this envelope small regardless of the params size.
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedBalanceByOwnerPostRequestParams>,
}

impl GetCompressedBalanceByOwnerPostRequest {
    /// Assembles a complete request; `params` is boxed internally.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedBalanceByOwnerPostRequestParams,
    ) -> GetCompressedBalanceByOwnerPostRequest {
        GetCompressedBalanceByOwnerPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
// Serializes as the literal string "getCompressedBalanceByOwner".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressedBalanceByOwner")]
    GetCompressedBalanceByOwner,
}

impl Default for Method {
    fn default() -> Method {
        Self::GetCompressedBalanceByOwner
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_token_balances_by_owner_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC request body for the `getCompressedTokenBalancesByOwner` method.
pub struct GetCompressedTokenBalancesByOwnerPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    // NOTE(review): this reuses GetCompressedTokenAccountsByOwnerPostRequestParams
    // — presumably the generator deduplicated two structurally identical
    // schemas; confirm against the OpenAPI spec.
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedTokenAccountsByOwnerPostRequestParams>,
}

impl GetCompressedTokenBalancesByOwnerPostRequest {
    /// Assembles a complete request; `params` is boxed internally.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedTokenAccountsByOwnerPostRequestParams,
    ) -> GetCompressedTokenBalancesByOwnerPostRequest {
        GetCompressedTokenBalancesByOwnerPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
// Serializes as the literal string "getCompressedTokenBalancesByOwner".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressedTokenBalancesByOwner")]
    GetCompressedTokenBalancesByOwner,
}

impl Default for Method {
    fn default() -> Method {
        Self::GetCompressedTokenBalancesByOwner
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_multiple_new_address_proofs_post_200_response_result.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// `result` payload of `getMultipleNewAddressProofs`: one non-inclusion
/// proof per requested address, plus the indexer context.
pub struct GetMultipleNewAddressProofsPost200ResponseResult {
    #[serde(rename = "context")]
    pub context: Box<models::Context>,
    #[serde(rename = "value")]
    pub value: Vec<models::MerkleContextWithNewAddressProof>,
}

impl GetMultipleNewAddressProofsPost200ResponseResult {
    /// Builds the result payload; `context` is boxed internally.
    pub fn new(
        context: models::Context,
        value: Vec<models::MerkleContextWithNewAddressProof>,
    ) -> GetMultipleNewAddressProofsPost200ResponseResult {
        GetMultipleNewAddressProofsPost200ResponseResult {
            context: Box::new(context),
            value,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/account_with_optional_token_data.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// A compressed account optionally paired with parsed token data (present
/// only when the account is a token account).
pub struct AccountWithOptionalTokenData {
    #[serde(rename = "account")]
    pub account: Box<models::Account>,
    // Omitted from JSON entirely when None.
    #[serde(rename = "optionalTokenData", skip_serializing_if = "Option::is_none")]
    pub optional_token_data: Option<Box<models::TokenData>>,
}

impl AccountWithOptionalTokenData {
    /// Wraps an account with no token data attached.
    pub fn new(account: models::Account) -> AccountWithOptionalTokenData {
        AccountWithOptionalTokenData {
            account: Box::new(account),
            optional_token_data: None,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_account_proof_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC request body for the `getCompressedAccountProof` method.
pub struct GetCompressedAccountProofPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    // Boxed to keep this envelope small regardless of the params size.
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedAccountProofPostRequestParams>,
}

impl GetCompressedAccountProofPostRequest {
    /// Assembles a complete request; `params` is boxed internally.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedAccountProofPostRequestParams,
    ) -> GetCompressedAccountProofPostRequest {
        GetCompressedAccountProofPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
// Serializes as the literal string "getCompressedAccountProof".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getCompressedAccountProof")]
    GetCompressedAccountProof,
}

impl Default for Method {
    fn default() -> Method {
        Self::GetCompressedAccountProof
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_multiple_new_address_proofs_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC request body for the `getMultipleNewAddressProofs` method.
/// Unlike most requests here, `params` is a bare array of base58 addresses.
pub struct GetMultipleNewAddressProofsPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    #[serde(rename = "params")]
    pub params: Vec<String>,
}

impl GetMultipleNewAddressProofsPostRequest {
    /// Assembles a complete request from the address list.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: Vec<String>,
    ) -> GetMultipleNewAddressProofsPostRequest {
        GetMultipleNewAddressProofsPostRequest {
            id,
            jsonrpc,
            method,
            params,
        }
    }
}
/// An ID to identify the request.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
// Serializes as the literal string "getMultipleNewAddressProofs".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getMultipleNewAddressProofs")]
    GetMultipleNewAddressProofs,
}

impl Default for Method {
    fn default() -> Method {
        Self::GetMultipleNewAddressProofs
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_multiple_compressed_accounts_post_request_params.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// GetMultipleCompressedAccountsPostRequestParams : Request for compressed account data
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// GetMultipleCompressedAccountsPostRequestParams : Request for compressed account data
/// Accounts may be looked up by `addresses` or by `hashes`; both lists are
/// optional and omitted from the JSON when unset.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetMultipleCompressedAccountsPostRequestParams {
    // Double Option distinguishes "field absent" (outer None) from an
    // explicit JSON null (inner None) via serde_with's double_option.
    #[serde(
        rename = "addresses",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub addresses: Option<Option<Vec<String>>>,
    #[serde(
        rename = "hashes",
        default,
        with = "::serde_with::rust::double_option",
        skip_serializing_if = "Option::is_none"
    )]
    pub hashes: Option<Option<Vec<String>>>,
}

impl GetMultipleCompressedAccountsPostRequestParams {
    /// Request for compressed account data
    /// Starts empty; set `addresses` or `hashes` before sending.
    pub fn new() -> GetMultipleCompressedAccountsPostRequestParams {
        GetMultipleCompressedAccountsPostRequestParams {
            addresses: None,
            hashes: None,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/compressed_proof_with_context.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// A validity proof together with the per-account Merkle context it covers.
/// The vectors are parallel: index i of `leafIndices`, `leaves`,
/// `merkleTrees`, `rootIndices`, and `roots` all describe the same account
/// — presumably; confirm against the indexer's response contract.
pub struct CompressedProofWithContext {
    #[serde(rename = "compressedProof")]
    pub compressed_proof: Box<models::CompressedProof>,
    #[serde(rename = "leafIndices")]
    pub leaf_indices: Vec<i32>,
    // Leaf hashes as base58 strings.
    #[serde(rename = "leaves")]
    pub leaves: Vec<String>,
    // Merkle tree pubkeys as base58 strings.
    #[serde(rename = "merkleTrees")]
    pub merkle_trees: Vec<String>,
    #[serde(rename = "rootIndices")]
    pub root_indices: Vec<i32>,
    // Root hashes as base58 strings.
    #[serde(rename = "roots")]
    pub roots: Vec<String>,
}

impl CompressedProofWithContext {
    /// Builds the proof-with-context; `compressed_proof` is boxed internally.
    pub fn new(
        compressed_proof: models::CompressedProof,
        leaf_indices: Vec<i32>,
        leaves: Vec<String>,
        merkle_trees: Vec<String>,
        root_indices: Vec<i32>,
        roots: Vec<String>,
    ) -> CompressedProofWithContext {
        CompressedProofWithContext {
            compressed_proof: Box::new(compressed_proof),
            leaf_indices,
            leaves,
            merkle_trees,
            root_indices,
            roots,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/context.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// Indexer context attached to RPC results: the slot the response was
/// produced at.
pub struct Context {
    // NOTE(review): Solana slots are u64 on-chain; i32 here mirrors the
    // generated schema — confirm before relying on large slot values.
    #[serde(rename = "slot")]
    pub slot: i32,
}

impl Context {
    /// Builds a context for the given slot.
    pub fn new(slot: i32) -> Context {
        Context { slot }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_multiple_new_address_proofs_post_200_response.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC envelope for the `getMultipleNewAddressProofs` response.
/// Exactly one of `error` / `result` is expected to be populated.
pub struct GetMultipleNewAddressProofsPost200Response {
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<Box<models::GetCompressedAccountPost200ResponseError>>,
    /// An ID to identify the response.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
    pub result: Option<Box<models::GetMultipleNewAddressProofsPost200ResponseResult>>,
}

impl GetMultipleNewAddressProofsPost200Response {
    /// Creates an empty envelope (no `error`, no `result`); fill one in
    /// before serializing.
    pub fn new(id: Id, jsonrpc: Jsonrpc) -> GetMultipleNewAddressProofsPost200Response {
        GetMultipleNewAddressProofsPost200Response {
            error: None,
            id,
            jsonrpc,
            result: None,
        }
    }
}
/// An ID to identify the response.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_latest_non_voting_signatures_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
/// JSON-RPC request body for the `getLatestNonVotingSignatures` method.
pub struct GetLatestNonVotingSignaturesPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    // NOTE(review): reuses GetLatestCompressionSignaturesPostRequestParams —
    // presumably the generator deduplicated identical schemas; confirm.
    #[serde(rename = "params")]
    pub params: Box<models::GetLatestCompressionSignaturesPostRequestParams>,
}

impl GetLatestNonVotingSignaturesPostRequest {
    /// Assembles a complete request; `params` is boxed internally.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetLatestCompressionSignaturesPostRequestParams,
    ) -> GetLatestNonVotingSignaturesPostRequest {
        GetLatestNonVotingSignaturesPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
// Single-variant enum: the OpenAPI spec pins this field to one literal value.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    #[serde(rename = "test-account")]
    TestAccount,
}

impl Default for Id {
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
// Serializes as the literal string "2.0".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    #[serde(rename = "2.0")]
    Variant2Period0,
}

impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
// Serializes as the literal string "getLatestNonVotingSignatures".
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    #[serde(rename = "getLatestNonVotingSignatures")]
    GetLatestNonVotingSignatures,
}

impl Default for Method {
    fn default() -> Method {
        Self::GetLatestNonVotingSignatures
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_compressed_account_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// Request body for the `getCompressedAccount` JSON-RPC method
/// (generated from the photon-indexer OpenAPI spec).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetCompressedAccountPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    /// Method parameters; boxed to keep the request struct itself small.
    #[serde(rename = "params")]
    pub params: Box<models::GetCompressedAccountPostRequestParams>,
}
impl GetCompressedAccountPostRequest {
    /// Constructs a request, taking `params` by value and boxing it to
    /// match the struct field's `Box` wrapper.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetCompressedAccountPostRequestParams,
    ) -> GetCompressedAccountPostRequest {
        GetCompressedAccountPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    /// Serialized on the wire as the literal string "test-account".
    #[serde(rename = "test-account")]
    TestAccount,
}
impl Default for Id {
    // Single-variant enum generated from the spec; the only value is the default.
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    /// Serialized as the literal string "2.0", the only supported version.
    #[serde(rename = "2.0")]
    Variant2Period0,
}
impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    /// Serialized as the literal string "getCompressedAccount".
    #[serde(rename = "getCompressedAccount")]
    GetCompressedAccount,
}
impl Default for Method {
    fn default() -> Method {
        Self::GetCompressedAccount
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src
|
solana_public_repos/Lightprotocol/light-protocol/photon-api/src/models/_get_transaction_with_compression_info_post_request.rs
|
/*
* photon-indexer
*
* Solana indexer for general compression
*
* The version of the OpenAPI document: 0.45.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
/// Request body for the `getTransactionWithCompressionInfo` JSON-RPC method
/// (generated from the photon-indexer OpenAPI spec).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct GetTransactionWithCompressionInfoPostRequest {
    /// An ID to identify the request.
    #[serde(rename = "id")]
    pub id: Id,
    /// The version of the JSON-RPC protocol.
    #[serde(rename = "jsonrpc")]
    pub jsonrpc: Jsonrpc,
    /// The name of the method to invoke.
    #[serde(rename = "method")]
    pub method: Method,
    /// Method parameters; boxed to keep the request struct itself small.
    #[serde(rename = "params")]
    pub params: Box<models::GetTransactionWithCompressionInfoPostRequestParams>,
}
impl GetTransactionWithCompressionInfoPostRequest {
    /// Constructs a request, taking `params` by value and boxing it to
    /// match the struct field's `Box` wrapper.
    pub fn new(
        id: Id,
        jsonrpc: Jsonrpc,
        method: Method,
        params: models::GetTransactionWithCompressionInfoPostRequestParams,
    ) -> GetTransactionWithCompressionInfoPostRequest {
        GetTransactionWithCompressionInfoPostRequest {
            id,
            jsonrpc,
            method,
            params: Box::new(params),
        }
    }
}
/// An ID to identify the request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Id {
    /// Serialized on the wire as the literal string "test-account".
    #[serde(rename = "test-account")]
    TestAccount,
}
impl Default for Id {
    // Single-variant enum generated from the spec; the only value is the default.
    fn default() -> Id {
        Self::TestAccount
    }
}
/// The version of the JSON-RPC protocol.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Jsonrpc {
    /// Serialized as the literal string "2.0", the only supported version.
    #[serde(rename = "2.0")]
    Variant2Period0,
}
impl Default for Jsonrpc {
    fn default() -> Jsonrpc {
        Self::Variant2Period0
    }
}
/// The name of the method to invoke.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Method {
    /// Serialized as the literal string "getTransactionWithCompressionInfo".
    #[serde(rename = "getTransactionWithCompressionInfo")]
    GetTransactionWithCompressionInfo,
}
impl Default for Method {
    fn default() -> Method {
        Self::GetTransactionWithCompressionInfo
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/Cargo.toml
|
[package]
name = "light-test-utils"
version = "1.2.1"
description = "Utilities used in Light Protocol program tests"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[features]
default = []
devenv = []
[dependencies]
anchor-lang = { workspace = true }
anchor-spl = { workspace = true }
anyhow = "1.0"
ark-ff = "0.4"
light-hash-set = { workspace = true }
num-bigint = "0.4"
num-traits = "0.2"
solana-program-test = { workspace = true }
solana-sdk = { workspace = true }
solana-client = { workspace = true }
thiserror = "1.0"
light-macros = { path = "../macros/light", version = "1.1.0" }
account-compression = { workspace = true }
light-compressed-token = { workspace = true }
light-system-program = { workspace = true }
light-registry = { workspace = true }
spl-token = { workspace = true, features = ["no-entrypoint"] }
solana-transaction-status = { workspace = true }
tokio = { workspace = true }
light-prover-client = { path = "../circuit-lib/light-prover-client", version = "1.2.0" }
reqwest = "0.11.26"
light-hasher = { version = "1.1.0", path = "../merkle-tree/hasher" }
light-merkle-tree-reference = { version = "1.1.0", path = "../merkle-tree/reference" }
light-concurrent-merkle-tree = { version = "1.1.0", path = "../merkle-tree/concurrent" }
light-indexed-merkle-tree = { path = "../merkle-tree/indexed/", version = "1.1.0" }
light-verifier = { path = "../circuit-lib/verifier", version = "1.1.0" }
light-utils = { path = "../utils", version = "1.1.0" }
light-program-test = { workspace = true }
forester-utils = { workspace = true }
memoffset = "0.9.1"
rand = "0.8"
photon-api = { workspace = true }
log = "0.4"
serde = { version = "1.0.197", features = ["derive"] }
async-trait = "0.1.82"
light-client = { workspace = true }
spl-token-2022 = { workspace = true }
[dev-dependencies]
rand = "0.8"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_compressed_tx.rs
|
use account_compression::{state::QueueAccount, StateMerkleTreeAccount};
use forester_utils::indexer::{Indexer, StateMerkleTreeAccounts};
use forester_utils::{get_concurrent_merkle_tree, get_hash_set, AccountZeroCopy};
use light_client::rpc::RpcConnection;
use light_hasher::Poseidon;
use light_system_program::sdk::event::MerkleTreeSequenceNumber;
use light_system_program::sdk::{
compressed_account::{CompressedAccount, CompressedAccountWithMerkleContext},
event::PublicTransactionEvent,
invoke::get_sol_pool_pda,
};
use log::debug;
use num_bigint::BigUint;
use num_traits::FromBytes;
use solana_sdk::account::ReadableAccount;
use solana_sdk::pubkey::Pubkey;
/// Bundled inputs for [`assert_compressed_transaction`]: RPC handle, test
/// indexer, and all pre/post-transaction state the individual checks need.
pub struct AssertCompressedTransactionInputs<'a, R: RpcConnection, I: Indexer<R>> {
    pub rpc: &'a mut R,
    pub test_indexer: &'a mut I,
    /// Output accounts the transaction was expected to create.
    pub output_compressed_accounts: &'a [CompressedAccount],
    /// Output accounts actually created, with their Merkle contexts.
    pub created_output_compressed_accounts: &'a [CompressedAccountWithMerkleContext],
    /// Hashes of the input accounts that must now be nullified.
    pub input_compressed_account_hashes: &'a [[u8; 32]],
    /// Pre-transaction snapshots of the output state trees.
    pub output_merkle_tree_snapshots: &'a [MerkleTreeTestSnapShot],
    /// Pre-transaction snapshots of the input state trees.
    pub input_merkle_tree_snapshots: &'a [MerkleTreeTestSnapShot],
    /// New addresses that must appear in the address queues.
    pub created_addresses: &'a [[u8; 32]],
    /// Address queue pubkeys, paired element-wise with `created_addresses`.
    pub address_queue_pubkeys: &'a [Pubkey],
    /// The public transaction event to validate.
    pub event: &'a PublicTransactionEvent,
    pub sorted_output_accounts: bool,
    /// Lamports compressed or decompressed; `None` when no SOL moved.
    pub compress_or_decompress_lamports: Option<u64>,
    pub is_compress: bool,
    pub relay_fee: Option<u64>,
    /// Recipient of decompressed lamports, if any.
    pub compression_recipient: Option<Pubkey>,
    /// Recipient balance before the transaction (decompression check).
    pub recipient_balance_pre: u64,
    /// SOL pool PDA balance before the transaction.
    pub compressed_sol_pda_balance_pre: u64,
}
/// General tx assert:
/// 1. outputs created
/// 2. inputs nullified
/// 3. addresses inserted into address queue
/// 4. Public Transaction event emitted correctly
/// 5. Merkle tree was updated correctly
/// 6. TODO: Fees have been paid (after fee refactor)
/// 7. Check compression amount was transferred
///
/// Check 5 runs before check 4 because the event assertion consumes the
/// post-transaction sequence numbers that check 5 returns.
pub async fn assert_compressed_transaction<R: RpcConnection, I: Indexer<R>>(
    input: AssertCompressedTransactionInputs<'_, R, I>,
) {
    // CHECK 1
    assert_created_compressed_accounts(
        input.output_compressed_accounts,
        input
            .output_merkle_tree_snapshots
            .iter()
            .map(|x| x.accounts.merkle_tree)
            .collect::<Vec<_>>()
            .as_slice(),
        input.created_output_compressed_accounts,
        input.sorted_output_accounts,
    );
    // CHECK 2
    assert_nullifiers_exist_in_hash_sets(
        input.rpc,
        input.input_merkle_tree_snapshots,
        input.input_compressed_account_hashes,
    )
    .await;
    // CHECK 3
    assert_addresses_exist_in_hash_sets(
        input.rpc,
        input.address_queue_pubkeys,
        input.created_addresses,
    )
    .await;
    // CHECK 5
    let sequence_numbers = assert_merkle_tree_after_tx(
        input.rpc,
        input.output_merkle_tree_snapshots,
        input.test_indexer,
    )
    .await;
    // CHECK 4
    assert_public_transaction_event(
        input.event,
        Some(&input.input_compressed_account_hashes.to_vec()),
        input
            .output_merkle_tree_snapshots
            .iter()
            .map(|x| x.accounts)
            .collect::<Vec<_>>()
            .as_slice(),
        &input
            .created_output_compressed_accounts
            .iter()
            .map(|x| x.merkle_context.leaf_index)
            .collect::<Vec<_>>(),
        input.compress_or_decompress_lamports,
        input.is_compress,
        input.relay_fee,
        sequence_numbers,
    );
    // CHECK 7
    // Only relevant when SOL was compressed or decompressed in this tx.
    if let Some(compress_or_decompress_lamports) = input.compress_or_decompress_lamports {
        assert_compression(
            input.rpc,
            compress_or_decompress_lamports,
            input.compressed_sol_pda_balance_pre,
            input.recipient_balance_pre,
            &input.compression_recipient.unwrap_or_default(),
            input.is_compress,
        )
        .await;
    }
}
/// Asserts that each input compressed-account hash was inserted into the
/// nullifier queue of the snapshot at the same position.
///
/// NOTE(review): `snapshots[i]` pairs each hash with the snapshot at the
/// same index, so there must be at least one snapshot per input hash;
/// fewer snapshots than hashes would panic on the index — confirm callers
/// always pass them pairwise.
pub async fn assert_nullifiers_exist_in_hash_sets<R: RpcConnection>(
    rpc: &mut R,
    snapshots: &[MerkleTreeTestSnapShot],
    input_compressed_account_hashes: &[[u8; 32]],
) {
    for (i, hash) in input_compressed_account_hashes.iter().enumerate() {
        // Read the on-chain queue account as a hash set view.
        let nullifier_queue = unsafe {
            get_hash_set::<QueueAccount, R>(rpc, snapshots[i].accounts.nullifier_queue).await
        };
        assert!(nullifier_queue
            .contains(&BigUint::from_be_bytes(hash.as_slice()), None)
            .unwrap());
    }
}
/// Asserts that every created address is present in its corresponding
/// address queue; addresses and queue pubkeys are paired element-wise.
pub async fn assert_addresses_exist_in_hash_sets<R: RpcConnection>(
    rpc: &mut R,
    address_queue_pubkeys: &[Pubkey],
    created_addresses: &[[u8; 32]],
) {
    for (queue_pubkey, new_address) in address_queue_pubkeys.iter().zip(created_addresses.iter()) {
        // Read the on-chain queue account as a hash set view.
        let queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, *queue_pubkey).await };
        let value = BigUint::from_be_bytes(new_address);
        assert!(queue.contains(&value, None).unwrap());
    }
}
/// Asserts that each account in `created_out_compressed_accounts` matches
/// one of the expected `output_compressed_accounts` (by lamports, owner,
/// data and address) and that its Merkle tree pubkey is one of the expected
/// output trees. Ordering is not checked.
pub fn assert_created_compressed_accounts(
    output_compressed_accounts: &[CompressedAccount],
    output_merkle_tree_pubkeys: &[Pubkey],
    created_out_compressed_accounts: &[CompressedAccountWithMerkleContext],
    _sorted: bool,
) {
    for created in created_out_compressed_accounts.iter() {
        let account = &created.compressed_account;
        // Some expected output account must match this created one.
        let has_match = output_compressed_accounts.iter().any(|expected| {
            expected.lamports == account.lamports
                && expected.owner == account.owner
                && expected.data == account.data
                && expected.address == account.address
        });
        assert!(has_match);
        // The created account must live in one of the expected trees.
        let tree = created.merkle_context.merkle_tree_pubkey;
        assert!(output_merkle_tree_pubkeys.contains(&tree));
    }
}
/// Asserts that the emitted [`PublicTransactionEvent`] matches the expected
/// transaction outcome: input hashes, output Merkle trees, output leaf
/// indices, compression amount/direction, relay fee, and per-tree sequence
/// numbers.
///
/// `sequence_numbers` are the post-transaction on-chain sequence numbers
/// (as returned by [`assert_merkle_tree_after_tx`]).
#[allow(clippy::too_many_arguments)]
pub fn assert_public_transaction_event(
    event: &PublicTransactionEvent,
    input_compressed_account_hashes: Option<&Vec<[u8; 32]>>,
    output_merkle_tree_accounts: &[StateMerkleTreeAccounts],
    output_leaf_indices: &Vec<u32>,
    compress_or_decompress_lamports: Option<u64>,
    is_compress: bool,
    relay_fee: Option<u64>,
    sequence_numbers: Vec<MerkleTreeSequenceNumber>,
) {
    assert_eq!(
        event.input_compressed_account_hashes,
        *input_compressed_account_hashes.unwrap_or(&Vec::<[u8; 32]>::new()),
        "assert_public_transaction_event: input compressed account hashes mismatch"
    );
    // Every output account must target one of the expected state trees.
    for account in event.output_compressed_accounts.iter() {
        assert!(
            output_merkle_tree_accounts
                .iter()
                .any(|x| x.merkle_tree == event.pubkey_array[account.merkle_tree_index as usize]),
            "assert_public_transaction_event: output state merkle tree account index mismatch"
        );
    }
    assert_eq!(
        event.output_leaf_indices, *output_leaf_indices,
        "assert_public_transaction_event: output leaf indices mismatch"
    );
    assert_eq!(
        event.compress_or_decompress_lamports, compress_or_decompress_lamports,
        "assert_public_transaction_event: compression lamports mismatch"
    );
    assert_eq!(
        event.is_compress, is_compress,
        "assert_public_transaction_event: is_compress mismatch"
    );
    assert_eq!(
        event.relay_fee, relay_fee,
        "assert_public_transaction_event: relay fee mismatch"
    );
    // Replay the expected sequence-number bumps: each appended output
    // account advances its tree's sequence number by one.
    let mut updated_sequence_numbers = event.sequence_numbers.clone();
    for account in event.output_compressed_accounts.iter() {
        let merkle_tree_pubkey = event.pubkey_array[account.merkle_tree_index as usize];
        match updated_sequence_numbers
            .iter_mut()
            .find(|x| x.pubkey == merkle_tree_pubkey)
        {
            Some(entry) => entry.seq += 1,
            None => {
                debug!("reference sequence numbers: {:?}", sequence_numbers);
                debug!("event: {:?}", event);
                panic!(
                    "merkle tree pubkey not found in sequence numbers : {:?}",
                    merkle_tree_pubkey
                );
            }
        }
    }
    // BUG FIX: the original computed `iter().any(...)` here and discarded
    // the boolean result, so this comparison against the on-chain reference
    // could never fail. Assert the membership instead.
    for sequence_number in updated_sequence_numbers.iter() {
        assert!(
            sequence_numbers.iter().any(|x| x == sequence_number),
            "assert_public_transaction_event: sequence number {:?} not found in reference sequence numbers",
            sequence_number
        );
    }
}
/// Snapshot of a state Merkle tree bundle taken before a transaction, used
/// to assert on-chain updates afterwards (see [`get_merkle_tree_snapshots`]).
#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)]
pub struct MerkleTreeTestSnapShot {
    /// Pubkeys of the tree, its nullifier queue and cpi context account.
    pub accounts: StateMerkleTreeAccounts,
    /// Merkle root at snapshot time.
    pub root: [u8; 32],
    /// Next leaf index at snapshot time.
    pub next_index: usize,
    /// Number of accounts expected to be appended to this tree by the tx.
    pub num_added_accounts: usize,
    /// Lamport balances of the bundle accounts at snapshot time.
    pub merkle_tree_account_lamports: u64,
    pub queue_account_lamports: u64,
    pub cpi_context_account_lamports: u64,
}
// TODO: add assert that changelog, seq number is updated correctly
/// Asserts that the merkle tree account has been updated correctly,
/// by comparing the merkle tree account with the test indexer merkle tree.
/// Asserts:
/// 1. The root has been updated
/// 2. The next index has been updated
///
/// Returns the post-transaction sequence number of every (deduplicated)
/// snapshotted tree, for use in the event assertion.
pub async fn assert_merkle_tree_after_tx<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    snapshots: &[MerkleTreeTestSnapShot],
    test_indexer: &mut I,
) -> Vec<MerkleTreeSequenceNumber> {
    // Several outputs may target the same tree; assert each tree only once.
    let mut deduped_snapshots = snapshots.to_vec();
    deduped_snapshots.sort();
    deduped_snapshots.dedup();
    let mut sequence_numbers = Vec::new();
    for (i, snapshot) in deduped_snapshots.iter().enumerate() {
        let merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
            rpc,
            snapshot.accounts.merkle_tree,
        )
        .await;
        // BUG FIX: these debug labels previously described the wrong values
        // ("sequence number" printed next_index, "prev sequence number"
        // printed num_added_accounts).
        debug!("onchain next index: {:?}", merkle_tree.next_index() as u64);
        debug!("snapshot next index: {:?}", snapshot.next_index);
        debug!(
            "snapshot num added accounts: {:?}",
            snapshot.num_added_accounts
        );
        sequence_numbers.push(MerkleTreeSequenceNumber {
            pubkey: snapshot.accounts.merkle_tree,
            seq: merkle_tree.sequence_number() as u64,
        });
        // An unchanged root means the transaction failed to append.
        if merkle_tree.root() == snapshot.root {
            debug!("deduped_snapshots: {:?}", deduped_snapshots);
            debug!("i: {:?}", i);
            panic!("merkle tree root update failed, it should have updated but didn't");
        }
        assert_eq!(
            merkle_tree.next_index(),
            snapshot.next_index + snapshot.num_added_accounts
        );
        let test_indexer_merkle_tree = test_indexer
            .get_state_merkle_trees_mut()
            .iter_mut()
            .find(|x| x.accounts.merkle_tree == snapshot.accounts.merkle_tree)
            .expect("merkle tree not found in test indexer");
        if merkle_tree.root() != test_indexer_merkle_tree.merkle_tree.root() {
            // The following lines are just debug prints
            debug!("Merkle tree pubkey {:?}", snapshot.accounts.merkle_tree);
            for (i, leaf) in test_indexer_merkle_tree.merkle_tree.layers[0]
                .iter()
                .enumerate()
            {
                debug!("test_indexer_merkle_tree index {} leaf: {:?}", i, leaf);
            }
            for i in 0..16 {
                debug!("root {} {:?}", i, merkle_tree.roots.get(i));
            }
            panic!("merkle tree root update failed");
        }
    }
    sequence_numbers
}
/// Takes a snapshot of the provided on-chain Merkle trees.
/// Snapshot data:
/// 1. root
/// 2. next_index
/// 3. num_added_accounts // so that we can assert the expected next index after tx
/// 4. lamports of all bundle accounts
///
/// `num_added_accounts` is the number of entries in `accounts` that
/// reference the same tree, i.e. how many outputs the pending transaction
/// will append to it.
pub async fn get_merkle_tree_snapshots<R: RpcConnection>(
    rpc: &mut R,
    accounts: &[StateMerkleTreeAccounts],
) -> Vec<MerkleTreeTestSnapShot> {
    let mut snapshots = Vec::new();
    for account_bundle in accounts.iter() {
        let merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
            rpc,
            account_bundle.merkle_tree,
        )
        .await;
        let merkle_tree_account =
            AccountZeroCopy::<StateMerkleTreeAccount>::new(rpc, account_bundle.merkle_tree).await;
        // Missing accounts are recorded as having zero lamports.
        let queue_account_lamports = match rpc
            .get_account(account_bundle.nullifier_queue)
            .await
            .unwrap()
        {
            Some(x) => x.lamports,
            None => 0,
        };
        let cpi_context_account_lamports =
            match rpc.get_account(account_bundle.cpi_context).await.unwrap() {
                Some(x) => x.lamports,
                None => 0,
            };
        snapshots.push(MerkleTreeTestSnapShot {
            accounts: *account_bundle,
            root: merkle_tree.root(),
            next_index: merkle_tree.next_index(),
            num_added_accounts: accounts
                .iter()
                .filter(|x| x.merkle_tree == account_bundle.merkle_tree)
                .count(),
            merkle_tree_account_lamports: merkle_tree_account.account.lamports(),
            queue_account_lamports,
            cpi_context_account_lamports,
        });
    }
    snapshots
}
/// Asserts that the SOL pool PDA (and, for decompression, the recipient)
/// balances changed by exactly `compress_amount` relative to the provided
/// pre-transaction balances.
pub async fn assert_compression<R: RpcConnection>(
    context: &mut R,
    compress_amount: u64,
    compressed_sol_pda_balance_pre: u64,
    recipient_balance_pre: u64,
    recipient: &Pubkey,
    is_compress: bool,
) {
    if is_compress {
        // NOTE(review): this branch treats an RPC error the same as a
        // missing account (balance 0), while the decompress branch unwraps
        // RPC errors — confirm the asymmetry is intentional.
        let compressed_sol_pda_balance = match context.get_account(get_sol_pool_pda()).await {
            Ok(Some(account)) => account.lamports,
            _ => 0,
        };
        assert_eq!(
            compressed_sol_pda_balance,
            compressed_sol_pda_balance_pre + compress_amount,
            "assert_compression: balance of compressed sol pda insufficient, compress sol failed"
        );
    } else {
        let compressed_sol_pda_balance =
            match context.get_account(get_sol_pool_pda()).await.unwrap() {
                Some(account) => account.lamports,
                None => 0,
            };
        assert_eq!(
            compressed_sol_pda_balance,
            compressed_sol_pda_balance_pre - compress_amount,
            "assert_compression: balance of compressed sol pda incorrect, decompress sol failed"
        );
        // The decompressed lamports must have arrived at the recipient.
        let recipient_balance = context
            .get_account(*recipient)
            .await
            .unwrap()
            .unwrap()
            .lamports;
        assert_eq!(
            recipient_balance,
            recipient_balance_pre + compress_amount,
            "assert_compression: balance of recipient insufficient, decompress sol failed"
        );
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_epoch.rs
|
use light_client::rpc::RpcConnection;
use light_registry::{
protocol_config::state::ProtocolConfigPda,
utils::{get_epoch_pda_address, get_forester_pda, get_protocol_config_pda_address},
EpochPda, ForesterEpochPda, ForesterPda,
};
use solana_sdk::pubkey::Pubkey;
/// Asserts that epoch registration has been finalized for the forester:
/// its `total_epoch_weight` must be set and equal the epoch's total
/// registered weight.
pub async fn assert_finalized_epoch_registration<R: RpcConnection>(
    rpc: &mut R,
    forester_epoch_pda_pubkey: &Pubkey,
    epoch_pda_pubkey: &Pubkey,
) {
    let forester_epoch_pda = rpc
        .get_anchor_account::<ForesterEpochPda>(forester_epoch_pda_pubkey)
        .await
        .unwrap()
        .unwrap();
    let epoch_pda = rpc
        .get_anchor_account::<EpochPda>(epoch_pda_pubkey)
        .await
        .unwrap()
        .unwrap();
    // Finalization must have filled in the total weight.
    assert!(forester_epoch_pda.total_epoch_weight.is_some());
    assert_eq!(
        forester_epoch_pda.total_epoch_weight.unwrap(),
        epoch_pda.registered_weight
    );
}
/// Asserts that the `EpochPda` for `epoch` was initialized correctly:
/// registered weight, zeroed work counter, protocol config copied from the
/// protocol-config PDA, and the epoch number itself.
pub async fn assert_epoch_pda<R: RpcConnection>(
    rpc: &mut R,
    epoch: u64,
    expected_registered_weight: u64,
) {
    let protocol_config_pda = rpc
        .get_anchor_account::<ProtocolConfigPda>(&get_protocol_config_pda_address().0)
        .await
        .unwrap()
        .unwrap();
    let epoch_pda = rpc
        .get_anchor_account::<EpochPda>(&get_epoch_pda_address(epoch))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(epoch_pda.registered_weight, expected_registered_weight);
    assert_eq!(epoch_pda.total_work, 0);
    assert_eq!(epoch_pda.protocol_config, protocol_config_pda.config);
    assert_eq!(epoch_pda.epoch, epoch);
}
/// Fetches the `ForesterEpochPda` and `EpochPda` accounts so callers can
/// diff their state before and after a transaction.
pub async fn fetch_epoch_and_forester_pdas<R: RpcConnection>(
    rpc: &mut R,
    forester_epoch_pda: &Pubkey,
    epoch_pda: &Pubkey,
) -> (ForesterEpochPda, EpochPda) {
    let forester_account = rpc
        .get_anchor_account::<ForesterEpochPda>(forester_epoch_pda)
        .await
        .unwrap()
        .unwrap();
    println!("forester_epoch_pda: {:?}", forester_account);
    let epoch_account = rpc
        .get_anchor_account::<EpochPda>(epoch_pda)
        .await
        .unwrap()
        .unwrap();
    println!("epoch_pda: {:?}", epoch_account);
    (forester_account, epoch_account)
}
/// Asserts:
/// 1. ForesterEpochPda has reported work
/// 2. EpochPda has updated total work by forester work counter
///
/// The `pre_*` accounts (fetched before the transaction) are mutated into
/// the expected post-transaction state and compared wholesale.
pub async fn assert_report_work<R: RpcConnection>(
    rpc: &mut R,
    forester_epoch_pda_pubkey: &Pubkey,
    epoch_pda_pubkey: &Pubkey,
    mut pre_forester_epoch_pda: ForesterEpochPda,
    mut pre_epoch_pda: EpochPda,
) {
    let forester_epoch_pda = rpc
        .get_anchor_account::<ForesterEpochPda>(forester_epoch_pda_pubkey)
        .await
        .unwrap()
        .unwrap();
    // Only `has_reported_work` should have flipped on the forester PDA.
    pre_forester_epoch_pda.has_reported_work = true;
    assert_eq!(forester_epoch_pda, pre_forester_epoch_pda);
    let epoch_pda = rpc
        .get_anchor_account::<EpochPda>(epoch_pda_pubkey)
        .await
        .unwrap()
        .unwrap();
    // Epoch-wide total work grows by this forester's work counter.
    pre_epoch_pda.total_work += forester_epoch_pda.work_counter;
    assert_eq!(epoch_pda, pre_epoch_pda);
}
/// Asserts the correct creation of a ForesterEpochPda.
///
/// The expected PDA is reconstructed from the on-chain `EpochPda` and
/// `ForesterPda` and compared field-by-field against the fetched account.
pub async fn assert_registered_forester_pda<R: RpcConnection>(
    rpc: &mut R,
    forester_epoch_pda_pubkey: &Pubkey,
    forester_derivation_pubkey: &Pubkey,
    epoch: u64,
) {
    let (forester_pda_pubkey, _) = get_forester_pda(forester_derivation_pubkey);
    let epoch_pda_pubkey = get_epoch_pda_address(epoch);
    let epoch_pda = rpc
        .get_anchor_account::<EpochPda>(&epoch_pda_pubkey)
        .await
        .unwrap()
        .unwrap();
    let forester_pda = rpc
        .get_anchor_account::<ForesterPda>(&forester_pda_pubkey)
        .await
        .unwrap()
        .unwrap();
    // Active phase of epoch N starts after genesis + one registration phase
    // + N full active phases.
    let epoch_active_phase_start_slot = epoch_pda.protocol_config.genesis_slot
        + epoch_pda.protocol_config.registration_phase_length
        + epoch_pda.epoch * epoch_pda.protocol_config.active_phase_length;
    let expected_forester_epoch_pda = ForesterEpochPda {
        authority: forester_pda.authority,
        config: forester_pda.config,
        epoch: epoch_pda.epoch,
        weight: forester_pda.active_weight,
        work_counter: 0,
        has_reported_work: false,
        // NOTE(review): presumably the cumulative weight registered before
        // this forester (total minus own weight) — confirm against the
        // registry program's registration logic.
        forester_index: epoch_pda.registered_weight - forester_pda.active_weight,
        total_epoch_weight: None,
        epoch_active_phase_start_slot,
        protocol_config: epoch_pda.protocol_config,
        finalize_counter: 0,
    };
    let forester_epoch_pda = rpc
        .get_anchor_account::<ForesterEpochPda>(forester_epoch_pda_pubkey)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(forester_epoch_pda, expected_forester_epoch_pda);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/test_forester.rs
|
use account_compression::instruction::UpdateAddressMerkleTree;
use account_compression::state::QueueAccount;
use account_compression::utils::constants::{
ADDRESS_MERKLE_TREE_HEIGHT, ADDRESS_MERKLE_TREE_ROOTS,
};
use account_compression::{instruction::InsertAddresses, StateMerkleTreeAccount, ID};
use account_compression::{AddressMerkleTreeAccount, SAFETY_MARGIN};
use anchor_lang::system_program;
use anchor_lang::{InstructionData, ToAccountMetas};
use light_client::rpc::errors::RpcError;
use light_client::rpc::RpcConnection;
use light_concurrent_merkle_tree::event::MerkleTreeEvent;
use light_hasher::Poseidon;
use light_indexed_merkle_tree::copy::IndexedMerkleTreeCopy;
use forester_utils::indexer::{AddressMerkleTreeBundle, StateMerkleTreeBundle};
use forester_utils::{get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree};
use light_program_test::test_env::NOOP_PROGRAM_ID;
use light_registry::account_compression_cpi::sdk::{
create_nullify_instruction, create_update_address_merkle_tree_instruction,
CreateNullifyInstructionInputs, UpdateAddressMerkleTreeInstructionInputs,
};
use light_registry::utils::get_forester_epoch_pda_from_authority;
use light_registry::{ForesterEpochPda, RegisterForester};
use light_utils::bigint::bigint_to_be_bytes_array;
use log::debug;
use solana_sdk::signature::Signature;
use solana_sdk::{
instruction::{AccountMeta, Instruction},
pubkey::Pubkey,
signature::{Keypair, Signer},
transaction::Transaction,
};
use thiserror::Error;
// doesn't keep its own Merkle tree but gets it from the indexer
// can also get all the state and Address Merkle trees from the indexer
// the lightweight version is just a function
// we should have a random option that shuffles the order in which to nullify transactions
// we should have a parameters how many to nullify
// in the test we should nullify everything once the queue is 60% full
/// Check compressed_accounts in the queue array which are not nullified yet
/// Iterate over these compressed_accounts and nullify them
///
/// Checks:
/// 1. Value in hashset is marked
/// 2. State tree root is updated
/// 3. TODO: add event is emitted (after rebase)
///
/// Optional: assert that the Merkle tree doesn't change except the updated leaf
pub async fn nullify_compressed_accounts<R: RpcConnection>(
    rpc: &mut R,
    forester: &Keypair,
    state_tree_bundle: &mut StateMerkleTreeBundle,
    epoch: u64,
    is_metadata_forester: bool,
) -> Result<(), RpcError> {
    let nullifier_queue = unsafe {
        get_hash_set::<QueueAccount, R>(rpc, state_tree_bundle.accounts.nullifier_queue).await
    };
    // Work counter before nullifying; the counter check below is skipped
    // for metadata foresters, so 0 suffices for them.
    let pre_forester_counter = if is_metadata_forester {
        0
    } else {
        rpc.get_anchor_account::<ForesterEpochPda>(
            &get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0,
        )
        .await
        .unwrap()
        .unwrap()
        .work_counter
    };
    let onchain_merkle_tree =
        get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
            rpc,
            state_tree_bundle.accounts.merkle_tree,
        )
        .await;
    // The local reference tree must be in sync before we start.
    assert_eq!(
        onchain_merkle_tree.root(),
        state_tree_bundle.merkle_tree.root()
    );
    let pre_root = onchain_merkle_tree.root();
    let change_log_index = onchain_merkle_tree.changelog_index() as u64;
    let mut compressed_account_to_nullify = Vec::new();
    // Remember whether the queue held anything, for the final root check.
    let first = nullifier_queue.first_no_seq().unwrap();
    // Collect every queue entry that has not been nullified yet (i.e. has
    // no sequence number assigned).
    for i in 0..nullifier_queue.get_capacity() {
        let bucket = nullifier_queue.get_bucket(i).unwrap();
        if let Some(bucket) = bucket {
            if bucket.sequence_number.is_none() {
                debug!("element to nullify: {:?}", bucket.value_bytes());
                let leaf_index: usize = state_tree_bundle
                    .merkle_tree
                    .get_leaf_index(&bucket.value_bytes())
                    .unwrap();
                debug!("leaf_index: {:?}", leaf_index);
                compressed_account_to_nullify.push((i, bucket.value_bytes()));
            }
        }
    }
    debug!(
        "nullifying {:?} accounts ",
        compressed_account_to_nullify.len()
    );
    // Nullify on-chain, one account per transaction.
    for (i, (index_in_nullifier_queue, compressed_account)) in
        compressed_account_to_nullify.iter().enumerate()
    {
        let leaf_index: usize = state_tree_bundle
            .merkle_tree
            .get_leaf_index(compressed_account)
            .unwrap();
        debug!("nullifying leaf: {:?}", leaf_index);
        let proof: Vec<[u8; 32]> = state_tree_bundle
            .merkle_tree
            .get_proof_of_leaf(leaf_index, false)
            .unwrap()
            .to_array::<16>()
            .unwrap()
            .to_vec();
        let ix = create_nullify_instruction(
            CreateNullifyInstructionInputs {
                authority: forester.pubkey(),
                nullifier_queue: state_tree_bundle.accounts.nullifier_queue,
                merkle_tree: state_tree_bundle.accounts.merkle_tree,
                change_log_indices: vec![change_log_index],
                leaves_queue_indices: vec![*index_in_nullifier_queue as u16],
                indices: vec![leaf_index as u64],
                proofs: vec![proof],
                derivation: forester.pubkey(),
                is_metadata_forester,
            },
            epoch,
        );
        let instructions = [ix];
        let event = rpc
            .create_and_send_transaction_with_event::<MerkleTreeEvent>(
                &instructions,
                &forester.pubkey(),
                &[forester],
                None,
            )
            .await?
            .unwrap();
        // The nullify instruction must emit a V2 tree event with the
        // expected tree id, sequence number, and nullified leaf index.
        match event.0 {
            MerkleTreeEvent::V2(event) => {
                assert_eq!(event.id, state_tree_bundle.accounts.merkle_tree.to_bytes());
                assert_eq!(
                    event.seq,
                    onchain_merkle_tree.sequence_number() as u64 + 1 + i as u64
                );
                assert_eq!(event.nullified_leaves_indices.len(), 1);
                assert_eq!(event.nullified_leaves_indices[0], leaf_index as u64);
            }
            _ => {
                panic!("Wrong event type.");
            }
        }
        assert_value_is_marked_in_queue(
            rpc,
            state_tree_bundle,
            index_in_nullifier_queue,
            compressed_account,
        )
        .await;
    }
    let num_nullified = compressed_account_to_nullify.len() as u64;
    // Locally nullify all leaves
    for (_, compressed_account) in compressed_account_to_nullify.iter() {
        let leaf_index = state_tree_bundle
            .merkle_tree
            .get_leaf_index(compressed_account)
            .unwrap();
        debug!("locally nullifying leaf_index {}", leaf_index);
        debug!("compressed_account {:?}", compressed_account);
        debug!(
            "merkle tree pubkey {:?}",
            state_tree_bundle.accounts.merkle_tree
        );
        state_tree_bundle
            .merkle_tree
            .update(&[0u8; 32], leaf_index)
            .unwrap();
    }
    let onchain_merkle_tree =
        get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
            rpc,
            state_tree_bundle.accounts.merkle_tree,
        )
        .await;
    // On-chain and local reference trees must agree after nullification.
    assert_eq!(
        onchain_merkle_tree.root(),
        state_tree_bundle.merkle_tree.root()
    );
    if !is_metadata_forester {
        assert_forester_counter(
            rpc,
            &get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0,
            pre_forester_counter,
            num_nullified,
        )
        .await
        .unwrap();
    }
    // SAFEGUARD: check that the root changed if there was at least one element to nullify
    if first.is_some() {
        assert_ne!(pre_root, onchain_merkle_tree.root());
    }
    Ok(())
}
/// Asserts that a just-nullified value is marked in the nullifier queue:
/// the bucket at `index_in_nullifier_queue` still holds `compressed_account`
/// and its sequence number is set to the point at which the entry may be
/// overwritten (current tree sequence number + root buffer capacity +
/// SAFETY_MARGIN).
///
/// Fix: removed the unused lifetime parameter `'a` the original declared.
async fn assert_value_is_marked_in_queue<R: RpcConnection>(
    rpc: &mut R,
    state_tree_bundle: &mut StateMerkleTreeBundle,
    index_in_nullifier_queue: &usize,
    compressed_account: &[u8; 32],
) {
    let nullifier_queue = unsafe {
        get_hash_set::<QueueAccount, R>(rpc, state_tree_bundle.accounts.nullifier_queue).await
    };
    let array_element = nullifier_queue
        .get_bucket(*index_in_nullifier_queue)
        .unwrap()
        .unwrap();
    // The value itself must still be present in the queue slot.
    assert_eq!(&array_element.value_bytes(), compressed_account);
    let onchain_merkle_tree =
        get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
            rpc,
            state_tree_bundle.accounts.merkle_tree,
        )
        .await;
    assert_eq!(
        array_element.sequence_number(),
        Some(
            onchain_merkle_tree.sequence_number()
                + onchain_merkle_tree.roots.capacity()
                + SAFETY_MARGIN as usize
        )
    );
}
/// Verifies that the forester's epoch work counter advanced by exactly
/// `num_nullified` relative to `pre`; on mismatch, logs the observed state
/// and returns a custom RPC error.
pub async fn assert_forester_counter<R: RpcConnection>(
    rpc: &mut R,
    pubkey: &Pubkey,
    pre: u64,
    num_nullified: u64,
) -> Result<(), RpcError> {
    let account = rpc
        .get_anchor_account::<ForesterEpochPda>(pubkey)
        .await?
        .unwrap();
    let expected = pre + num_nullified;
    if account.work_counter == expected {
        return Ok(());
    }
    debug!("account.work_counter: {}", account.work_counter);
    debug!("pre: {}", pre);
    debug!("num_nullified: {}", num_nullified);
    debug!("forester pubkey: {:?}", pubkey);
    Err(RpcError::CustomError(
        "ForesterEpochPda counter not updated correctly".to_string(),
    ))
}
/// Errors produced while mocking forester queue processing in tests.
#[derive(Error, Debug)]
pub enum RelayerUpdateError {
    /// An underlying RPC call failed during the update.
    #[error("Error in relayer update")]
    RpcError,
}
/// Mocks the address insert logic of a forester.
/// Gets addresses from the AddressQueue and inserts them into the AddressMerkleTree.
///
/// Loops until the on-chain address queue is empty. Each iteration dequeues
/// the first address, derives the low-element update from a snapshot of the
/// off-chain indexed array, submits the on-chain update, then (on success)
/// replays the same update on the off-chain reference tree and asserts that
/// on-chain and off-chain state agree.
///
/// Checks:
/// 1. Element has been marked correctly
/// 2. Merkle tree has been updated correctly
///
/// TODO: Event has been emitted, event doesn't exist yet
pub async fn empty_address_queue_test<R: RpcConnection>(
    forester: &Keypair,
    rpc: &mut R,
    address_tree_bundle: &mut AddressMerkleTreeBundle,
    signer_is_owner: bool,
    epoch: u64,
    is_metadata_forester: bool,
) -> Result<(), RelayerUpdateError> {
    let address_merkle_tree_pubkey = address_tree_bundle.accounts.merkle_tree;
    let address_queue_pubkey = address_tree_bundle.accounts.queue;
    // Snapshots taken BEFORE any update: proofs sent on-chain are derived from
    // these, so later iterations exercise the on-chain changelog patching.
    let initial_merkle_tree_state = address_tree_bundle.merkle_tree.clone();
    let initial_indexed_array_state = address_tree_bundle.indexed_array.clone();
    // Live references which are kept in sync with the on-chain tree.
    let relayer_merkle_tree = &mut address_tree_bundle.merkle_tree;
    let relayer_indexing_array = &mut address_tree_bundle.indexed_array;
    let mut update_errors: Vec<RpcError> = Vec::new();
    let address_merkle_tree =
        get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
            rpc,
            address_merkle_tree_pubkey,
        )
        .await;
    // Changelog indices are captured once and reused for every update; the
    // on-chain program patches stale proofs via its changelogs.
    let indexed_changelog_index = address_merkle_tree.indexed_changelog_index() as u16;
    let changelog_index = address_merkle_tree.changelog_index() as u16;
    let mut counter = 0;
    loop {
        // Forester work counter before the update; compared after a successful
        // update via assert_forester_counter. Irrelevant when the signer owns
        // the tree (no registry accounting).
        let pre_forester_counter = if !signer_is_owner {
            rpc.get_anchor_account::<ForesterEpochPda>(
                &get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0,
            )
            .await
            .map_err(|e| RelayerUpdateError::RpcError)?
            .unwrap()
            .work_counter
        } else {
            0
        };
        let address_merkle_tree =
            get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
                rpc,
                address_merkle_tree_pubkey,
            )
            .await;
        // Invariant: reference tree and on-chain tree agree at loop entry.
        assert_eq!(relayer_merkle_tree.root(), address_merkle_tree.root());

        let address_queue =
            unsafe { get_hash_set::<QueueAccount, R>(rpc, address_queue_pubkey).await };

        // Dequeue the first unsequenced address; queue empty -> done.
        let address = address_queue.first_no_seq().unwrap();
        if address.is_none() {
            break;
        }
        let (address, address_hashset_index) = address.unwrap();
        // Create new element from the dequeued value.
        // Low element and proof come from the INITIAL snapshot (see above).
        let (old_low_address, old_low_address_next_value) = initial_indexed_array_state
            .find_low_element_for_nonexistent(&address.value_biguint())
            .unwrap();
        let address_bundle = initial_indexed_array_state
            .new_element_with_low_element_index(old_low_address.index, &address.value_biguint())
            .unwrap();

        // Get the Merkle proof for updating low element.
        let low_address_proof = initial_merkle_tree_state
            .get_proof_of_leaf(old_low_address.index, false)
            .unwrap();
        let old_sequence_number = address_merkle_tree.sequence_number();
        let old_root = address_merkle_tree.root();
        // Update on-chain tree.
        let update_successful = match update_merkle_tree(
            rpc,
            forester,
            address_queue_pubkey,
            address_merkle_tree_pubkey,
            address_hashset_index,
            old_low_address.index as u64,
            bigint_to_be_bytes_array(&old_low_address.value).unwrap(),
            old_low_address.next_index as u64,
            bigint_to_be_bytes_array(&old_low_address_next_value).unwrap(),
            low_address_proof.to_array().unwrap(),
            Some(changelog_index),
            Some(indexed_changelog_index),
            signer_is_owner,
            epoch,
            is_metadata_forester,
        )
        .await
        {
            Ok(event) => {
                let event = event.unwrap();
                match event.0 {
                    MerkleTreeEvent::V3(event) => {
                        // Only assert for the first update since the other updates might be patched
                        // the asserts are likely to fail
                        if counter == 0 {
                            assert_eq!(event.id, address_merkle_tree_pubkey.to_bytes());
                            assert_eq!(event.seq, old_sequence_number as u64 + 1);
                            assert_eq!(event.updates.len(), 1);
                            let event = &event.updates[0];
                            assert_eq!(
                                event.new_low_element.index, address_bundle.new_low_element.index,
                                "Empty Address Queue Test: invalid new_low_element.index"
                            );
                            assert_eq!(
                                event.new_low_element.next_index,
                                address_bundle.new_low_element.next_index,
                                "Empty Address Queue Test: invalid new_low_element.next_index"
                            );
                            assert_eq!(
                                event.new_low_element.value,
                                bigint_to_be_bytes_array::<32>(
                                    &address_bundle.new_low_element.value
                                )
                                .unwrap(),
                                "Empty Address Queue Test: invalid new_low_element.value"
                            );
                            assert_eq!(
                                event.new_low_element.next_value,
                                bigint_to_be_bytes_array::<32>(&address_bundle.new_element.value)
                                    .unwrap(),
                                "Empty Address Queue Test: invalid new_low_element.next_value"
                            );
                            let leaf_hash = address_bundle
                                .new_low_element
                                .hash::<Poseidon>(&address_bundle.new_element.value)
                                .unwrap();
                            assert_eq!(
                                event.new_low_element_hash, leaf_hash,
                                "Empty Address Queue Test: invalid new_low_element_hash"
                            );
                            let leaf_hash = address_bundle
                                .new_element
                                .hash::<Poseidon>(&address_bundle.new_element_next_value)
                                .unwrap();
                            assert_eq!(
                                event.new_high_element_hash, leaf_hash,
                                "Empty Address Queue Test: invalid new_high_element_hash"
                            );
                            assert_eq!(
                                event.new_high_element.index, address_bundle.new_element.index,
                                "Empty Address Queue Test: invalid new_high_element.index"
                            );
                            assert_eq!(
                                event.new_high_element.next_index,
                                address_bundle.new_element.next_index,
                                "Empty Address Queue Test: invalid new_high_element.next_index"
                            );
                            assert_eq!(
                                event.new_high_element.value,
                                bigint_to_be_bytes_array::<32>(&address_bundle.new_element.value)
                                    .unwrap(),
                                "Empty Address Queue Test: invalid new_high_element.value"
                            );
                            assert_eq!(
                                event.new_high_element.next_value,
                                bigint_to_be_bytes_array::<32>(
                                    &address_bundle.new_element_next_value
                                )
                                .unwrap(),
                                "Empty Address Queue Test: invalid new_high_element.next_value"
                            );
                        }
                    }
                    _ => {
                        panic!("Wrong event type.");
                    }
                }
                counter += 1;
                true
            }
            Err(e) => {
                update_errors.push(e);
                break;
            }
        };

        if update_successful {
            if !signer_is_owner {
                // Each successful update must bump the forester work counter by 1.
                assert_forester_counter(
                    rpc,
                    &get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0,
                    pre_forester_counter,
                    1,
                )
                .await
                .unwrap();
            }
            let merkle_tree =
                get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
                    rpc,
                    address_merkle_tree_pubkey,
                )
                .await;
            // Replay the update on the LIVE reference state (not the snapshot)
            // so the reference tree tracks the on-chain tree.
            let (old_low_address, _) = relayer_indexing_array
                .find_low_element_for_nonexistent(&address.value_biguint())
                .unwrap();
            let address_bundle = relayer_indexing_array
                .new_element_with_low_element_index(old_low_address.index, &address.value_biguint())
                .unwrap();

            let address_queue =
                unsafe { get_hash_set::<QueueAccount, R>(rpc, address_queue_pubkey).await };
            // The dequeued bucket must be marked with the sequence number after
            // which it may be reused.
            assert_eq!(
                address_queue
                    .get_bucket(address_hashset_index as usize)
                    .unwrap()
                    .unwrap()
                    .sequence_number()
                    .unwrap(),
                old_sequence_number + address_queue.sequence_threshold + 2 // We are doing two Merkle tree operations
            );

            relayer_merkle_tree
                .update(
                    &address_bundle.new_low_element,
                    &address_bundle.new_element,
                    &address_bundle.new_element_next_value,
                )
                .unwrap();
            relayer_indexing_array
                .append_with_low_element_index(
                    address_bundle.new_low_element.index,
                    &address_bundle.new_element.value,
                )
                .unwrap();
            // Low-element update + append -> sequence number advances by 2.
            assert_eq!(merkle_tree.sequence_number(), old_sequence_number + 2);

            assert_ne!(old_root, merkle_tree.root(), "Root did not change.");
            assert_eq!(
                relayer_merkle_tree.root(),
                merkle_tree.root(),
                "Root off-chain onchain inconsistent."
            );
            // On-chain changelog entry for the appended leaf must equal the
            // reference tree's path to that leaf.
            let changelog_entry = merkle_tree
                .changelog
                .get(merkle_tree.changelog_index())
                .unwrap();
            let path = relayer_merkle_tree
                .get_path_of_leaf(merkle_tree.current_index(), true)
                .unwrap();
            for i in 0..ADDRESS_MERKLE_TREE_HEIGHT as usize {
                let changelog_node = changelog_entry.path[i].unwrap();
                let path_node = path[i];
                assert_eq!(changelog_node, path_node);
            }
            // Indexed changelog must record the new element, its proof, and
            // the changelog index it refers to.
            let indexed_changelog_entry = merkle_tree
                .indexed_changelog
                .get(merkle_tree.indexed_changelog_index())
                .unwrap();
            let proof = relayer_merkle_tree
                .get_proof_of_leaf(merkle_tree.current_index(), false)
                .unwrap();
            assert_eq!(
                address_bundle.new_element,
                indexed_changelog_entry.element.into(),
            );
            assert_eq!(indexed_changelog_entry.proof.as_slice(), proof.as_slice());
            assert_eq!(
                indexed_changelog_entry.changelog_index,
                merkle_tree.changelog_index()
            );
        }
    }

    if update_errors.is_empty() {
        Ok(())
    } else {
        panic!("Errors: {:?}", update_errors);
    }
}
/// Submits a single address Merkle tree update (low-element update + append)
/// and returns the emitted `MerkleTreeEvent`, if any.
///
/// - `changelog_index` / `indexed_changelog_index`: if `None`, the current
///   indices are fetched from the on-chain account.
/// - `signer_is_owner == false`: routes through the registry program
///   (forester-authorized instruction); otherwise builds the direct
///   account-compression instruction with the forester as authority.
#[allow(clippy::too_many_arguments)]
pub async fn update_merkle_tree<R: RpcConnection>(
    rpc: &mut R,
    forester: &Keypair,
    address_queue_pubkey: Pubkey,
    address_merkle_tree_pubkey: Pubkey,
    value: u16,
    low_address_index: u64,
    low_address_value: [u8; 32],
    low_address_next_index: u64,
    low_address_next_value: [u8; 32],
    low_address_proof: [[u8; 32]; 16],
    changelog_index: Option<u16>,
    indexed_changelog_index: Option<u16>,
    signer_is_owner: bool,
    epoch: u64,
    is_metadata_forester: bool,
) -> Result<Option<(MerkleTreeEvent, Signature, u64)>, RpcError> {
    // Default to the live on-chain changelog index when not provided.
    let changelog_index = match changelog_index {
        Some(changelog_index) => changelog_index,
        None => {
            let address_merkle_tree =
                get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
                    rpc,
                    address_merkle_tree_pubkey,
                )
                .await;
            address_merkle_tree.changelog_index() as u16
        }
    };
    // Same fallback for the indexed changelog index.
    let indexed_changelog_index = match indexed_changelog_index {
        Some(indexed_changelog_index) => indexed_changelog_index,
        None => {
            let address_merkle_tree =
                get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
                    rpc,
                    address_merkle_tree_pubkey,
                )
                .await;
            address_merkle_tree.indexed_changelog_index() as u16
        }
    };
    let update_ix = if !signer_is_owner {
        // Registry path: forester acts under its epoch registration.
        create_update_address_merkle_tree_instruction(
            UpdateAddressMerkleTreeInstructionInputs {
                authority: forester.pubkey(),
                derivation: forester.pubkey(),
                address_merkle_tree: address_merkle_tree_pubkey,
                address_queue: address_queue_pubkey,
                changelog_index,
                indexed_changelog_index,
                value,
                low_address_index,
                low_address_value,
                low_address_next_index,
                low_address_next_value,
                low_address_proof,
                is_metadata_forester,
            },
            epoch,
        )
    } else {
        // Direct path: hand-built account-compression instruction.
        let instruction_data = UpdateAddressMerkleTree {
            changelog_index,
            indexed_changelog_index,
            value,
            low_address_index,
            low_address_value,
            low_address_next_index,
            low_address_next_value,
            low_address_proof,
        };
        Instruction {
            program_id: ID,
            accounts: vec![
                AccountMeta::new(forester.pubkey(), true),
                AccountMeta::new(ID, false),
                AccountMeta::new(address_queue_pubkey, false),
                AccountMeta::new(address_merkle_tree_pubkey, false),
                AccountMeta::new(NOOP_PROGRAM_ID, false),
            ],
            data: instruction_data.data(),
        }
    };
    rpc.create_and_send_transaction_with_event::<MerkleTreeEvent>(
        &[update_ix],
        &forester.pubkey(),
        &[forester],
        None,
    )
    .await
}
/// Inserts `addresses` into the given address queue via the
/// account-compression `InsertIntoQueues` instruction.
///
/// The instruction expects one `(queue, merkle_tree)` writable account pair
/// per address, appended after the static accounts; the payer of the
/// `context` signs and pays the transaction.
pub async fn insert_addresses<R: RpcConnection>(
    context: &mut R,
    address_queue_pubkey: Pubkey,
    address_merkle_tree_pubkey: Pubkey,
    addresses: Vec<[u8; 32]>,
) -> Result<Signature, RpcError> {
    let num_addresses = addresses.len();
    let instruction_data = InsertAddresses { addresses };
    let accounts = account_compression::accounts::InsertIntoQueues {
        fee_payer: context.get_payer().pubkey(),
        authority: context.get_payer().pubkey(),
        registered_program_pda: None,
        system_program: system_program::ID,
    };
    // One (queue, merkle tree) pair per address, built lazily instead of
    // materializing a Vec<Vec<AccountMeta>> and cloning each entry.
    let queue_account_pairs = (0..num_addresses).flat_map(|_| {
        [
            AccountMeta::new(address_queue_pubkey, false),
            AccountMeta::new(address_merkle_tree_pubkey, false),
        ]
    });
    let insert_ix = Instruction {
        program_id: ID,
        accounts: accounts
            .to_account_metas(Some(true))
            .into_iter()
            .chain(queue_account_pairs)
            .collect(),
        data: instruction_data.data(),
    };
    let latest_blockhash = context.get_latest_blockhash().await.unwrap();
    let transaction = Transaction::new_signed_with_payer(
        &[insert_ix],
        Some(&context.get_payer().pubkey()),
        &[&context.get_payer()],
        latest_blockhash,
    );
    context.process_transaction(transaction).await
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/lib.rs
|
use account_compression::initialize_address_merkle_tree::Pubkey;
use account_compression::{
AddressMerkleTreeConfig, AddressQueueConfig, QueueType, RegisteredProgram,
};
use solana_sdk::signature::{Keypair, Signature, Signer};
use solana_sdk::{instruction::InstructionError, transaction};
use std::cmp;
pub mod address_tree_rollover;
pub mod assert_address_merkle_tree;
pub mod assert_compressed_tx;
pub mod assert_epoch;
pub mod assert_merkle_tree;
pub mod assert_queue;
pub mod assert_rollover;
pub mod assert_token_tx;
pub mod e2e_test_env;
#[allow(unused)]
pub mod indexer;
pub mod spl;
pub mod state_tree_rollover;
pub mod system_program;
#[allow(unused)]
pub mod test_forester;
use crate::assert_address_merkle_tree::assert_address_merkle_tree_initialized;
use crate::assert_queue::assert_address_queue_initialized;
pub use forester_utils::{
airdrop_lamports, create_account_instruction,
forester_epoch::{Epoch, TreeAccounts, TreeType},
get_concurrent_merkle_tree, get_hash_set, get_indexed_merkle_tree,
indexer::{AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, TokenDataWithContext},
registry::{
create_rollover_address_merkle_tree_instructions,
create_rollover_state_merkle_tree_instructions, register_test_forester,
update_test_forester,
},
AccountZeroCopy,
};
pub use light_client::{
rpc::{
assert_rpc_error, solana_rpc::SolanaRpcUrl, RpcConnection, RpcError, SolanaRpcConnection,
},
transaction_params::{FeeConfig, TransactionParams},
};
use light_hasher::Poseidon;
use light_program_test::test_env::create_address_merkle_tree_and_queue_account;
use light_registry::account_compression_cpi::sdk::get_registered_program_pda;
/// Creates an address Merkle tree + queue account pair and asserts both were
/// initialized with the expected on-chain state (metadata, changelogs, roots,
/// rightmost leaf, owner).
///
/// `registry == true` means the accounts are owned by the registered-program
/// group authority instead of `payer`.
#[allow(clippy::too_many_arguments)]
#[inline(never)]
pub async fn create_address_merkle_tree_and_queue_account_with_assert<R: RpcConnection>(
    payer: &Keypair,
    registry: bool,
    context: &mut R,
    address_merkle_tree_keypair: &Keypair,
    address_queue_keypair: &Keypair,
    program_owner: Option<Pubkey>,
    forester: Option<Pubkey>,
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
    index: u64,
) -> Result<Signature, RpcError> {
    let result = create_address_merkle_tree_and_queue_account(
        payer,
        registry,
        context,
        address_merkle_tree_keypair,
        address_queue_keypair,
        program_owner,
        forester,
        merkle_tree_config,
        queue_config,
        index,
    )
    .await;

    // To initialize the indexed tree we do 4 operations:
    // 1. insert 0 append 0 and update 0
    // 2. insert 1 append BN254_FIELD_SIZE -1 and update 0
    // we appended two values this the expected next index is 2;
    // The right most leaf is the hash of the indexed array element with value FIELD_SIZE - 1
    // index 1, next_index: 0
    // Expected lengths are capped by the configured ring-buffer sizes.
    let expected_change_log_length = cmp::min(4, merkle_tree_config.changelog_size as usize);
    let expected_roots_length = cmp::min(4, merkle_tree_config.roots_size as usize);
    let expected_next_index = 2;
    let expected_indexed_change_log_length =
        cmp::min(4, merkle_tree_config.address_changelog_size as usize);

    // Rebuild the initialized tree off-chain to derive the expected
    // rightmost leaf, then cross-check against a hard-coded constant.
    let mut reference_tree =
        light_indexed_merkle_tree::reference::IndexedMerkleTree::<Poseidon, usize>::new(
            account_compression::utils::constants::ADDRESS_MERKLE_TREE_HEIGHT as usize,
            account_compression::utils::constants::ADDRESS_MERKLE_TREE_CANOPY_DEPTH as usize,
        )
        .unwrap();
    reference_tree.init().unwrap();

    let expected_right_most_leaf = reference_tree
        .merkle_tree
        .get_leaf(reference_tree.merkle_tree.rightmost_index - 1);

    let _expected_right_most_leaf = [
        30, 164, 22, 238, 180, 2, 24, 181, 64, 193, 207, 184, 219, 233, 31, 109, 84, 232, 162, 158,
        220, 48, 163, 158, 50, 107, 64, 87, 167, 217, 99, 245,
    ];
    assert_eq!(expected_right_most_leaf, _expected_right_most_leaf);
    // Registry-owned trees are owned by the group authority PDA, not payer.
    let owner = if registry {
        let registered_program = get_registered_program_pda(&light_registry::ID);
        let registered_program_account = context
            .get_anchor_account::<RegisteredProgram>(&registered_program)
            .await
            .unwrap()
            .unwrap();
        registered_program_account.group_authority_pda
    } else {
        payer.pubkey()
    };

    assert_address_merkle_tree_initialized(
        context,
        &address_merkle_tree_keypair.pubkey(),
        &address_queue_keypair.pubkey(),
        merkle_tree_config,
        index,
        program_owner,
        forester,
        expected_change_log_length,
        expected_roots_length,
        expected_next_index,
        &expected_right_most_leaf,
        &owner,
        expected_indexed_change_log_length,
    )
    .await;

    assert_address_queue_initialized(
        context,
        &address_queue_keypair.pubkey(),
        queue_config,
        &address_merkle_tree_keypair.pubkey(),
        merkle_tree_config,
        QueueType::AddressQueue,
        index,
        program_owner,
        forester,
        &owner,
    )
    .await;

    result
}
/// Asserts that the given `BanksTransactionResultWithMetadata` is an error with a custom error code
/// or a program error.
/// Unfortunately BanksTransactionResultWithMetadata does not reliably expose the custom error code, so
/// we allow program error as well.
// TODO: unify with assert_rpc_error
pub fn assert_custom_error_or_program_error(
    result: Result<solana_sdk::signature::Signature, RpcError>,
    error_code: u32,
) -> Result<(), RpcError> {
    // Accept an instruction error at index 0 that is either the expected
    // custom code or a generic program failure.
    let accepted = match result {
        Err(RpcError::TransactionError(transaction::TransactionError::InstructionError(
            0,
            ref err,
        ))) => {
            *err == InstructionError::Custom(error_code)
                || *err == InstructionError::ProgramFailedToComplete
        }
        _ => false,
    };
    if accepted {
        Ok(())
    } else {
        println!("result {:?}", result);
        println!("error_code {:?}", error_code);
        Err(RpcError::AssertRpcError(format!(
            "Expected error code {} or program error, got {:?}",
            error_code, result
        )))
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_address_merkle_tree.rs
|
use forester_utils::{get_indexed_merkle_tree, AccountZeroCopy};
use light_client::rpc::RpcConnection;
use light_hasher::Poseidon;
use solana_sdk::pubkey::Pubkey;
/// Asserts that a freshly initialized address Merkle tree account matches the
/// supplied config: rollover metadata, access metadata, associated queue, and
/// the deserialized tree's changelog/roots/canopy/next-index/rightmost-leaf.
#[allow(clippy::too_many_arguments)]
pub async fn assert_address_merkle_tree_initialized<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    queue_pubkey: &Pubkey,
    merkle_tree_config: &account_compression::AddressMerkleTreeConfig,
    index: u64,
    program_owner: Option<Pubkey>,
    forester: Option<Pubkey>,
    expected_changelog_length: usize,
    expected_roots_length: usize,
    expected_next_index: usize,
    expected_rightmost_leaf: &[u8; 32],
    owner_pubkey: &Pubkey,
    expected_indexed_changelog_length: usize,
) {
    let merkle_tree = AccountZeroCopy::<account_compression::AddressMerkleTreeAccount>::new(
        rpc,
        *merkle_tree_pubkey,
    )
    .await;
    let merkle_tree_account = merkle_tree.deserialized();

    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .rollover_threshold,
        merkle_tree_config.rollover_threshold.unwrap_or_default()
    );
    assert_eq!(
        merkle_tree_account.metadata.rollover_metadata.network_fee,
        merkle_tree_config.network_fee.unwrap_or_default()
    );

    // The address Merkle tree is never directly called by the user.
    // The whole rollover fees are collected by the address queue.
    let expected_rollover_fee = 0;
    assert_eq!(
        merkle_tree_account.metadata.rollover_metadata.rollover_fee,
        expected_rollover_fee
    );
    assert_eq!(merkle_tree_account.metadata.rollover_metadata.index, index);
    // u64::MAX rolledover_slot == "not rolled over".
    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .rolledover_slot,
        u64::MAX
    );
    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .close_threshold,
        merkle_tree_config.close_threshold.unwrap_or(u64::MAX)
    );
    // No successor tree yet.
    assert_eq!(
        merkle_tree_account.metadata.next_merkle_tree,
        Pubkey::default()
    );
    let expected_access_meta_data = account_compression::AccessMetadata {
        owner: *owner_pubkey,
        program_owner: program_owner.unwrap_or_default(),
        forester: forester.unwrap_or_default(),
    };
    assert_eq!(
        merkle_tree_account.metadata.access_metadata,
        expected_access_meta_data
    );
    assert_eq!(merkle_tree_account.metadata.associated_queue, *queue_pubkey);

    // Deserialize the zero-copy tree body and check structural state.
    let merkle_tree = get_indexed_merkle_tree::<
        account_compression::AddressMerkleTreeAccount,
        R,
        Poseidon,
        usize,
        26,
        16,
    >(rpc, *merkle_tree_pubkey)
    .await;

    assert_eq!(merkle_tree.height, merkle_tree_config.height as usize);
    assert_eq!(
        merkle_tree.merkle_tree.changelog.capacity(),
        merkle_tree_config.changelog_size as usize
    );
    assert_eq!(
        merkle_tree.merkle_tree.changelog.len(),
        expected_changelog_length
    );
    assert_eq!(
        merkle_tree.merkle_tree.changelog_index(),
        expected_changelog_length.saturating_sub(1)
    );
    assert_eq!(
        merkle_tree.roots.capacity(),
        merkle_tree_config.roots_size as usize
    );
    assert_eq!(merkle_tree.roots.len(), expected_roots_length);
    assert_eq!(
        merkle_tree.root_index(),
        expected_roots_length.saturating_sub(1)
    );

    assert_eq!(
        merkle_tree.canopy_depth,
        merkle_tree_config.canopy_depth as usize
    );
    assert_eq!(merkle_tree.next_index(), expected_next_index);
    // Sequence number wraps with the roots ring buffer.
    assert_eq!(
        merkle_tree.sequence_number() % merkle_tree_config.roots_size as usize,
        expected_roots_length.saturating_sub(1)
    );
    assert_eq!(&merkle_tree.rightmost_leaf(), expected_rightmost_leaf);
    // TODO: complete asserts
    assert_eq!(
        merkle_tree.indexed_changelog_index(),
        expected_indexed_changelog_length.saturating_sub(1)
    );
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_merkle_tree.rs
|
use account_compression::StateMerkleTreeAccount;
use forester_utils::{get_concurrent_merkle_tree, AccountZeroCopy};
use light_client::rpc::RpcConnection;
use light_hasher::Poseidon;
use light_utils::fee::compute_rollover_fee;
use solana_sdk::pubkey::Pubkey;
/// Asserts that a freshly initialized STATE Merkle tree account matches the
/// supplied parameters: rollover/access metadata, computed rollover fee
/// (derived from the rent balances of tree + queue), and the concurrent
/// tree's structural state.
#[allow(clippy::too_many_arguments)]
pub async fn assert_merkle_tree_initialized<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    queue_pubkey: &Pubkey,
    height: usize,
    changelog_capacity: usize,
    roots_capacity: usize,
    canopy_depth: usize,
    expected_changelog_length: usize,
    expected_roots_length: usize,
    expected_next_index: usize,
    expected_rightmost_leaf: &[u8; 32],
    rollover_threshold: Option<u64>,
    close_threshold: Option<u64>,
    network_fee: u64,
    payer_pubkey: &Pubkey,
) {
    let merkle_tree_account = AccountZeroCopy::<account_compression::StateMerkleTreeAccount>::new(
        rpc,
        *merkle_tree_pubkey,
    )
    .await;
    let merkle_tree_account = merkle_tree_account.deserialized();

    // Balances are needed to recompute the expected rollover fee below.
    let balance_merkle_tree = rpc
        .get_account(*merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports;
    let balance_nullifier_queue = rpc
        .get_account(*queue_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports;

    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .rollover_threshold,
        rollover_threshold.unwrap_or_default()
    );
    assert_eq!(
        merkle_tree_account.metadata.rollover_metadata.network_fee,
        network_fee
    );
    // Fee is the sum over both accounts; 0 if rollovers are disabled.
    let expected_rollover_fee = match rollover_threshold {
        Some(rollover_threshold) => {
            compute_rollover_fee(rollover_threshold, height as u32, balance_merkle_tree).unwrap()
                + compute_rollover_fee(rollover_threshold, height as u32, balance_nullifier_queue)
                    .unwrap()
        }
        None => 0,
    };
    assert_eq!(
        merkle_tree_account.metadata.rollover_metadata.rollover_fee,
        expected_rollover_fee
    );
    assert_eq!(merkle_tree_account.metadata.rollover_metadata.index, 1);
    // u64::MAX rolledover_slot == "not rolled over".
    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .rolledover_slot,
        u64::MAX
    );
    assert_eq!(
        merkle_tree_account
            .metadata
            .rollover_metadata
            .close_threshold,
        close_threshold.unwrap_or(u64::MAX)
    );
    assert_eq!(
        merkle_tree_account.metadata.next_merkle_tree,
        Pubkey::default()
    );
    assert_eq!(
        merkle_tree_account.metadata.access_metadata.owner,
        *payer_pubkey
    );
    assert_eq!(
        merkle_tree_account.metadata.access_metadata.program_owner,
        Pubkey::default()
    );
    assert_eq!(merkle_tree_account.metadata.associated_queue, *queue_pubkey);

    // Structural checks on the deserialized concurrent Merkle tree.
    let merkle_tree = get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
        rpc,
        *merkle_tree_pubkey,
    )
    .await;
    assert_eq!(merkle_tree.height, height);
    assert_eq!(merkle_tree.changelog.capacity(), changelog_capacity);
    assert_eq!(merkle_tree.changelog.len(), expected_changelog_length);
    assert_eq!(
        merkle_tree.changelog_index(),
        expected_changelog_length.saturating_sub(1)
    );
    assert_eq!(merkle_tree.roots.capacity(), roots_capacity);
    assert_eq!(merkle_tree.roots.len(), expected_roots_length);
    assert_eq!(
        merkle_tree.root_index(),
        expected_roots_length.saturating_sub(1)
    );
    assert_eq!(merkle_tree.canopy_depth, canopy_depth);
    assert_eq!(merkle_tree.next_index(), expected_next_index);
    assert_eq!(
        merkle_tree.sequence_number(),
        expected_roots_length.saturating_sub(1)
    );
    assert_eq!(&merkle_tree.rightmost_leaf(), expected_rightmost_leaf);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/address_tree_rollover.rs
|
#![allow(clippy::await_holding_refcell_ref)]
use anchor_lang::{InstructionData, Key, Lamports, ToAccountInfo, ToAccountMetas};
use solana_sdk::clock::Slot;
use solana_sdk::{
account::{AccountSharedData, WritableAccount},
account_info::AccountInfo,
instruction::Instruction,
pubkey::Pubkey,
signature::Keypair,
signer::Signer,
transaction::Transaction,
};
use crate::assert_rollover::{
assert_rolledover_merkle_trees, assert_rolledover_merkle_trees_metadata,
assert_rolledover_queues_metadata,
};
use account_compression::{
accounts, initialize_address_merkle_tree::AccountLoader, instruction, state::QueueAccount,
AddressMerkleTreeAccount,
};
use account_compression::{AddressMerkleTreeConfig, AddressQueueConfig};
use forester_utils::registry::{
create_rollover_address_merkle_tree_instructions,
create_rollover_state_merkle_tree_instructions,
};
use forester_utils::{create_account_instruction, get_hash_set, get_indexed_merkle_tree};
use light_client::rpc::{RpcConnection, RpcError};
use light_hasher::Poseidon;
use light_indexed_merkle_tree::zero_copy::IndexedMerkleTreeZeroCopyMut;
/// Test helper: directly overwrites `next_index` (and lamports) of an address
/// Merkle tree account via zero-copy mutation of the raw account data, then
/// re-reads the account to verify the write took effect. Used to simulate a
/// nearly-full tree (e.g. to trigger rollover conditions).
pub async fn set_address_merkle_tree_next_index<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    next_index: u64,
    lamports: u64,
) {
    // Skip the 8-byte anchor discriminator plus the fixed-size account struct
    // to reach the zero-copy tree body.
    let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap();
    let merkle_tree_deserialized =
        &mut IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, 26, 16>::from_bytes_zero_copy_mut(
            &mut merkle_tree.data[8 + std::mem::size_of::<AddressMerkleTreeAccount>()..],
        )
        .unwrap();
    // SAFETY-style note: writes through the zero-copy raw pointer; valid
    // because the slice above is the live tree body of this account copy.
    unsafe {
        *merkle_tree_deserialized.next_index = next_index as usize;
    }
    let mut account_share_data = AccountSharedData::from(merkle_tree);
    account_share_data.set_lamports(lamports);
    // NOTE(review): `set_account` is not awaited here — presumably synchronous
    // on the test RPC implementation; confirm against `RpcConnection`.
    rpc.set_account(merkle_tree_pubkey, &account_share_data);
    // Read back and verify the mutation was persisted.
    let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap();
    let merkle_tree_deserialized =
        IndexedMerkleTreeZeroCopyMut::<Poseidon, usize, 26, 16>::from_bytes_zero_copy_mut(
            &mut merkle_tree.data[8 + std::mem::size_of::<AddressMerkleTreeAccount>()..],
        )
        .unwrap();
    assert_eq!(merkle_tree_deserialized.next_index() as u64, next_index);
}
/// Rolls over an address Merkle tree + queue pair via the direct
/// account-compression instruction (no registry): creates the two new
/// accounts and invokes `RolloverAddressMerkleTreeAndQueue` in one
/// transaction signed by the payer and both new account keypairs.
pub async fn perform_address_merkle_tree_roll_over<R: RpcConnection>(
    context: &mut R,
    new_queue_keypair: &Keypair,
    new_address_merkle_tree_keypair: &Keypair,
    old_merkle_tree_pubkey: &Pubkey,
    old_queue_pubkey: &Pubkey,
    merkle_tree_config: &AddressMerkleTreeConfig,
    queue_config: &AddressQueueConfig,
) -> Result<solana_sdk::signature::Signature, RpcError> {
    let payer = context.get_payer().insecure_clone();
    // Rent-exempt system account for the new queue, owned by the program.
    let size = QueueAccount::size(queue_config.capacity as usize).unwrap();
    let account_create_ix = create_account_instruction(
        &payer.pubkey(),
        size,
        context
            .get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &account_compression::ID,
        Some(new_queue_keypair),
    );
    // Rent-exempt system account for the new Merkle tree, sized from config.
    let size = AddressMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
        merkle_tree_config.address_changelog_size as usize,
    );
    let mt_account_create_ix = create_account_instruction(
        &payer.pubkey(),
        size,
        context
            .get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &account_compression::ID,
        Some(new_address_merkle_tree_keypair),
    );
    let instruction_data = instruction::RolloverAddressMerkleTreeAndQueue {};
    let accounts = accounts::RolloverAddressMerkleTreeAndQueue {
        fee_payer: context.get_payer().pubkey(),
        authority: context.get_payer().pubkey(),
        registered_program_pda: None,
        new_address_merkle_tree: new_address_merkle_tree_keypair.pubkey(),
        new_queue: new_queue_keypair.pubkey(),
        old_address_merkle_tree: *old_merkle_tree_pubkey,
        old_queue: *old_queue_pubkey,
    };
    let instruction = Instruction {
        program_id: account_compression::ID,
        accounts: [accounts.to_account_metas(Some(true))].concat(),
        data: instruction_data.data(),
    };
    let blockhash = context.get_latest_blockhash().await.unwrap();
    // Three signers: payer plus the two freshly created accounts.
    let transaction = Transaction::new_signed_with_payer(
        &[account_create_ix, mt_account_create_ix, instruction],
        Some(&context.get_payer().pubkey()),
        &vec![
            &context.get_payer(),
            &new_queue_keypair,
            &new_address_merkle_tree_keypair,
        ],
        blockhash,
    );
    context.process_transaction(transaction).await
}
/// Asserts the post-conditions of an address tree + queue rollover:
/// metadata linkage between old and new accounts, structural equality of the
/// trees and queues, and that the fee payer was reimbursed rent minus the
/// three signature fees.
pub async fn assert_rolled_over_address_merkle_tree_and_queue<R: RpcConnection>(
    payer: &Pubkey,
    rpc: &mut R,
    fee_payer_prior_balance: &u64,
    old_merkle_tree_pubkey: &Pubkey,
    old_queue_pubkey: &Pubkey,
    new_merkle_tree_pubkey: &Pubkey,
    new_queue_pubkey: &Pubkey,
) {
    let current_slot = rpc.get_slot().await.unwrap();

    // Wrap the fetched account data in a local AccountInfo so it can be
    // loaded through anchor's AccountLoader (lamports set to 0 here; real
    // lamports are compared via get_lamports below).
    let mut new_mt_account = rpc
        .get_account(*new_merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut new_mt_lamports = 0u64;

    let account_info = AccountInfo::new(
        new_merkle_tree_pubkey,
        false,
        false,
        &mut new_mt_lamports,
        &mut new_mt_account.data,
        &account_compression::ID,
        false,
        0u64,
    );
    let new_mt_account =
        AccountLoader::<AddressMerkleTreeAccount>::try_from(&account_info).unwrap();
    let new_loaded_mt_account = new_mt_account.load().unwrap();

    let mut old_mt_account = rpc
        .get_account(*old_merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut old_mt_lamports = 0u64;
    let account_info = AccountInfo::new(
        old_merkle_tree_pubkey,
        false,
        false,
        &mut old_mt_lamports,
        &mut old_mt_account.data,
        &account_compression::ID,
        false,
        0u64,
    );
    let old_mt_account =
        AccountLoader::<AddressMerkleTreeAccount>::try_from(&account_info).unwrap();
    let old_loaded_mt_account = old_mt_account.load().unwrap();

    // Same account size -> same tree configuration.
    assert_eq!(
        new_mt_account.to_account_info().data.borrow().len(),
        old_mt_account.to_account_info().data.borrow().len()
    );

    assert_rolledover_merkle_trees_metadata(
        &old_loaded_mt_account.metadata,
        &new_loaded_mt_account.metadata,
        current_slot,
        new_queue_pubkey,
    );
    // Release the anchor loads before re-reading the accounts via RPC below.
    drop(new_loaded_mt_account);
    drop(old_loaded_mt_account);

    let struct_old =
        get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
            rpc,
            old_mt_account.key(),
        )
        .await;

    let struct_new =
        get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
            rpc,
            new_mt_account.key(),
        )
        .await;
    assert_rolledover_merkle_trees(&struct_old.merkle_tree, &struct_new.merkle_tree);
    assert_eq!(
        struct_old.merkle_tree.changelog.capacity(),
        struct_new.merkle_tree.changelog.capacity()
    );

    // Queue metadata checks, scoped so the AccountInfo borrows end before
    // the hash-set comparison below.
    {
        let mut new_queue_account = rpc.get_account(*new_queue_pubkey).await.unwrap().unwrap();
        let mut new_mt_lamports = 0u64;
        let account_info = AccountInfo::new(
            new_queue_pubkey,
            false,
            false,
            &mut new_mt_lamports,
            &mut new_queue_account.data,
            &account_compression::ID,
            false,
            0u64,
        );
        let new_queue_account = AccountLoader::<QueueAccount>::try_from(&account_info).unwrap();
        let new_loaded_queue_account = new_queue_account.load().unwrap();
        let mut old_queue_account = rpc.get_account(*old_queue_pubkey).await.unwrap().unwrap();
        let mut old_mt_lamports = 0u64;

        let account_info = AccountInfo::new(
            old_queue_pubkey,
            false,
            false,
            &mut old_mt_lamports,
            &mut old_queue_account.data,
            &account_compression::ID,
            false,
            0u64,
        );
        let old_queue_account = AccountLoader::<QueueAccount>::try_from(&account_info).unwrap();
        let old_loaded_queue_account = old_queue_account.load().unwrap();

        assert_eq!(
            old_queue_account.to_account_info().data.borrow().len(),
            new_queue_account.to_account_info().data.borrow().len(),
        );
        assert_rolledover_queues_metadata(
            &old_loaded_queue_account.metadata,
            &new_loaded_queue_account.metadata,
            current_slot,
            new_merkle_tree_pubkey,
            new_queue_pubkey,
            old_mt_account.get_lamports(),
            new_mt_account.get_lamports(),
            new_queue_account.get_lamports(),
        );
    }

    let fee_payer_post_balance = rpc.get_account(*payer).await.unwrap().unwrap().lamports;
    // rent is reimbursed, 3 signatures cost 3 x 5000 lamports
    assert_eq!(*fee_payer_prior_balance, fee_payer_post_balance + 15000);
    // Hash-set configuration must carry over unchanged.
    {
        let old_address_queue =
            unsafe { get_hash_set::<QueueAccount, R>(rpc, *old_queue_pubkey).await };
        let new_address_queue =
            unsafe { get_hash_set::<QueueAccount, R>(rpc, *new_queue_pubkey).await };

        assert_eq!(
            old_address_queue.get_capacity(),
            new_address_queue.get_capacity()
        );

        assert_eq!(
            old_address_queue.sequence_threshold,
            new_address_queue.sequence_threshold,
        );
    }
}
/// Rolls over an address Merkle tree + queue pair through the registry
/// program as a forester (instructions built by
/// `create_rollover_address_merkle_tree_instructions`).
#[allow(clippy::too_many_arguments)]
pub async fn perform_address_merkle_tree_roll_over_forester<R: RpcConnection>(
    payer: &Keypair,
    context: &mut R,
    new_queue_keypair: &Keypair,
    new_address_merkle_tree_keypair: &Keypair,
    old_merkle_tree_pubkey: &Pubkey,
    old_queue_pubkey: &Pubkey,
    epoch: u64,
    is_metadata_forester: bool,
) -> Result<solana_sdk::signature::Signature, RpcError> {
    let instructions = create_rollover_address_merkle_tree_instructions(
        context,
        &payer.pubkey(),
        &payer.pubkey(),
        new_queue_keypair,
        new_address_merkle_tree_keypair,
        old_merkle_tree_pubkey,
        old_queue_pubkey,
        epoch,
        is_metadata_forester,
    )
    .await;
    let blockhash = context.get_latest_blockhash().await.unwrap();
    // Signers: payer plus the two freshly created account keypairs.
    let transaction = Transaction::new_signed_with_payer(
        &instructions,
        Some(&payer.pubkey()),
        &vec![&payer, &new_queue_keypair, &new_address_merkle_tree_keypair],
        blockhash,
    );
    context.process_transaction(transaction).await
}
/// Rolls over a STATE Merkle tree + queue pair (plus the new CPI signature
/// account) through the registry program as a forester. Returns the
/// signature and the slot the transaction landed in.
#[allow(clippy::too_many_arguments)]
pub async fn perform_state_merkle_tree_roll_over_forester<R: RpcConnection>(
    payer: &Keypair,
    context: &mut R,
    new_queue_keypair: &Keypair,
    new_address_merkle_tree_keypair: &Keypair,
    new_cpi_signature_keypair: &Keypair,
    old_merkle_tree_pubkey: &Pubkey,
    old_queue_pubkey: &Pubkey,
    epoch: u64,
    is_metadata_forester: bool,
) -> Result<(solana_sdk::signature::Signature, Slot), RpcError> {
    let instructions = create_rollover_state_merkle_tree_instructions(
        context,
        &payer.pubkey(),
        &payer.pubkey(),
        new_queue_keypair,
        new_address_merkle_tree_keypair,
        new_cpi_signature_keypair,
        old_merkle_tree_pubkey,
        old_queue_pubkey,
        epoch,
        is_metadata_forester,
    )
    .await;
    let blockhash = context.get_latest_blockhash().await.unwrap();
    // Signers: payer plus the three freshly created account keypairs.
    let transaction = Transaction::new_signed_with_payer(
        &instructions,
        Some(&payer.pubkey()),
        &vec![
            &payer,
            &new_queue_keypair,
            &new_address_merkle_tree_keypair,
            &new_cpi_signature_keypair,
        ],
        blockhash,
    );
    context.process_transaction_with_context(transaction).await
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_rollover.rs
|
use account_compression::{MerkleTreeMetadata, QueueMetadata};
use anchor_lang::prelude::Pubkey;
use light_concurrent_merkle_tree::ConcurrentMerkleTree;
use light_hasher::Hasher;
/// Asserts that a rolled-over Merkle tree was initialized with the same
/// structural parameters as the old one: height, changelog capacity,
/// root-history capacity, and canopy depth.
///
/// # Panics
/// Panics if any of the compared parameters differ.
pub fn assert_rolledover_merkle_trees<H, const HEIGHT: usize>(
    old_merkle_tree: &ConcurrentMerkleTree<H, HEIGHT>,
    new_merkle_tree: &ConcurrentMerkleTree<H, HEIGHT>,
) where
    H: Hasher,
{
    assert_eq!(old_merkle_tree.height, new_merkle_tree.height);
    // The original asserted each capacity twice; asserting once is
    // equivalent and removes the duplication.
    assert_eq!(
        old_merkle_tree.changelog.capacity(),
        new_merkle_tree.changelog.capacity()
    );
    assert_eq!(
        old_merkle_tree.roots.capacity(),
        new_merkle_tree.roots.capacity()
    );
    assert_eq!(old_merkle_tree.canopy_depth, new_merkle_tree.canopy_depth);
}
/// Asserts the metadata invariants of a Merkle-tree rollover: the old tree is
/// marked as rolled over in `current_slot`, and the new tree inherits the old
/// tree's access and rollover configuration while pointing at `new_queue_pubkey`.
///
/// # Panics
/// Panics if any invariant listed in the comment block below is violated.
pub fn assert_rolledover_merkle_trees_metadata(
    old_merkle_tree_metadata: &MerkleTreeMetadata,
    new_merkle_tree_metadata: &MerkleTreeMetadata,
    current_slot: u64,
    new_queue_pubkey: &Pubkey,
) {
    // Old Merkle tree
    // 1. rolled over slot is set to current slot
    // 2. next Merkle tree is set to the new Merkle tree
    // New Merkle tree
    // 1. index is equal to the old Merkle tree index
    // 2. rollover fee is equal to the old Merkle tree rollover fee (the fee is calculated onchain in case rent should change the fee might be different)
    // 3. network_fee is equal to the old Merkle tree network_fee
    // 4. rollover threshold is equal to the old Merkle tree rollover threshold
    // 5. rolled over slot is set to u64::MAX (not rolled over)
    // 6. close threshold is equal to the old Merkle tree close threshold
    // 7. associated queue is equal to the new queue
    // 7. next merkle tree is set to Pubkey::default() (not set)
    // 8. owner is equal to the old Merkle tree owner
    // 9. delegate is equal to the old Merkle tree delegate
    assert_eq!(
        old_merkle_tree_metadata.access_metadata,
        new_merkle_tree_metadata.access_metadata
    );
    assert_eq!(
        old_merkle_tree_metadata.rollover_metadata.index,
        new_merkle_tree_metadata.rollover_metadata.index
    );
    assert_eq!(
        old_merkle_tree_metadata.rollover_metadata.rollover_fee,
        new_merkle_tree_metadata.rollover_metadata.rollover_fee,
    );
    assert_eq!(
        old_merkle_tree_metadata
            .rollover_metadata
            .rollover_threshold,
        new_merkle_tree_metadata
            .rollover_metadata
            .rollover_threshold,
    );
    assert_eq!(
        old_merkle_tree_metadata.rollover_metadata.network_fee,
        new_merkle_tree_metadata.rollover_metadata.network_fee,
    );
    // The old tree must record when it was rolled over.
    assert_eq!(
        old_merkle_tree_metadata.rollover_metadata.rolledover_slot,
        current_slot,
    );
    assert_eq!(
        old_merkle_tree_metadata.rollover_metadata.close_threshold,
        new_merkle_tree_metadata.rollover_metadata.close_threshold
    );
    assert_eq!(
        old_merkle_tree_metadata.rollover_metadata.additional_bytes,
        new_merkle_tree_metadata.rollover_metadata.additional_bytes
    );
    // The new tree links to the newly created queue and has no successor yet.
    assert_eq!(new_merkle_tree_metadata.associated_queue, *new_queue_pubkey);
    assert_eq!(new_merkle_tree_metadata.next_merkle_tree, Pubkey::default());
}
/// Asserts the metadata invariants of a queue rollover: the old queue is
/// marked as rolled over in `current_slot` and linked to the new queue, while
/// the new queue inherits the old queue's access/rollover configuration and
/// is associated with the new Merkle tree.
///
/// # Panics
/// Panics if any of the compared fields differ, or if the lamports balance
/// check at the end fails.
#[allow(clippy::too_many_arguments)]
pub fn assert_rolledover_queues_metadata(
    old_queue_metadata: &QueueMetadata,
    new_queue_metadata: &QueueMetadata,
    current_slot: u64,
    new_merkle_tree_pubkey: &Pubkey,
    new_queue_pubkey: &Pubkey,
    old_merkle_tree_lamports: u64,
    new_merkle_tree_lamports: u64,
    new_queue_lamports: u64,
) {
    assert_eq!(
        old_queue_metadata.rollover_metadata.rolledover_slot,
        current_slot
    );
    // NOTE(review): the original author flagged this comparison with
    // "Isn't this wrong???" — it asserts the new queue keeps the OLD queue's
    // index. Confirm against the on-chain rollover logic whether the index is
    // intended to be inherited or incremented.
    assert_eq!(
        old_queue_metadata.rollover_metadata.index,
        new_queue_metadata.rollover_metadata.index,
    );
    assert_eq!(
        old_queue_metadata.rollover_metadata.rollover_fee,
        new_queue_metadata.rollover_metadata.rollover_fee
    );
    assert_eq!(
        old_queue_metadata.rollover_metadata.network_fee,
        new_queue_metadata.rollover_metadata.network_fee
    );
    // u64::MAX is the sentinel for "not rolled over".
    assert_eq!(
        u64::MAX,
        new_queue_metadata.rollover_metadata.rolledover_slot
    );
    assert_eq!(
        old_queue_metadata.access_metadata.owner,
        new_queue_metadata.access_metadata.owner
    );
    assert_eq!(
        old_queue_metadata.access_metadata.program_owner,
        new_queue_metadata.access_metadata.program_owner
    );
    assert_eq!(
        new_queue_metadata.associated_merkle_tree,
        *new_merkle_tree_pubkey
    );
    assert_eq!(old_queue_metadata.next_queue, *new_queue_pubkey);
    // NOTE(review): as written this only holds when
    // `new_merkle_tree_lamports + new_queue_lamports == 0`, since
    // `old_merkle_tree_lamports` appears on both sides. It was presumably
    // meant to check that the old tree's lamports fund the two new accounts
    // (e.g. `old == new_merkle_tree_lamports + new_queue_lamports`) —
    // confirm the intended funding invariant before relying on this check.
    assert_eq!(
        old_merkle_tree_lamports,
        new_merkle_tree_lamports + new_queue_lamports + old_merkle_tree_lamports
    );
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/spl.rs
|
use anchor_spl::token::{Mint, TokenAccount};
use forester_utils::create_account_instruction;
use forester_utils::indexer::{Indexer, TokenDataWithContext};
use light_compressed_token::process_compress_spl_token_account::sdk::create_compress_spl_token_account_instruction;
use light_compressed_token::{
burn::sdk::{create_burn_instruction, CreateBurnInstructionInputs},
delegation::sdk::{
create_approve_instruction, create_revoke_instruction, CreateApproveInstructionInputs,
CreateRevokeInstructionInputs,
},
freeze::sdk::{create_instruction, CreateInstructionInputs},
get_token_pool_pda,
mint_sdk::{create_create_token_pool_instruction, create_mint_to_instruction},
process_transfer::{transfer_sdk::create_transfer_instruction, TokenTransferOutputData},
token_data::AccountState,
TokenData,
};
use light_hasher::Poseidon;
use light_system_program::{
invoke::processor::CompressedProof,
sdk::{compressed_account::MerkleContext, event::PublicTransactionEvent},
};
use solana_program_test::BanksClientError;
use solana_sdk::{
instruction::Instruction,
program_pack::Pack,
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
};
use spl_token::instruction::initialize_mint;
use crate::{
assert_compressed_tx::get_merkle_tree_snapshots,
assert_token_tx::{assert_create_mint, assert_mint_to, assert_transfer},
};
use light_client::rpc::errors::RpcError;
use light_client::rpc::RpcConnection;
use light_client::transaction_params::TransactionParams;
/// Mints compressed tokens to `recipients` (one amount per recipient) into
/// the state tree at `merkle_tree_pubkey` and records the resulting event in
/// the test indexer.
///
/// Convenience wrapper around [`mint_tokens_helper_with_lamports`] with no
/// extra lamports attached to the minted accounts.
pub async fn mint_tokens_helper<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &mut I,
    merkle_tree_pubkey: &Pubkey,
    mint_authority: &Keypair,
    mint: &Pubkey,
    amounts: Vec<u64>,
    recipients: Vec<Pubkey>,
) {
    mint_tokens_helper_with_lamports(
        rpc,
        test_indexer,
        merkle_tree_pubkey,
        mint_authority,
        mint,
        amounts,
        recipients,
        None,
    )
    .await
}
/// Mints `amount` plain (uncompressed) SPL tokens to `token_account`,
/// selecting the legacy token program or Token-2022 based on `is_token_22`.
/// The transaction is paid for and signed by `mint_authority`.
pub async fn mint_spl_tokens<R: RpcConnection>(
    rpc: &mut R,
    mint: &Pubkey,
    token_account: &Pubkey,
    token_owner: &Pubkey,
    mint_authority: &Keypair,
    amount: u64,
    is_token_22: bool,
) -> Result<Signature, RpcError> {
    let authority_pubkey = mint_authority.pubkey();
    // Build the mint-to instruction against the token program that owns the
    // mint; the two builders share the same argument shape.
    let mint_to_instruction = match is_token_22 {
        true => spl_token_2022::instruction::mint_to(
            &spl_token_2022::ID,
            mint,
            token_account,
            token_owner,
            &[&authority_pubkey],
            amount,
        ),
        false => spl_token::instruction::mint_to(
            &spl_token::ID,
            mint,
            token_account,
            token_owner,
            &[&authority_pubkey],
            amount,
        ),
    }
    .unwrap();
    rpc.create_and_send_transaction(&[mint_to_instruction], &authority_pubkey, &[mint_authority])
        .await
}
/// Mints compressed tokens to `recipients` with optional `lamports` attached
/// to each minted account, using the legacy SPL token program.
///
/// Convenience wrapper around [`mint_tokens_22_helper_with_lamports`] with
/// `token_22 = false`.
#[allow(clippy::too_many_arguments)]
pub async fn mint_tokens_helper_with_lamports<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &mut I,
    merkle_tree_pubkey: &Pubkey,
    mint_authority: &Keypair,
    mint: &Pubkey,
    amounts: Vec<u64>,
    recipients: Vec<Pubkey>,
    lamports: Option<u64>,
) {
    mint_tokens_22_helper_with_lamports(
        rpc,
        test_indexer,
        merkle_tree_pubkey,
        mint_authority,
        mint,
        amounts,
        recipients,
        lamports,
        false,
    )
    .await;
}
/// Mints compressed tokens to `recipients` (one amount per recipient, with an
/// optional lamports top-up per account), sends the transaction, feeds the
/// emitted [`PublicTransactionEvent`] into the test indexer, and asserts the
/// mint outcome against pre-transaction snapshots (Merkle tree state, mint
/// supply, and token-pool balance).
///
/// # Panics
/// Panics on any RPC failure, on account unpacking failures, or if
/// [`assert_mint_to`] detects an inconsistency.
#[allow(clippy::too_many_arguments)]
pub async fn mint_tokens_22_helper_with_lamports<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &mut I,
    merkle_tree_pubkey: &Pubkey,
    mint_authority: &Keypair,
    mint: &Pubkey,
    amounts: Vec<u64>,
    recipients: Vec<Pubkey>,
    lamports: Option<u64>,
    token_22: bool,
) {
    let payer_pubkey = mint_authority.pubkey();
    let instruction = create_mint_to_instruction(
        &payer_pubkey,
        &payer_pubkey,
        mint,
        merkle_tree_pubkey,
        amounts.clone(),
        recipients.clone(),
        lamports,
        token_22,
    );
    // Snapshot the output tree(s) and the SPL-side balances BEFORE sending,
    // so the post-transaction assertions can verify the deltas.
    let output_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&vec![*merkle_tree_pubkey; amounts.len()]);
    let snapshots = get_merkle_tree_snapshots::<R>(rpc, &output_merkle_tree_accounts).await;
    let previous_mint_supply =
        spl_token::state::Mint::unpack(&rpc.get_account(*mint).await.unwrap().unwrap().data)
            .unwrap()
            .supply;
    let pool: Pubkey = get_token_pool_pda(mint);
    let previous_pool_amount =
        spl_token::state::Account::unpack(&rpc.get_account(pool).await.unwrap().unwrap().data)
            .unwrap()
            .amount;
    let (event, _signature, _) = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &payer_pubkey,
            &[mint_authority],
            None,
        )
        .await
        .unwrap()
        .unwrap();
    // Keep the test indexer in sync with the on-chain state transition.
    let (_, created_token_accounts) = test_indexer.add_event_and_compressed_accounts(&event);
    assert_mint_to(
        rpc,
        test_indexer,
        &recipients,
        *mint,
        amounts.as_slice(),
        &snapshots,
        &created_token_accounts,
        previous_mint_supply,
        previous_pool_amount,
    )
    .await;
}
/// Creates an SPL mint (with `mint_authority` as authority) plus its
/// compressed-token pool, using `mint_keypair` as the mint address when given
/// or a freshly generated keypair otherwise. Returns the mint's pubkey.
pub async fn create_token_pool<R: RpcConnection>(
    rpc: &mut R,
    payer: &Keypair,
    mint_authority: &Pubkey,
    decimals: u8,
    mint_keypair: Option<&Keypair>,
) -> Pubkey {
    // Fall back to a freshly generated keypair when the caller did not
    // supply one; the fallback must outlive the borrow below.
    let generated = Keypair::new();
    let mint_keypair = mint_keypair.unwrap_or(&generated);
    let mint_pubkey = mint_keypair.pubkey();
    let mint_rent = rpc
        .get_minimum_balance_for_rent_exemption(Mint::LEN)
        .await
        .unwrap();
    let (instructions, _) = create_initialize_mint_instructions(
        &payer.pubkey(),
        mint_authority,
        mint_rent,
        decimals,
        mint_keypair,
    );
    rpc.create_and_send_transaction(&instructions, &payer.pubkey(), &[payer, mint_keypair])
        .await
        .unwrap();
    mint_pubkey
}
/// Creates a fresh 2-decimal SPL mint (legacy token program) with `payer` as
/// mint and freeze authority, creates its token pool, asserts the resulting
/// on-chain state, and returns the new mint's pubkey.
pub async fn create_mint_helper<R: RpcConnection>(rpc: &mut R, payer: &Keypair) -> Pubkey {
    let authority = payer.pubkey();
    let mint_keypair = Keypair::new();
    let rent_exemption = rpc
        .get_minimum_balance_for_rent_exemption(Mint::LEN)
        .await
        .unwrap();
    let (instructions, pool_pubkey) = create_initialize_mint_instructions(
        &authority,
        &authority,
        rent_exemption,
        2,
        &mint_keypair,
    );
    rpc.create_and_send_transaction(&instructions, &authority, &[payer, &mint_keypair])
        .await
        .unwrap();
    assert_create_mint(rpc, &authority, &mint_keypair.pubkey(), &pool_pubkey).await;
    mint_keypair.pubkey()
}
/// Creates a fresh 2-decimal Token-2022 mint with `payer` as mint and freeze
/// authority, creates its token pool, asserts the resulting on-chain state,
/// and returns the new mint's pubkey.
pub async fn create_mint_22_helper<R: RpcConnection>(rpc: &mut R, payer: &Keypair) -> Pubkey {
    let authority = payer.pubkey();
    let mint_keypair = Keypair::new();
    let rent_exemption = rpc
        .get_minimum_balance_for_rent_exemption(Mint::LEN)
        .await
        .unwrap();
    let (instructions, pool_pubkey) = create_initialize_mint_22_instructions(
        &authority,
        &authority,
        rent_exemption,
        2,
        &mint_keypair,
        true,
    );
    rpc.create_and_send_transaction(&instructions, &authority, &[payer, &mint_keypair])
        .await
        .unwrap();
    assert_create_mint(rpc, &authority, &mint_keypair.pubkey(), &pool_pubkey).await;
    mint_keypair.pubkey()
}
/// Wraps `amount` lamports into the wSOL `token_account`: transfers SOL from
/// `payer` to the account and issues a `sync_native` so the token balance
/// reflects the newly deposited lamports.
pub async fn mint_wrapped_sol<R: RpcConnection>(
    rpc: &mut R,
    payer: &Keypair,
    token_account: &Pubkey,
    amount: u64,
    is_token_22: bool,
) -> Result<Signature, RpcError> {
    let transfer_ix = anchor_lang::solana_program::system_instruction::transfer(
        &payer.pubkey(),
        token_account,
        amount,
    );
    // Build sync_native against whichever token program owns the account;
    // instruction-builder errors are surfaced as RPC custom errors.
    let sync_native_ix = match is_token_22 {
        true => spl_token_2022::instruction::sync_native(&spl_token_2022::ID, token_account),
        false => spl_token::instruction::sync_native(&spl_token::ID, token_account),
    }
    .map_err(|e| RpcError::CustomError(format!("{:?}", e)))?;
    rpc.create_and_send_transaction(&[transfer_ix, sync_native_ix], &payer.pubkey(), &[payer])
        .await
}
/// Builds the instructions to create and initialize a legacy SPL mint plus
/// its compressed-token pool. Returns the four instructions and the pool PDA.
///
/// Convenience wrapper around [`create_initialize_mint_22_instructions`] with
/// `token_22 = false`.
pub fn create_initialize_mint_instructions(
    payer: &Pubkey,
    authority: &Pubkey,
    rent: u64,
    decimals: u8,
    mint_keypair: &Keypair,
) -> ([Instruction; 4], Pubkey) {
    create_initialize_mint_22_instructions(payer, authority, rent, decimals, mint_keypair, false)
}
/// Builds the four instructions needed to create an SPL mint (legacy or
/// Token-2022) together with its compressed-token pool:
/// 1. create the mint account, 2. initialize the mint (`authority` is also
/// the freeze authority), 3. transfer `rent` lamports from `payer` to the
/// mint account, 4. create the token-pool PDA.
///
/// Returns the instructions in execution order and the pool PDA.
pub fn create_initialize_mint_22_instructions(
    payer: &Pubkey,
    authority: &Pubkey,
    rent: u64,
    decimals: u8,
    mint_keypair: &Keypair,
    token_22: bool,
) -> ([Instruction; 4], Pubkey) {
    let mint_pubkey = mint_keypair.pubkey();
    let program_id = match token_22 {
        true => anchor_spl::token_2022::ID,
        false => spl_token::ID,
    };
    let create_account_ix =
        create_account_instruction(payer, Mint::LEN, rent, &program_id, Some(mint_keypair));
    // Both builders take the same arguments; only the crate differs.
    let init_mint_ix = match token_22 {
        true => spl_token_2022::instruction::initialize_mint(
            &program_id,
            &mint_pubkey,
            authority,
            Some(authority),
            decimals,
        )
        .unwrap(),
        false => initialize_mint(
            &program_id,
            &mint_pubkey,
            authority,
            Some(authority),
            decimals,
        )
        .unwrap(),
    };
    let fund_mint_ix =
        anchor_lang::solana_program::system_instruction::transfer(payer, &mint_pubkey, rent);
    let create_pool_ix = create_create_token_pool_instruction(payer, &mint_pubkey, token_22);
    (
        [create_account_ix, init_mint_ix, fund_mint_ix, create_pool_ix],
        get_token_pool_pda(&mint_pubkey),
    )
}
/// Creates a legacy SPL token account at `account_keypair` and initializes it
/// with the given mint and owner. Useful for setting up accounts in SPL
/// compression/decompression tests.
///
/// Convenience wrapper around [`create_token_2022_account`] with
/// `token_22 = false`.
pub async fn create_token_account<R: RpcConnection>(
    rpc: &mut R,
    mint: &Pubkey,
    account_keypair: &Keypair,
    owner: &Keypair,
) -> Result<(), BanksClientError> {
    create_token_2022_account(rpc, mint, account_keypair, owner, false).await
}
/// Creates and initializes an SPL token account for `mint`, owned and paid
/// for by `owner`, at the address of `account_keypair`. Uses the legacy
/// token program or Token-2022 depending on `token_22`.
///
/// # Panics
/// Panics on any RPC failure (the `Result` return exists for signature
/// compatibility; errors inside are unwrapped).
pub async fn create_token_2022_account<R: RpcConnection>(
    rpc: &mut R,
    mint: &Pubkey,
    account_keypair: &Keypair,
    owner: &Keypair,
    token_22: bool,
) -> Result<(), BanksClientError> {
    // Select the program id and account size matching the target program.
    let (program_id, account_len) = if token_22 {
        (spl_token_2022::ID, spl_token_2022::state::Account::LEN)
    } else {
        (spl_token::ID, spl_token::state::Account::LEN)
    };
    let rent = rpc
        .get_minimum_balance_for_rent_exemption(account_len)
        .await
        .unwrap();
    // Allocate `account_len` bytes: the original sized the allocation with
    // the legacy `TokenAccount::LEN` while computing rent from `account_len`.
    // Both are 165 bytes for base accounts, but deriving both from the
    // selected program keeps size and rent consistent.
    let account_create_ix = create_account_instruction(
        &owner.pubkey(),
        account_len,
        rent,
        &program_id,
        Some(account_keypair),
    );
    let initialize_ix = if token_22 {
        spl_token_2022::instruction::initialize_account(
            &program_id,
            &account_keypair.pubkey(),
            mint,
            &owner.pubkey(),
        )
        .unwrap()
    } else {
        spl_token::instruction::initialize_account(
            &program_id,
            &account_keypair.pubkey(),
            mint,
            &owner.pubkey(),
        )
        .unwrap()
    };
    rpc.create_and_send_transaction(
        &[account_create_ix, initialize_ix],
        &owner.pubkey(),
        &[account_keypair, owner],
    )
    .await
    .unwrap();
    Ok(())
}
/// Transfers compressed tokens from `from` to `recipients` (one amount and
/// one output tree per recipient) and asserts the result.
///
/// Convenience wrapper around [`compressed_transfer_22_test`] with
/// `token_22 = false`.
#[allow(clippy::too_many_arguments)]
pub async fn compressed_transfer_test<R: RpcConnection, I: Indexer<R>>(
    payer: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    mint: &Pubkey,
    from: &Keypair,
    recipients: &[Pubkey],
    amounts: &[u64],
    lamports: Option<Vec<Option<u64>>>,
    input_compressed_accounts: &[TokenDataWithContext],
    output_merkle_tree_pubkeys: &[Pubkey],
    delegate_change_account_index: Option<u8>,
    delegate_is_signer: bool,
    transaction_params: Option<TransactionParams>,
) {
    compressed_transfer_22_test(
        payer,
        rpc,
        test_indexer,
        mint,
        from,
        recipients,
        amounts,
        lamports,
        input_compressed_accounts,
        output_merkle_tree_pubkeys,
        delegate_change_account_index,
        delegate_is_signer,
        transaction_params,
        false,
    )
    .await;
}
/// End-to-end compressed token transfer test: spends
/// `input_compressed_accounts`, creates one output per recipient (plus a
/// change account back to `from` if input amounts exceed the outputs),
/// fetches a validity proof from the indexer, sends the transaction, and
/// asserts the state transition against pre-transaction Merkle tree
/// snapshots.
///
/// When `delegate_is_signer` is true, `payer` signs as the delegate instead
/// of `from`. `delegate_change_account_index` marks which output keeps the
/// delegate set. `lamports` optionally attaches lamports per output.
///
/// # Panics
/// Panics if `recipients`, `amounts`, and `output_merkle_tree_pubkeys`
/// lengths mismatch, on any RPC/proof failure, or if [`assert_transfer`]
/// detects an inconsistency.
#[allow(clippy::too_many_arguments)]
pub async fn compressed_transfer_22_test<R: RpcConnection, I: Indexer<R>>(
    payer: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    mint: &Pubkey,
    from: &Keypair,
    recipients: &[Pubkey],
    amounts: &[u64],
    mut lamports: Option<Vec<Option<u64>>>,
    input_compressed_accounts: &[TokenDataWithContext],
    output_merkle_tree_pubkeys: &[Pubkey],
    delegate_change_account_index: Option<u8>,
    delegate_is_signer: bool,
    transaction_params: Option<TransactionParams>,
    token_22: bool,
) {
    if recipients.len() != amounts.len() && amounts.len() != output_merkle_tree_pubkeys.len() {
        println!("{:?}", recipients);
        println!("{:?}", amounts);
        println!("{:?}", output_merkle_tree_pubkeys);
        panic!("recipients, amounts, and output_merkle_tree_pubkeys must have the same length");
    }
    // Collect per-input data: token data, account hashes (for the proof),
    // the total input amount, and the Merkle contexts.
    let mut input_merkle_tree_context = Vec::new();
    let mut input_compressed_account_token_data = Vec::new();
    let mut input_compressed_account_hashes = Vec::new();
    let mut sum_input_amounts = 0;
    for account in input_compressed_accounts {
        let leaf_index = account.compressed_account.merkle_context.leaf_index;
        input_compressed_account_token_data.push(account.token_data.clone());
        input_compressed_account_hashes.push(
            account
                .compressed_account
                .compressed_account
                .hash::<Poseidon>(
                    &account.compressed_account.merkle_context.merkle_tree_pubkey,
                    &leaf_index,
                )
                .unwrap(),
        );
        sum_input_amounts += account.token_data.amount;
        input_merkle_tree_context.push(MerkleContext {
            merkle_tree_pubkey: account.compressed_account.merkle_context.merkle_tree_pubkey,
            nullifier_queue_pubkey: account
                .compressed_account
                .merkle_context
                .nullifier_queue_pubkey,
            leaf_index,
            queue_index: None,
        });
    }
    let output_lamports = lamports
        .clone()
        .unwrap_or_else(|| vec![None; recipients.len()]);
    // Build one output account per recipient; `sum_input_amounts` tracks the
    // remainder after subtracting each output amount.
    let mut output_compressed_accounts = Vec::new();
    for (((recipient, amount), merkle_tree_pubkey), lamports) in recipients
        .iter()
        .zip(amounts)
        .zip(output_merkle_tree_pubkeys)
        .zip(output_lamports)
    {
        let account = TokenTransferOutputData {
            amount: *amount,
            owner: *recipient,
            lamports,
            merkle_tree: *merkle_tree_pubkey,
        };
        sum_input_amounts -= amount;
        output_compressed_accounts.push(account);
    }
    // add change compressed account if tokens are left
    if sum_input_amounts > 0 {
        let account = TokenTransferOutputData {
            amount: sum_input_amounts,
            owner: from.pubkey(),
            lamports: None,
            merkle_tree: *output_merkle_tree_pubkeys.last().unwrap(),
        };
        output_compressed_accounts.push(account);
    }
    let input_merkle_tree_pubkeys: Vec<Pubkey> = input_merkle_tree_context
        .iter()
        .map(|x| x.merkle_tree_pubkey)
        .collect();
    println!("{:?}", input_compressed_accounts);
    println!(
        "input_compressed_account_hashes: {:?}",
        input_compressed_account_hashes
    );
    // Validity proof for the inputs being spent.
    let proof_rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&input_compressed_account_hashes),
            Some(&input_merkle_tree_pubkeys),
            None,
            None,
            rpc,
        )
        .await;
    output_compressed_accounts.sort_by(|a, b| a.merkle_tree.cmp(&b.merkle_tree));
    // When the delegate signs, `payer` acts as the transaction authority.
    let delegate_pubkey = if delegate_is_signer {
        Some(payer.pubkey())
    } else {
        None
    };
    let authority_signer = if delegate_is_signer { payer } else { from };
    let instruction = create_transfer_instruction(
        &payer.pubkey(),
        &authority_signer.pubkey(), // authority
        &input_merkle_tree_context,
        &output_compressed_accounts,
        &proof_rpc_result.root_indices,
        &Some(proof_rpc_result.proof),
        &input_compressed_account_token_data, // input_token_data
        &input_compressed_accounts
            .iter()
            .map(|x| &x.compressed_account.compressed_account)
            .cloned()
            .collect::<Vec<_>>(),
        *mint,
        delegate_pubkey, // owner_if_delegate_change_account_index
        false,           // is_compress
        None,            // compression_amount
        None,            // token_pool_pda
        None,            // compress_or_decompress_token_account
        true,
        delegate_change_account_index,
        None,
        token_22,
    )
    .unwrap();
    let sum_input_lamports = input_compressed_accounts
        .iter()
        .map(|x| &x.compressed_account.compressed_account.lamports)
        .sum::<u64>();
    let sum_output_lamports = output_compressed_accounts
        .iter()
        .map(|x| x.lamports.unwrap_or(0))
        .sum::<u64>();
    let sum_output_amounts = output_compressed_accounts
        .iter()
        .map(|x| x.amount)
        .sum::<u64>();
    // If lamports or (in the delegate case) token amounts are left over, an
    // extra change output lands in the last output tree — extend the expected
    // output tree list (and lamports expectations) accordingly.
    let output_merkle_tree_pubkeys = if sum_input_lamports > sum_output_lamports
        || sum_input_amounts > sum_output_amounts && delegate_is_signer
    {
        let mut output_merkle_tree_pubkeys = output_merkle_tree_pubkeys.to_vec();
        output_merkle_tree_pubkeys.push(*output_merkle_tree_pubkeys.last().unwrap());
        if let Some(lamports) = &mut lamports {
            if sum_input_lamports != sum_output_lamports {
                lamports.push(Some(sum_input_lamports - sum_output_lamports));
            } else {
                lamports.push(None);
            }
        }
        output_merkle_tree_pubkeys
    } else {
        output_merkle_tree_pubkeys.to_vec()
    };
    // Snapshot input and output trees before sending for delta assertions.
    let output_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(output_merkle_tree_pubkeys.as_slice());
    let input_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys);
    let snapshots =
        get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await;
    let input_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await;
    let (event, _signature, _) = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &payer.pubkey(),
            &[payer, authority_signer],
            transaction_params,
        )
        .await
        .unwrap()
        .unwrap();
    let (created_change_output_account, created_token_output_accounts) =
        test_indexer.add_event_and_compressed_accounts(&event);
    // Expected delegate per output: only the marked index keeps the delegate.
    let delegates = if let Some(index) = delegate_change_account_index {
        let mut delegates = vec![None; created_token_output_accounts.len()];
        delegates[index as usize] = Some(payer.pubkey());
        Some(delegates)
    } else {
        None
    };
    let mut created_output_accounts = Vec::new();
    created_token_output_accounts.iter().for_each(|x| {
        created_output_accounts.push(x.compressed_account.clone());
    });
    created_change_output_account.iter().for_each(|x| {
        created_output_accounts.push(x.clone());
    });
    assert_transfer(
        rpc,
        test_indexer,
        &output_compressed_accounts,
        created_output_accounts.as_slice(),
        lamports,
        &input_compressed_account_hashes,
        &snapshots,
        &input_snapshots,
        &event,
        delegates,
    )
    .await;
}
/// Decompresses `amount` tokens from `input_compressed_accounts` into the SPL
/// `recipient_token_account`, keeping the remainder as a compressed change
/// account owned by `payer` in `output_merkle_tree_pubkey`. Asserts both the
/// compressed state transition and the SPL balance delta.
///
/// # Panics
/// Panics on any RPC/proof failure, or if the resulting compressed state or
/// SPL token balance does not match expectations.
#[allow(clippy::too_many_arguments)]
pub async fn decompress_test<R: RpcConnection, I: Indexer<R>>(
    payer: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    input_compressed_accounts: Vec<TokenDataWithContext>,
    amount: u64,
    output_merkle_tree_pubkey: &Pubkey,
    recipient_token_account: &Pubkey,
    transaction_params: Option<TransactionParams>,
    is_token_22: bool,
) {
    // Everything not decompressed stays compressed as change for `payer`.
    let max_amount: u64 = input_compressed_accounts
        .iter()
        .map(|x| x.token_data.amount)
        .sum();
    let change_out_compressed_account = TokenTransferOutputData {
        amount: max_amount - amount,
        owner: payer.pubkey(),
        lamports: None,
        merkle_tree: *output_merkle_tree_pubkey,
    };
    let input_compressed_account_hashes = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.hash().unwrap())
        .collect::<Vec<_>>();
    let input_merkle_tree_pubkeys = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
        .collect::<Vec<_>>();
    // Validity proof for the spent inputs.
    let proof_rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&input_compressed_account_hashes),
            Some(&input_merkle_tree_pubkeys),
            None,
            None,
            rpc,
        )
        .await;
    let mint = input_compressed_accounts[0].token_data.mint;
    let instruction = create_transfer_instruction(
        &rpc.get_payer().pubkey(),
        &payer.pubkey(), // authority
        &input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.merkle_context)
            .collect::<Vec<_>>(), // input_compressed_account_merkle_tree_pubkeys
        &[change_out_compressed_account], // output_compressed_accounts
        &proof_rpc_result.root_indices, // root_indices
        &Some(proof_rpc_result.proof),
        input_compressed_accounts
            .iter()
            .map(|x| x.token_data.clone())
            .collect::<Vec<_>>()
            .as_slice(), // input_token_data
        &input_compressed_accounts
            .iter()
            .map(|x| &x.compressed_account.compressed_account)
            .cloned()
            .collect::<Vec<_>>(),
        mint,                            // mint
        None,                            // owner_if_delegate_change_account_index
        false,                           // is_compress
        Some(amount),                    // compression_amount
        Some(get_token_pool_pda(&mint)), // token_pool_pda
        Some(*recipient_token_account),  // compress_or_decompress_token_account
        true,
        None,
        None,
        is_token_22,
    )
    .unwrap();
    // Snapshot input/output trees and the SPL balance before sending.
    let output_merkle_tree_pubkeys = vec![*output_merkle_tree_pubkey];
    let output_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys);
    let input_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys);
    let output_merkle_tree_test_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await;
    let input_merkle_tree_test_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await;
    let recipient_token_account_data_pre = spl_token::state::Account::unpack(
        &rpc.get_account(*recipient_token_account)
            .await
            .unwrap()
            .unwrap()
            .data,
    )
    .unwrap();
    let context_payer = rpc.get_payer().insecure_clone();
    let (event, _signature, _) = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &context_payer.pubkey(),
            &[&context_payer, payer],
            transaction_params,
        )
        .await
        .unwrap()
        .unwrap();
    let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event);
    assert_transfer(
        rpc,
        test_indexer,
        &[change_out_compressed_account],
        created_output_accounts
            .iter()
            .map(|x| x.compressed_account.clone())
            .collect::<Vec<_>>()
            .as_slice(),
        None,
        input_compressed_account_hashes.as_slice(),
        &output_merkle_tree_test_snapshots,
        &input_merkle_tree_test_snapshots,
        &event,
        None,
    )
    .await;
    // The SPL recipient must have received exactly `amount`.
    let recipient_token_account_data = spl_token::state::Account::unpack(
        &rpc.get_account(*recipient_token_account)
            .await
            .unwrap()
            .unwrap()
            .data,
    )
    .unwrap();
    assert_eq!(
        recipient_token_account_data.amount,
        recipient_token_account_data_pre.amount + amount
    );
}
/// Compresses the balance of an SPL `token_account` into a compressed token
/// account in `merkle_tree_pubkey`, optionally leaving `remaining_amount`
/// behind in the SPL account. Verifies the created compressed account's token
/// data and, when applicable, the SPL account's remaining balance.
///
/// # Panics
/// Panics on account unpacking failures or failed post-condition assertions;
/// RPC errors from sending the transaction are returned as `Err`.
#[allow(clippy::too_many_arguments)]
pub async fn perform_compress_spl_token_account<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &mut I,
    payer: &Keypair,
    token_owner: &Keypair,
    mint: &Pubkey,
    token_account: &Pubkey,
    merkle_tree_pubkey: &Pubkey,
    remaining_amount: Option<u64>,
    is_token_22: bool,
) -> Result<(), RpcError> {
    // Balance before compression; the compressed amount is the pre-balance
    // minus whatever is left behind.
    let pre_token_account_amount = spl_token::state::Account::unpack(
        &rpc.get_account(*token_account).await.unwrap().unwrap().data,
    )
    .unwrap()
    .amount;
    let instruction = create_compress_spl_token_account_instruction(
        &token_owner.pubkey(),
        remaining_amount,
        None,
        &payer.pubkey(),
        &token_owner.pubkey(),
        mint,
        merkle_tree_pubkey,
        token_account,
        is_token_22,
    );
    let (event, _, _) = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &token_owner.pubkey(),
            &[payer, token_owner],
            None,
        )
        .await?
        .unwrap();
    test_indexer.add_event_and_compressed_accounts(&event);
    let created_compressed_token_account =
        test_indexer.get_compressed_token_accounts_by_owner(&token_owner.pubkey())[0].clone();
    let expected_token_data = TokenData {
        amount: pre_token_account_amount - remaining_amount.unwrap_or_default(),
        mint: *mint,
        owner: token_owner.pubkey(),
        state: AccountState::Initialized,
        delegate: None,
        tlv: None,
    };
    assert_eq!(
        created_compressed_token_account.token_data,
        expected_token_data
    );
    assert_eq!(
        created_compressed_token_account
            .compressed_account
            .merkle_context
            .merkle_tree_pubkey,
        *merkle_tree_pubkey
    );
    // When a remainder was requested, the SPL account must hold exactly it.
    if let Some(remaining_amount) = remaining_amount {
        let post_token_account_amount = spl_token::state::Account::unpack(
            &rpc.get_account(*token_account).await.unwrap().unwrap().data,
        )
        .unwrap()
        .amount;
        assert_eq!(post_token_account_amount, remaining_amount);
    }
    Ok(())
}
/// Compresses `amount` tokens from the SPL `sender_token_account` into a new
/// compressed token account owned by `payer` in `output_merkle_tree_pubkey`.
/// Asserts the compressed state transition and that the SPL balance decreased
/// by exactly `amount`.
///
/// # Panics
/// Panics on any RPC failure or failed post-condition assertion.
#[allow(clippy::too_many_arguments)]
pub async fn compress_test<R: RpcConnection, I: Indexer<R>>(
    payer: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    amount: u64,
    mint: &Pubkey,
    output_merkle_tree_pubkey: &Pubkey,
    sender_token_account: &Pubkey,
    transaction_params: Option<TransactionParams>,
    is_token_22: bool,
) {
    let output_compressed_account = TokenTransferOutputData {
        amount,
        owner: payer.pubkey(),
        lamports: None,
        merkle_tree: *output_merkle_tree_pubkey,
    };
    // Compression spends no compressed inputs, so no proof is needed.
    let instruction = create_transfer_instruction(
        &rpc.get_payer().pubkey(),
        &payer.pubkey(),              // authority
        &Vec::new(),                  // input_compressed_account_merkle_tree_pubkeys
        &[output_compressed_account], // output_compressed_accounts
        &Vec::new(),                  // root_indices
        &None,
        &Vec::new(), // input_token_data
        &Vec::new(), // input_compressed_accounts
        *mint,       // mint
        None,        // owner_if_delegate_is_signer
        true,        // is_compress
        Some(amount), // compression_amount
        Some(get_token_pool_pda(mint)), // token_pool_pda
        Some(*sender_token_account), // compress_or_decompress_token_account
        true,
        None,
        None,
        is_token_22,
    )
    .unwrap();
    // Snapshot the output tree and the SPL balance before sending.
    let output_merkle_tree_pubkeys = vec![*output_merkle_tree_pubkey];
    let output_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys);
    let output_merkle_tree_test_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await;
    let input_merkle_tree_test_snapshots = Vec::new();
    let recipient_token_account_data_pre = spl_token::state::Account::unpack(
        &rpc.get_account(*sender_token_account)
            .await
            .unwrap()
            .unwrap()
            .data,
    )
    .unwrap();
    let context_payer = rpc.get_payer().insecure_clone();
    let (event, _signature, _) = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &payer.pubkey(),
            &[&context_payer, payer],
            transaction_params,
        )
        .await
        .unwrap()
        .unwrap();
    let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event);
    assert_transfer(
        rpc,
        test_indexer,
        &[output_compressed_account],
        created_output_accounts
            .iter()
            .map(|x| x.compressed_account.clone())
            .collect::<Vec<_>>()
            .as_slice(),
        None,
        Vec::new().as_slice(),
        &output_merkle_tree_test_snapshots,
        &input_merkle_tree_test_snapshots,
        &event,
        None,
    )
    .await;
    // The sender's SPL balance must have decreased by exactly `amount`.
    let recipient_token_account_data = spl_token::state::Account::unpack(
        &rpc.get_account(*sender_token_account)
            .await
            .unwrap()
            .unwrap()
            .data,
    )
    .unwrap();
    assert_eq!(
        recipient_token_account_data.amount,
        recipient_token_account_data_pre.amount - amount
    );
}
#[allow(clippy::too_many_arguments)]
pub async fn approve_test<R: RpcConnection, I: Indexer<R>>(
authority: &Keypair,
rpc: &mut R,
test_indexer: &mut I,
input_compressed_accounts: Vec<TokenDataWithContext>,
delegated_amount: u64,
delegate_lamports: Option<u64>,
delegate: &Pubkey,
delegated_compressed_account_merkle_tree: &Pubkey,
change_compressed_account_merkle_tree: &Pubkey,
transaction_params: Option<TransactionParams>,
) {
let input_compressed_account_hashes = input_compressed_accounts
.iter()
.map(|x| x.compressed_account.hash().unwrap())
.collect::<Vec<_>>();
let input_merkle_tree_pubkeys = input_compressed_accounts
.iter()
.map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
.collect::<Vec<_>>();
println!(
"input_compressed_account_hashes: {:?}",
input_compressed_account_hashes
);
println!("input compressed accounts: {:?}", input_compressed_accounts);
let proof_rpc_result = test_indexer
.create_proof_for_compressed_accounts(
Some(&input_compressed_account_hashes),
Some(&input_merkle_tree_pubkeys),
None,
None,
rpc,
)
.await;
let mint = input_compressed_accounts[0].token_data.mint;
let inputs = CreateApproveInstructionInputs {
fee_payer: rpc.get_payer().pubkey(),
authority: authority.pubkey(),
input_merkle_contexts: input_compressed_accounts
.iter()
.map(|x| x.compressed_account.merkle_context)
.collect(),
input_token_data: input_compressed_accounts
.iter()
.map(|x| x.token_data.clone())
.collect(),
input_compressed_accounts: input_compressed_accounts
.iter()
.map(|x| &x.compressed_account.compressed_account)
.cloned()
.collect::<Vec<_>>(),
mint,
delegated_amount,
delegate_lamports,
delegated_compressed_account_merkle_tree: *delegated_compressed_account_merkle_tree,
change_compressed_account_merkle_tree: *change_compressed_account_merkle_tree,
delegate: *delegate,
root_indices: proof_rpc_result.root_indices,
proof: proof_rpc_result.proof,
};
let instruction = create_approve_instruction(inputs).unwrap();
let mut output_merkle_tree_pubkeys = vec![*delegated_compressed_account_merkle_tree];
let input_amount = input_compressed_accounts
.iter()
.map(|x| x.token_data.amount)
.sum::<u64>();
let change_amount = input_amount - delegated_amount;
let input_lamports = input_compressed_accounts
.iter()
.map(|x| x.compressed_account.compressed_account.lamports)
.sum::<u64>();
let (change_lamports, change_lamports_greater_zero) =
if let Some(delegate_lamports) = delegate_lamports {
let change_lamports = input_lamports - delegate_lamports;
let option_change_lamports = if change_lamports > 0 {
Some(change_lamports)
} else {
None
};
(
Some(vec![Some(delegate_lamports), option_change_lamports]),
change_lamports > 0,
)
} else if input_lamports > 0 {
(Some(vec![None, Some(input_lamports)]), true)
} else {
(None, false)
};
if change_lamports_greater_zero || change_amount > 0 {
output_merkle_tree_pubkeys.push(*change_compressed_account_merkle_tree);
}
let output_merkle_tree_accounts =
test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys);
let output_merkle_tree_test_snapshots =
get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await;
let input_merkle_tree_accounts =
test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys);
let input_merkle_tree_test_snapshots =
get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await;
let context_payer = rpc.get_payer().insecure_clone();
let (event, _signature, _) = rpc
.create_and_send_transaction_with_event::<PublicTransactionEvent>(
&[instruction],
&context_payer.pubkey(),
&[&context_payer, authority],
transaction_params,
)
.await
.unwrap()
.unwrap();
let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event);
let expected_delegated_token_data = TokenData {
mint,
owner: authority.pubkey(),
amount: delegated_amount,
delegate: Some(*delegate),
state: AccountState::Initialized,
tlv: None,
};
assert_eq!(
expected_delegated_token_data,
created_output_accounts[0].token_data
);
let mut expected_token_data = vec![expected_delegated_token_data];
let mut delegates = vec![Some(*delegate)];
if delegated_amount != input_amount {
let expected_change_token_data = TokenData {
mint,
owner: authority.pubkey(),
amount: change_amount,
delegate: None,
state: AccountState::Initialized,
tlv: None,
};
assert_eq!(
expected_change_token_data,
created_output_accounts[1].token_data
);
expected_token_data.push(expected_change_token_data);
delegates.push(None);
}
let expected_compressed_output_accounts =
create_expected_token_output_data(expected_token_data, &output_merkle_tree_pubkeys);
assert_transfer(
rpc,
test_indexer,
expected_compressed_output_accounts.as_slice(),
created_output_accounts
.iter()
.map(|x| x.compressed_account.clone())
.collect::<Vec<_>>()
.as_slice(),
change_lamports,
input_compressed_account_hashes.as_slice(),
&output_merkle_tree_test_snapshots,
&input_merkle_tree_test_snapshots,
&event,
Some(delegates),
)
.await;
}
/// Revokes the delegation on all `input_compressed_accounts` and asserts the
/// resulting state.
///
/// All inputs are consumed and a single output account holding the summed
/// input amount (and lamports) with `delegate: None` is created in
/// `output_account_merkle_tree`. `authority` must be the owner of the input
/// accounts and co-signs next to the RPC payer. Verification is delegated to
/// `assert_transfer`.
#[allow(clippy::too_many_arguments)]
pub async fn revoke_test<R: RpcConnection, I: Indexer<R>>(
    authority: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    input_compressed_accounts: Vec<TokenDataWithContext>,
    output_account_merkle_tree: &Pubkey,
    transaction_params: Option<TransactionParams>,
) {
    let input_compressed_account_hashes = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.hash().unwrap())
        .collect::<Vec<_>>();
    let input_merkle_tree_pubkeys = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
        .collect::<Vec<_>>();
    // Prove inclusion of the inputs in their state Merkle trees.
    let proof_rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&input_compressed_account_hashes),
            Some(&input_merkle_tree_pubkeys),
            None,
            None,
            rpc,
        )
        .await;
    let mint = input_compressed_accounts[0].token_data.mint;
    let inputs = CreateRevokeInstructionInputs {
        fee_payer: rpc.get_payer().pubkey(),
        authority: authority.pubkey(),
        input_merkle_contexts: input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.merkle_context)
            .collect(),
        input_token_data: input_compressed_accounts
            .iter()
            .map(|x| x.token_data.clone())
            .collect(),
        input_compressed_accounts: input_compressed_accounts
            .iter()
            .map(|x| &x.compressed_account.compressed_account)
            .cloned()
            .collect::<Vec<_>>(),
        mint,
        output_account_merkle_tree: *output_account_merkle_tree,
        root_indices: proof_rpc_result.root_indices,
        proof: proof_rpc_result.proof,
    };
    let instruction = create_revoke_instruction(inputs).unwrap();
    let output_merkle_tree_pubkeys = vec![*output_account_merkle_tree];
    // Snapshot input and output trees before sending the transaction so the
    // tree updates can be verified afterwards.
    let output_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys);
    let input_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys);
    let output_merkle_tree_test_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await;
    let input_merkle_tree_test_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await;
    let context_payer = rpc.get_payer().insecure_clone();
    let (event, _signature, _) = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &context_payer.pubkey(),
            &[&context_payer, authority],
            transaction_params,
        )
        .await
        .unwrap()
        .unwrap();
    let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event);
    // Expect a single output that merges all input amounts with no delegate.
    let input_amount = input_compressed_accounts
        .iter()
        .map(|x| x.token_data.amount)
        .sum::<u64>();
    let expected_token_data = TokenData {
        mint,
        owner: authority.pubkey(),
        amount: input_amount,
        delegate: None,
        state: AccountState::Initialized,
        tlv: None,
    };
    assert_eq!(expected_token_data, created_output_accounts[0].token_data);
    let expected_compressed_output_accounts =
        create_expected_token_output_data(vec![expected_token_data], &output_merkle_tree_pubkeys);
    // All input lamports (if any) are carried over into the single output.
    let sum_inputs = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.compressed_account.lamports)
        .sum::<u64>();
    let change_lamports = if sum_inputs > 0 {
        Some(vec![Some(sum_inputs)])
    } else {
        None
    };
    assert_transfer(
        rpc,
        test_indexer,
        expected_compressed_output_accounts.as_slice(),
        created_output_accounts
            .iter()
            .map(|x| x.compressed_account.clone())
            .collect::<Vec<_>>()
            .as_slice(),
        change_lamports,
        input_compressed_account_hashes.as_slice(),
        &output_merkle_tree_test_snapshots,
        &input_merkle_tree_test_snapshots,
        &event,
        None,
    )
    .await;
}
/// Freezes the given compressed token accounts.
///
/// Thin wrapper around `freeze_or_thaw_test` with `FREEZE = true`: each input
/// account is recreated in `outputs_merkle_tree` with state
/// `AccountState::Frozen`. `authority` is the signer of the freeze
/// instruction.
pub async fn freeze_test<R: RpcConnection, I: Indexer<R>>(
    authority: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    input_compressed_accounts: Vec<TokenDataWithContext>,
    outputs_merkle_tree: &Pubkey,
    transaction_params: Option<TransactionParams>,
) {
    freeze_or_thaw_test::<R, true, I>(
        authority,
        rpc,
        test_indexer,
        input_compressed_accounts,
        outputs_merkle_tree,
        transaction_params,
    )
    .await;
}
/// Thaws the given (frozen) compressed token accounts.
///
/// Thin wrapper around `freeze_or_thaw_test` with `FREEZE = false`: each
/// input account is recreated in `outputs_merkle_tree` with state
/// `AccountState::Initialized`. `authority` is the signer of the thaw
/// instruction.
pub async fn thaw_test<R: RpcConnection, I: Indexer<R>>(
    authority: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    input_compressed_accounts: Vec<TokenDataWithContext>,
    outputs_merkle_tree: &Pubkey,
    transaction_params: Option<TransactionParams>,
) {
    freeze_or_thaw_test::<R, false, I>(
        authority,
        rpc,
        test_indexer,
        input_compressed_accounts,
        outputs_merkle_tree,
        transaction_params,
    )
    .await;
}
/// Freezes (`FREEZE = true`) or thaws (`FREEZE = false`) the given compressed
/// token accounts and asserts the resulting state transition.
///
/// Every input account is recreated in `outputs_merkle_tree` with the same
/// amount, delegate and lamports, but with its state toggled to `Frozen` or
/// `Initialized` respectively. `authority` signs the instruction next to the
/// RPC payer. Verification is delegated to `assert_transfer`.
pub async fn freeze_or_thaw_test<R: RpcConnection, const FREEZE: bool, I: Indexer<R>>(
    authority: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    input_compressed_accounts: Vec<TokenDataWithContext>,
    outputs_merkle_tree: &Pubkey,
    transaction_params: Option<TransactionParams>,
) {
    let input_compressed_account_hashes = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.hash().unwrap())
        .collect::<Vec<_>>();
    let input_merkle_tree_pubkeys = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
        .collect::<Vec<_>>();
    // Prove inclusion of the inputs in their state Merkle trees.
    let proof_rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&input_compressed_account_hashes),
            Some(&input_merkle_tree_pubkeys),
            None,
            None,
            rpc,
        )
        .await;
    let mint = input_compressed_accounts[0].token_data.mint;
    let inputs = CreateInstructionInputs {
        fee_payer: rpc.get_payer().pubkey(),
        authority: authority.pubkey(),
        input_merkle_contexts: input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.merkle_context)
            .collect(),
        input_token_data: input_compressed_accounts
            .iter()
            .map(|x| x.token_data.clone())
            .collect(),
        input_compressed_accounts: input_compressed_accounts
            .iter()
            .map(|x| &x.compressed_account.compressed_account)
            .cloned()
            .collect::<Vec<_>>(),
        outputs_merkle_tree: *outputs_merkle_tree,
        root_indices: proof_rpc_result.root_indices,
        proof: proof_rpc_result.proof,
    };
    let instruction = create_instruction::<FREEZE>(inputs).unwrap();
    // One output per input, all landing in the same tree.
    let output_merkle_tree_pubkeys =
        vec![*outputs_merkle_tree; input_compressed_account_hashes.len()];
    // Snapshot input and output trees before sending the transaction so the
    // tree updates can be verified afterwards.
    let output_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys);
    let input_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys);
    let output_merkle_tree_test_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await;
    let input_merkle_tree_test_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await;
    let context_payer = rpc.get_payer().insecure_clone();
    let (event, _signature, _) = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &context_payer.pubkey(),
            &[&context_payer, authority],
            transaction_params,
        )
        .await
        .unwrap()
        .unwrap();
    let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event);
    // Build the expected outputs: identical token data with the toggled state.
    // The target state is the same for every account, so compute it once.
    let state = if FREEZE {
        AccountState::Frozen
    } else {
        AccountState::Initialized
    };
    let mut delegates = Vec::new();
    let mut expected_output_accounts = Vec::new();
    for account in input_compressed_accounts.iter() {
        let expected_token_data = TokenData {
            mint,
            // NOTE(review): uses the first input's owner for every expected
            // output — assumes all inputs share one owner; confirm callers.
            owner: input_compressed_accounts[0].token_data.owner,
            amount: account.token_data.amount,
            delegate: account.token_data.delegate,
            state,
            tlv: None,
        };
        // The delegate (or its absence) is carried over unchanged.
        delegates.push(account.token_data.delegate);
        expected_output_accounts.push(expected_token_data);
    }
    let expected_compressed_output_accounts =
        create_expected_token_output_data(expected_output_accounts, &output_merkle_tree_pubkeys);
    // Each output keeps its input's lamports; per-account `None` means zero.
    let sum_inputs = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.compressed_account.lamports)
        .sum::<u64>();
    let change_lamports = if sum_inputs > 0 {
        Some(
            input_compressed_accounts
                .iter()
                .map(|account| {
                    let lamports = account.compressed_account.compressed_account.lamports;
                    (lamports > 0).then_some(lamports)
                })
                .collect(),
        )
    } else {
        None
    };
    assert_transfer(
        rpc,
        test_indexer,
        expected_compressed_output_accounts.as_slice(),
        created_output_accounts
            .iter()
            .map(|x| x.compressed_account.clone())
            .collect::<Vec<_>>()
            .as_slice(),
        change_lamports,
        input_compressed_account_hashes.as_slice(),
        &output_merkle_tree_test_snapshots,
        &input_merkle_tree_test_snapshots,
        &event,
        Some(delegates),
    )
    .await;
}
/// Burns `burn_amount` tokens from the given compressed token accounts and
/// asserts the result.
///
/// Builds the burn instruction via `create_burn_test_instruction`, sends it,
/// then checks:
/// - when the remaining amount is > 0, a change output is created in
///   `change_account_merkle_tree`;
/// - inputs are nullified and event/tree updates are consistent
///   (via `assert_transfer`);
/// - the token pool balance decreased by exactly `burn_amount`.
///
/// `signer_is_delegate` selects whether `authority` signs as delegate (the
/// change output then keeps the delegation) or as owner.
#[allow(clippy::too_many_arguments)]
pub async fn burn_test<R: RpcConnection, I: Indexer<R>>(
    authority: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    input_compressed_accounts: Vec<TokenDataWithContext>,
    change_account_merkle_tree: &Pubkey,
    burn_amount: u64,
    signer_is_delegate: bool,
    transaction_params: Option<TransactionParams>,
    is_token_22: bool,
) {
    let (
        input_compressed_account_hashes,
        input_merkle_tree_pubkeys,
        mint,
        output_amount,
        instruction,
    ) = create_burn_test_instruction(
        authority,
        rpc,
        test_indexer,
        &input_compressed_accounts,
        change_account_merkle_tree,
        burn_amount,
        signer_is_delegate,
        BurnInstructionMode::Normal,
        is_token_22,
    )
    .await;
    let output_merkle_tree_pubkeys = vec![*change_account_merkle_tree];
    // Only snapshot the output tree when a change account will be created.
    let output_merkle_tree_test_snapshots = if output_amount > 0 {
        let output_merkle_tree_accounts =
            test_indexer.get_state_merkle_tree_accounts(&output_merkle_tree_pubkeys);
        get_merkle_tree_snapshots::<R>(rpc, output_merkle_tree_accounts.as_slice()).await
    } else {
        Vec::new()
    };
    // Record the pool balance before the burn to verify the burned amount.
    // NOTE(review): unpacks with `spl_token::state::Account` even when
    // `is_token_22` is set — assumes the base account layout matches; confirm.
    let token_pool_pda_address = get_token_pool_pda(&mint);
    let pre_token_pool_account = rpc
        .get_account(token_pool_pda_address)
        .await
        .unwrap()
        .unwrap();
    let pre_token_pool_balance = spl_token::state::Account::unpack(&pre_token_pool_account.data)
        .unwrap()
        .amount;
    let input_merkle_tree_accounts =
        test_indexer.get_state_merkle_tree_accounts(&input_merkle_tree_pubkeys);
    let input_merkle_tree_test_snapshots =
        get_merkle_tree_snapshots::<R>(rpc, input_merkle_tree_accounts.as_slice()).await;
    let context_payer = rpc.get_payer().insecure_clone();
    let (event, _signature, _) = rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &context_payer.pubkey(),
            &[&context_payer, authority],
            transaction_params,
        )
        .await
        .unwrap()
        .unwrap();
    let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event);
    let mut delegates = Vec::new();
    let mut expected_output_accounts = Vec::new();
    // When the signer is the delegate, the change output keeps the delegation.
    let delegate = if signer_is_delegate {
        Some(authority.pubkey())
    } else {
        None
    };
    if output_amount > 0 {
        let expected_token_data = TokenData {
            mint,
            owner: input_compressed_accounts[0].token_data.owner,
            amount: output_amount,
            delegate,
            state: AccountState::Initialized,
            tlv: None,
        };
        delegates.push(expected_token_data.delegate);
        expected_output_accounts.push(expected_token_data);
    }
    let expected_compressed_output_accounts =
        create_expected_token_output_data(expected_output_accounts, &output_merkle_tree_pubkeys);
    // All input lamports (if any) are carried over into the change output.
    let sum_inputs = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.compressed_account.lamports)
        .sum::<u64>();
    let change_lamports = if sum_inputs > 0 {
        Some(vec![Some(sum_inputs)])
    } else {
        None
    };
    assert_transfer(
        rpc,
        test_indexer,
        expected_compressed_output_accounts.as_slice(),
        created_output_accounts
            .iter()
            .map(|x| x.compressed_account.clone())
            .collect::<Vec<_>>()
            .as_slice(),
        change_lamports,
        input_compressed_account_hashes.as_slice(),
        &output_merkle_tree_test_snapshots,
        &input_merkle_tree_test_snapshots,
        &event,
        Some(delegates),
    )
    .await;
    // The burn must reduce the pool balance by exactly `burn_amount`.
    let post_token_pool_account = rpc
        .get_account(token_pool_pda_address)
        .await
        .unwrap()
        .unwrap();
    let post_token_pool_balance = spl_token::state::Account::unpack(&post_token_pool_account.data)
        .unwrap()
        .amount;
    assert_eq!(
        post_token_pool_balance,
        pre_token_pool_balance - burn_amount
    );
}
/// Controls whether `create_burn_test_instruction` produces a valid or an
/// intentionally invalid burn instruction (for failure-path tests).
#[derive(Debug, Clone, PartialEq)]
pub enum BurnInstructionMode {
    /// Build a valid burn instruction.
    Normal,
    /// Corrupt the validity proof (proof element `a` is reused as `c`).
    InvalidProof,
    /// Substitute a random mint pubkey for the inputs' actual mint.
    InvalidMint,
}
/// Builds a burn instruction for the given compressed token accounts.
///
/// Returns `(input_hashes, input_merkle_tree_pubkeys, mint, output_amount,
/// instruction)`, where `output_amount` is the remaining (change) amount
/// after the burn; it underflows (panics in debug builds) if `burn_amount`
/// exceeds the summed input amount.
///
/// `mode` allows constructing intentionally invalid instructions for
/// failure-path tests (see [`BurnInstructionMode`]).
#[allow(clippy::too_many_arguments)]
pub async fn create_burn_test_instruction<R: RpcConnection, I: Indexer<R>>(
    authority: &Keypair,
    rpc: &mut R,
    test_indexer: &mut I,
    input_compressed_accounts: &[TokenDataWithContext],
    change_account_merkle_tree: &Pubkey,
    burn_amount: u64,
    signer_is_delegate: bool,
    mode: BurnInstructionMode,
    is_token_22: bool,
) -> (Vec<[u8; 32]>, Vec<Pubkey>, Pubkey, u64, Instruction) {
    let input_compressed_account_hashes = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.hash().unwrap())
        .collect::<Vec<_>>();
    let input_merkle_tree_pubkeys = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey)
        .collect::<Vec<_>>();
    // Prove inclusion of the inputs in their state Merkle trees.
    let proof_rpc_result = test_indexer
        .create_proof_for_compressed_accounts(
            Some(&input_compressed_account_hashes),
            Some(&input_merkle_tree_pubkeys),
            None,
            None,
            rpc,
        )
        .await;
    // InvalidMint deliberately points the instruction at a nonexistent mint.
    let mint = if mode == BurnInstructionMode::InvalidMint {
        Pubkey::new_unique()
    } else {
        input_compressed_accounts[0].token_data.mint
    };
    let proof = if mode == BurnInstructionMode::InvalidProof {
        CompressedProof {
            a: proof_rpc_result.proof.a,
            b: proof_rpc_result.proof.b,
            c: proof_rpc_result.proof.a, // flip c to make proof invalid but not run into decompress errors
        }
    } else {
        proof_rpc_result.proof
    };
    let inputs = CreateBurnInstructionInputs {
        fee_payer: rpc.get_payer().pubkey(),
        authority: authority.pubkey(),
        input_merkle_contexts: input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.merkle_context)
            .collect(),
        input_token_data: input_compressed_accounts
            .iter()
            .map(|x| x.token_data.clone())
            .collect(),
        input_compressed_accounts: input_compressed_accounts
            .iter()
            .map(|x| &x.compressed_account.compressed_account)
            .cloned()
            .collect::<Vec<_>>(),
        change_account_merkle_tree: *change_account_merkle_tree,
        root_indices: proof_rpc_result.root_indices,
        proof,
        mint,
        signer_is_delegate,
        burn_amount,
        is_token_22,
    };
    // Change amount left over after burning from the summed inputs.
    let input_amount_sum = input_compressed_accounts
        .iter()
        .map(|x| x.token_data.amount)
        .sum::<u64>();
    let output_amount = input_amount_sum - burn_amount;
    let instruction = create_burn_instruction(inputs).unwrap();
    (
        input_compressed_account_hashes,
        input_merkle_tree_pubkeys,
        mint,
        output_amount,
        instruction,
    )
}
/// Pairs each expected `TokenData` with its output Merkle tree pubkey
/// (positionally, via `zip`; surplus entries on either side are dropped) and
/// converts the pair into a `TokenTransferOutputData` with `lamports: None`.
pub fn create_expected_token_output_data(
    expected_token_data: Vec<TokenData>,
    merkle_tree_pubkeys: &[Pubkey],
) -> Vec<TokenTransferOutputData> {
    expected_token_data
        .iter()
        .zip(merkle_tree_pubkeys.iter())
        .map(|(token_data, merkle_tree)| TokenTransferOutputData {
            owner: token_data.owner,
            amount: token_data.amount,
            merkle_tree: *merkle_tree,
            lamports: None,
        })
        .collect()
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_queue.rs
|
use account_compression::{QueueAccount, QueueMetadata, QueueType, RolloverMetadata};
use forester_utils::{get_hash_set, AccountZeroCopy};
use light_client::rpc::RpcConnection;
use light_utils::fee::compute_rollover_fee;
use solana_sdk::pubkey::Pubkey;
/// Asserts that a freshly initialized address queue matches the given configs.
///
/// Thin wrapper around [`assert_address_queue`] passing no rolledover slot and
/// no next queue, i.e. the queue is expected to never have been rolled over.
#[allow(clippy::too_many_arguments)]
pub async fn assert_address_queue_initialized<R: RpcConnection>(
    rpc: &mut R,
    queue_pubkey: &Pubkey,
    queue_config: &account_compression::AddressQueueConfig,
    associated_merkle_tree_pubkey: &Pubkey,
    associated_tree_config: &account_compression::AddressMerkleTreeConfig,
    expected_queue_type: QueueType,
    expected_index: u64,
    expected_program_owner: Option<Pubkey>,
    expected_forester: Option<Pubkey>,
    payer_pubkey: &Pubkey,
) {
    assert_address_queue(
        rpc,
        queue_pubkey,
        queue_config,
        associated_merkle_tree_pubkey,
        associated_tree_config,
        expected_queue_type,
        expected_index,
        expected_program_owner,
        expected_forester,
        None,
        None,
        payer_pubkey,
    )
    .await;
}
/// Asserts that a freshly initialized nullifier queue matches the given configs.
///
/// Adapts the state-tree config into an [`account_compression::AddressMerkleTreeConfig`]
/// so the shared [`assert_queue`] helper can be reused, and expects a rollover
/// fee of 0 (only the address queue collects rollover fees).
#[allow(clippy::too_many_arguments)]
pub async fn assert_nullifier_queue_initialized<R: RpcConnection>(
    rpc: &mut R,
    queue_pubkey: &Pubkey,
    queue_config: &account_compression::NullifierQueueConfig,
    associated_merkle_tree_pubkey: &Pubkey,
    associated_tree_config: &account_compression::StateMerkleTreeConfig,
    expected_queue_type: QueueType,
    expected_index: u64,
    expected_program_owner: Option<Pubkey>,
    expected_forester: Option<Pubkey>,
    payer_pubkey: &Pubkey,
) {
    let associated_tree_config = account_compression::AddressMerkleTreeConfig {
        height: associated_tree_config.height,
        changelog_size: associated_tree_config.changelog_size,
        // not asserted here
        address_changelog_size: 0,
        roots_size: associated_tree_config.roots_size,
        canopy_depth: associated_tree_config.canopy_depth,
        rollover_threshold: associated_tree_config.rollover_threshold,
        close_threshold: associated_tree_config.close_threshold,
        network_fee: associated_tree_config.network_fee,
    };
    // The address queue is the only account that collects the rollover fees.
    let expected_rollover_fee = 0;
    assert_queue(
        rpc,
        queue_pubkey,
        queue_config,
        associated_merkle_tree_pubkey,
        &associated_tree_config,
        expected_rollover_fee,
        expected_queue_type,
        expected_index,
        expected_program_owner,
        expected_forester,
        None,
        None,
        payer_pubkey,
    )
    .await;
}
/// Asserts the on-chain state of an address queue account.
///
/// Computes the expected rollover fee from the current queue and Merkle tree
/// account balances (the address queue collects the rollover fee for both
/// accounts) and delegates the field-by-field comparison to [`assert_queue`].
#[allow(clippy::too_many_arguments)]
pub async fn assert_address_queue<R: RpcConnection>(
    rpc: &mut R,
    queue_pubkey: &Pubkey,
    queue_config: &account_compression::AddressQueueConfig,
    associated_merkle_tree_pubkey: &Pubkey,
    associated_tree_config: &account_compression::AddressMerkleTreeConfig,
    expected_queue_type: QueueType,
    expected_index: u64,
    expected_program_owner: Option<Pubkey>,
    expected_forester: Option<Pubkey>,
    expected_rolledover_slot: Option<u64>,
    expected_next_queue: Option<Pubkey>,
    payer_pubkey: &Pubkey,
) {
    // Current balances feed into the expected rollover fee computation below.
    let balance_merkle_tree = rpc
        .get_account(*associated_merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports;
    let balance_queue = rpc
        .get_account(*queue_pubkey)
        .await
        .unwrap()
        .unwrap()
        .lamports;
    // The address queue is the only account that collects the rollover fees.
    let expected_rollover_fee = match associated_tree_config.rollover_threshold {
        Some(threshold) => {
            compute_rollover_fee(threshold, associated_tree_config.height, balance_queue).unwrap()
                + compute_rollover_fee(
                    threshold,
                    associated_tree_config.height,
                    balance_merkle_tree,
                )
                .unwrap()
        }
        None => 0,
    };
    assert_queue(
        rpc,
        queue_pubkey,
        queue_config,
        associated_merkle_tree_pubkey,
        associated_tree_config,
        expected_rollover_fee,
        expected_queue_type,
        expected_index,
        expected_program_owner,
        expected_forester,
        expected_rolledover_slot,
        expected_next_queue,
        payer_pubkey,
    )
    .await;
}
/// Asserts all metadata fields of a queue account plus its hash-set layout.
///
/// Compares rollover/access/queue metadata against the expected values derived
/// from the configs (absent `Option`s map to their sentinel defaults, e.g.
/// `u64::MAX` for an unset rolledover slot / close threshold), then checks the
/// hash set's capacity and sequence threshold.
#[allow(clippy::too_many_arguments)]
pub async fn assert_queue<R: RpcConnection>(
    rpc: &mut R,
    queue_pubkey: &Pubkey,
    queue_config: &account_compression::AddressQueueConfig,
    associated_merkle_tree_pubkey: &Pubkey,
    associated_tree_config: &account_compression::AddressMerkleTreeConfig,
    expected_rollover_fee: u64,
    expected_queue_type: QueueType,
    expected_index: u64,
    expected_program_owner: Option<Pubkey>,
    expected_forester: Option<Pubkey>,
    expected_rolledover_slot: Option<u64>,
    expected_next_queue: Option<Pubkey>,
    payer_pubkey: &Pubkey,
) {
    let queue = AccountZeroCopy::<account_compression::QueueAccount>::new(rpc, *queue_pubkey).await;
    let queue_account = queue.deserialized();
    let expected_rollover_meta_data = RolloverMetadata {
        index: expected_index,
        rolledover_slot: expected_rolledover_slot.unwrap_or(u64::MAX),
        rollover_threshold: associated_tree_config
            .rollover_threshold
            .unwrap_or_default(),
        network_fee: queue_config.network_fee.unwrap_or_default(),
        rollover_fee: expected_rollover_fee,
        close_threshold: associated_tree_config.close_threshold.unwrap_or(u64::MAX),
        additional_bytes: 0,
    };
    let expected_access_meta_data = account_compression::AccessMetadata {
        owner: *payer_pubkey,
        program_owner: expected_program_owner.unwrap_or_default(),
        forester: expected_forester.unwrap_or_default(),
    };
    let expected_queue_meta_data = QueueMetadata {
        access_metadata: expected_access_meta_data,
        rollover_metadata: expected_rollover_meta_data,
        associated_merkle_tree: *associated_merkle_tree_pubkey,
        next_queue: expected_next_queue.unwrap_or_default(),
        queue_type: expected_queue_type as u64,
    };
    assert_eq!(queue_account.metadata, expected_queue_meta_data);
    // SAFETY(review): assumes `queue_pubkey` references an initialized
    // QueueAccount whose data matches the hash-set layout; in these tests that
    // is established by the successful metadata assertion above — confirm the
    // `get_hash_set` safety contract.
    let queue = unsafe { get_hash_set::<QueueAccount, R>(rpc, *queue_pubkey).await };
    assert_eq!(queue.get_capacity(), queue_config.capacity as usize);
    assert_eq!(
        queue.sequence_threshold,
        queue_config.sequence_threshold as usize
    );
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/assert_token_tx.rs
|
use crate::assert_compressed_tx::{
assert_merkle_tree_after_tx, assert_nullifiers_exist_in_hash_sets,
assert_public_transaction_event, MerkleTreeTestSnapShot,
};
use anchor_lang::AnchorSerialize;
use forester_utils::indexer::{Indexer, TokenDataWithContext};
use light_client::rpc::RpcConnection;
use light_compressed_token::{
get_token_pool_pda,
process_transfer::{get_cpi_authority_pda, TokenTransferOutputData},
};
use light_system_program::sdk::{
compressed_account::CompressedAccountWithMerkleContext, event::PublicTransactionEvent,
};
use solana_sdk::{program_pack::Pack, pubkey::Pubkey};
/// General token tx assert:
/// 1. outputs created
/// 2. inputs nullified
/// 3. Public Transaction event emitted correctly
/// 4. Merkle tree was updated correctly
/// 5. TODO: Fees have been paid (after fee refactor)
/// 6. Check compression amount was transferred (outside of this function)
/// No addresses in token transactions
///
/// `lamports` / `delegates` are per-output expectations; `None` means no
/// lamports / no delegate for every output.
#[allow(clippy::too_many_arguments)]
pub async fn assert_transfer<R: RpcConnection, I: Indexer<R>>(
    context: &mut R,
    test_indexer: &mut I,
    out_compressed_accounts: &[TokenTransferOutputData],
    created_output_compressed_accounts: &[CompressedAccountWithMerkleContext],
    lamports: Option<Vec<Option<u64>>>,
    input_compressed_account_hashes: &[[u8; 32]],
    output_merkle_tree_snapshots: &[MerkleTreeTestSnapShot],
    input_merkle_tree_test_snapshots: &[MerkleTreeTestSnapShot],
    event: &PublicTransactionEvent,
    delegates: Option<Vec<Option<Pubkey>>>,
) {
    // CHECK 1
    assert_compressed_token_accounts(
        test_indexer,
        out_compressed_accounts,
        lamports,
        output_merkle_tree_snapshots,
        delegates,
    );
    // CHECK 2
    assert_nullifiers_exist_in_hash_sets(
        context,
        input_compressed_account_hashes,
        input_compressed_account_hashes,
    )
    .await;
    // Keep the owned Vec alive in the outer scope so the `Option<&Vec<_>>`
    // borrow below remains valid.
    let vec;
    let input_compressed_account_hashes = if input_compressed_account_hashes.is_empty() {
        None
    } else {
        vec = input_compressed_account_hashes.to_vec();
        Some(&vec)
    };
    // CHECK 4
    let sequence_numbers =
        assert_merkle_tree_after_tx(context, output_merkle_tree_snapshots, test_indexer).await;
    // CHECK 3
    assert_public_transaction_event(
        event,
        input_compressed_account_hashes,
        output_merkle_tree_snapshots
            .iter()
            .map(|x| x.accounts)
            .collect::<Vec<_>>()
            .as_slice(),
        &created_output_compressed_accounts
            .iter()
            .map(|x| x.merkle_context.leaf_index)
            .collect::<Vec<_>>(),
        None,
        false,
        None,
        sequence_numbers,
    );
}
/// Asserts that every expected output token account exists in the test
/// indexer with the expected owner, amount, delegate, lamports, serialized
/// token data, owning program, and leaf index.
///
/// `lamports` / `delegates` default to `None` per output when not provided.
/// `output_merkle_tree_snapshots[i]` must be the pre-transaction snapshot of
/// the tree receiving output `i`; `index` tracks the position within each
/// tree so the expected leaf index can be derived from the snapshot.
pub fn assert_compressed_token_accounts<R: RpcConnection, I: Indexer<R>>(
    test_indexer: &mut I,
    out_compressed_accounts: &[TokenTransferOutputData],
    lamports: Option<Vec<Option<u64>>>,
    output_merkle_tree_snapshots: &[MerkleTreeTestSnapShot],
    delegates: Option<Vec<Option<Pubkey>>>,
) {
    // `unwrap_or_else` avoids allocating the default vectors when the caller
    // supplied explicit values.
    let delegates = delegates.unwrap_or_else(|| vec![None; out_compressed_accounts.len()]);
    let output_lamports = lamports.unwrap_or_else(|| vec![None; out_compressed_accounts.len()]);
    let mut tree = Pubkey::default();
    let mut index = 0;
    println!("out_compressed_accounts {:?}", out_compressed_accounts);
    for (i, out_compressed_account) in out_compressed_accounts.iter().enumerate() {
        // Restart the intra-tree position counter whenever the output tree
        // changes; consecutive outputs in the same tree get increasing indices.
        if output_merkle_tree_snapshots[i].accounts.merkle_tree != tree {
            tree = output_merkle_tree_snapshots[i].accounts.merkle_tree;
            index = 0;
        } else {
            index += 1;
        }
        // Locate the indexed account matching owner, amount, and delegate.
        let pos = test_indexer
            .get_token_compressed_accounts()
            .iter()
            .position(|x| {
                x.token_data.owner == out_compressed_account.owner
                    && x.token_data.amount == out_compressed_account.amount
                    && x.token_data.delegate == delegates[i]
            })
            .expect("transfer recipient compressed account not found in mock indexer");
        let transfer_recipient_token_compressed_account =
            test_indexer.get_token_compressed_accounts()[pos].clone();
        assert_eq!(
            transfer_recipient_token_compressed_account
                .token_data
                .amount,
            out_compressed_account.amount
        );
        assert_eq!(
            transfer_recipient_token_compressed_account.token_data.owner,
            out_compressed_account.owner
        );
        assert_eq!(
            transfer_recipient_token_compressed_account
                .token_data
                .delegate,
            delegates[i]
        );
        let transfer_recipient_compressed_account = transfer_recipient_token_compressed_account
            .compressed_account
            .clone();
        println!(
            "transfer_recipient_compressed_account {:?}",
            transfer_recipient_compressed_account
        );
        if i < output_lamports.len() {
            assert_eq!(
                transfer_recipient_compressed_account
                    .compressed_account
                    .lamports,
                output_lamports[i].unwrap_or(0)
            );
        } else if i != output_lamports.len() {
            // This check accounts for change accounts which are dynamically created onchain.
            panic!("lamports not found in output_lamports");
        }
        // The account data must be the Borsh serialization of the token data,
        // owned by the compressed-token program.
        assert!(transfer_recipient_compressed_account
            .compressed_account
            .data
            .is_some());
        let mut data = Vec::new();
        transfer_recipient_token_compressed_account
            .token_data
            .serialize(&mut data)
            .unwrap();
        assert_eq!(
            transfer_recipient_compressed_account
                .compressed_account
                .data
                .as_ref()
                .unwrap()
                .data,
            data
        );
        assert_eq!(
            transfer_recipient_compressed_account
                .compressed_account
                .owner,
            light_compressed_token::ID
        );
        // Verify the output landed at the expected leaf index (snapshot's
        // next_index plus the intra-tree position).
        if !test_indexer
            .get_token_compressed_accounts()
            .iter()
            .any(|x| {
                x.compressed_account.merkle_context.leaf_index as usize
                    == output_merkle_tree_snapshots[i].next_index + index
            })
        {
            println!(
                "token_compressed_accounts {:?}",
                test_indexer.get_token_compressed_accounts()
            );
            println!("snapshot {:?}", output_merkle_tree_snapshots[i]);
            println!("index {:?}", index);
            panic!("transfer recipient compressed account not found in mock indexer");
        };
    }
}
/// Asserts the result of a compressed `mint_to`.
///
/// Checks that every `(recipient, amount)` pair produced exactly one matching
/// compressed token account with no delegate, that the output Merkle trees
/// were updated, and that both the mint supply and the token pool balance
/// grew by the sum of `amounts`.
///
/// NOTE(review): `previous_sol_pool_amount` appears to be the previous
/// *token* pool balance despite its name — confirm with callers.
#[allow(clippy::too_many_arguments)]
pub async fn assert_mint_to<'a, R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &'a mut I,
    recipients: &[Pubkey],
    mint: Pubkey,
    amounts: &[u64],
    snapshots: &[MerkleTreeTestSnapShot],
    created_token_accounts: &[TokenDataWithContext],
    previous_mint_supply: u64,
    previous_sol_pool_amount: u64,
) {
    let mut created_token_accounts = created_token_accounts.to_vec();
    for (recipient, amount) in recipients.iter().zip(amounts) {
        let pos = created_token_accounts
            .iter()
            .position(|x| {
                x.token_data.owner == *recipient
                    && x.token_data.amount == *amount
                    && x.token_data.mint == mint
                    && x.token_data.delegate.is_none()
            })
            .expect("Mint to failed to create expected compressed token account.");
        // Remove the matched account so duplicate recipients must each match
        // a distinct created account.
        created_token_accounts.remove(pos);
    }
    assert_merkle_tree_after_tx(rpc, snapshots, test_indexer).await;
    // Mint supply and pool balance must both grow by the total minted amount.
    let mint_account: spl_token::state::Mint =
        spl_token::state::Mint::unpack(&rpc.get_account(mint).await.unwrap().unwrap().data)
            .unwrap();
    let sum_amounts = amounts.iter().sum::<u64>();
    assert_eq!(mint_account.supply, previous_mint_supply + sum_amounts);
    let pool = get_token_pool_pda(&mint);
    let pool_account =
        spl_token::state::Account::unpack(&rpc.get_account(pool).await.unwrap().unwrap().data)
            .unwrap();
    assert_eq!(pool_account.amount, previous_sol_pool_amount + sum_amounts);
}
/// Asserts that a compressed-token mint and its token pool account were
/// created correctly: zero supply, 2 decimals, `authority` as both mint and
/// freeze authority, and the pool owned by the cpi-authority PDA with the
/// expected mint and an empty, undelegated balance.
pub async fn assert_create_mint<R: RpcConnection>(
    context: &mut R,
    authority: &Pubkey,
    mint: &Pubkey,
    pool: &Pubkey,
) {
    // Verify the SPL mint was initialized with the expected parameters.
    let mint_data = context.get_account(*mint).await.unwrap().unwrap().data;
    let mint_state = spl_token::state::Mint::unpack(&mint_data).unwrap();
    assert_eq!(mint_state.supply, 0);
    assert_eq!(mint_state.decimals, 2);
    assert_eq!(mint_state.mint_authority.unwrap(), *authority);
    assert_eq!(mint_state.freeze_authority, Some(*authority).into());
    assert!(mint_state.is_initialized);
    // Verify the token pool account (renamed from the shadowing `mint_account`).
    let pool_data = context.get_account(*pool).await.unwrap().unwrap().data;
    let pool_state = spl_token::state::Account::unpack(&pool_data).unwrap();
    assert_eq!(pool_state.amount, 0);
    assert_eq!(pool_state.delegate, None.into());
    assert_eq!(pool_state.mint, *mint);
    assert_eq!(pool_state.owner, get_cpi_authority_pda().0);
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/e2e_test_env.rs
|
// Flow:
// init indexer
// init first keypair
// init crank
// vec of public Merkle tree NF queue pairs
// vec of public address Mt and queue pairs
// for i in rounds
// randomly add new keypair
// for every keypair randomly select whether it does an action
// Architecture:
// - bundle trees, indexer etc in a E2ETestEnv struct
// - methods:
// // bundles all general actions
// - activate general actions
// // bundles all keypair actions
// - activate keypair actions
// // calls general and keypair actions
// - execute round
// // every action takes a probability as input
// // if you want to execute the action on purpose pass 1
// - method for every action
// - add action activation config with default configs
// - all enabled
// - only spl, only sol, etc
// Forester struct
// - payer keypair, authority keypair
// -methods
// - empty nullifier queue
// - empty address queue
// - rollover Merkle tree
// - rollover address Merkle tree
// keypair actions:
// safeguard every action in case of no balance
// 1. compress sol
// 2. decompress sol
// 3. transfer sol
// 4. compress spl
// 5. decompress spl
// 6. mint spl
// 7. transfer spl
// general actions:
// add keypair
// create new state Mt
// create new address Mt
// extension:
// keypair actions:
// - create pda
// - escrow tokens
// - delegate, revoke, delegated transaction
// general actions:
// - create new program owned state Merkle tree and queue
// - create new program owned address Merkle tree and queue
// minimal start
// struct with env and test-indexer
// only spl transactions
// second pr
// refactor sol tests to functions that can be reused
// TODO: implement traits for context object and indexer that we can implement with an rpc as well
// context trait: send_transaction -> return transaction result, get_account_info -> return account info
// indexer trait: get_compressed_accounts_by_owner -> return compressed accounts,
// refactor all tests to work with that so that we can run all tests with a test validator and concurrency
use light_compressed_token::token_data::AccountState;
use light_prover_client::gnark::helpers::{ProofType, ProverConfig};
use light_registry::protocol_config::state::{ProtocolConfig, ProtocolConfigPda};
use light_registry::sdk::create_finalize_registration_instruction;
use light_registry::utils::get_protocol_config_pda_address;
use light_registry::ForesterConfig;
use log::info;
use num_bigint::{BigUint, RandBigInt};
use num_traits::Num;
use rand::distributions::uniform::{SampleRange, SampleUniform};
use rand::prelude::SliceRandom;
use rand::rngs::{StdRng, ThreadRng};
use rand::{Rng, RngCore, SeedableRng};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signature;
use solana_sdk::signer::{SeedDerivable, Signer};
use spl_token::solana_program::native_token::LAMPORTS_PER_SOL;
use crate::address_tree_rollover::{
assert_rolled_over_address_merkle_tree_and_queue,
perform_address_merkle_tree_roll_over_forester, perform_state_merkle_tree_roll_over_forester,
};
use crate::assert_epoch::{
assert_finalized_epoch_registration, assert_report_work, fetch_epoch_and_forester_pdas,
};
use crate::spl::{
approve_test, burn_test, compress_test, compressed_transfer_test, create_mint_helper,
create_token_account, decompress_test, freeze_test, mint_tokens_helper, revoke_test, thaw_test,
};
use crate::state_tree_rollover::assert_rolled_over_pair;
use crate::system_program::{
compress_sol_test, create_addresses_test, decompress_sol_test, transfer_compressed_sol_test,
};
use crate::test_forester::{empty_address_queue_test, nullify_compressed_accounts};
use account_compression::utils::constants::{
STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT,
};
use account_compression::{
AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig,
SAFETY_MARGIN,
};
use forester_utils::address_merkle_tree_config::{
address_tree_ready_for_rollover, state_tree_ready_for_rollover,
};
use forester_utils::forester_epoch::{Epoch, Forester, TreeAccounts, TreeType};
use forester_utils::indexer::{
AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, StateMerkleTreeAccounts,
StateMerkleTreeBundle, TokenDataWithContext,
};
use forester_utils::registry::register_test_forester;
use forester_utils::{airdrop_lamports, AccountZeroCopy};
use light_hasher::Poseidon;
use light_indexed_merkle_tree::HIGHEST_ADDRESS_PLUS_ONE;
use light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree};
use light_system_program::sdk::compressed_account::CompressedAccountWithMerkleContext;
use light_utils::bigint::bigint_to_be_bytes_array;
use light_utils::rand::gen_prime;
use crate::create_address_merkle_tree_and_queue_account_with_assert;
use crate::indexer::TestIndexer;
use light_client::rpc::errors::RpcError;
use light_client::rpc::RpcConnection;
use light_client::transaction_params::{FeeConfig, TransactionParams};
use light_program_test::test_env::{create_state_merkle_tree_and_queue_account, EnvAccounts};
use light_program_test::test_rpc::ProgramTestRpcConnection;
use light_prover_client::gnark::helpers::ProverMode;
/// A funded test user that keypair actions are performed for.
pub struct User {
    /// Signs and funds this user's transactions.
    pub keypair: Keypair,
    // Vector of (mint, token account) pairs owned by this user.
    pub token_accounts: Vec<(Pubkey, Pubkey)>,
}
/// Counters for every action performed during an e2e run.
#[derive(Debug, Default)]
pub struct Stats {
    pub spl_transfers: u64,
    pub mints: u64,
    pub spl_decompress: u64,
    pub spl_compress: u64,
    pub sol_transfers: u64,
    pub sol_decompress: u64,
    pub sol_compress: u64,
    pub create_address: u64,
    pub create_pda: u64,
    pub create_state_mt: u64,
    pub create_address_mt: u64,
    pub rolledover_state_trees: u64,
    pub rolledover_address_trees: u64,
    pub spl_approved: u64,
    pub spl_revoked: u64,
    pub spl_burned: u64,
    pub spl_frozen: u64,
    pub spl_thawed: u64,
    pub registered_foresters: u64,
    pub created_foresters: u64,
    pub work_reported: u64,
    pub finalized_registrations: u64,
}
impl Stats {
    /// Prints all counters to stdout; `users` is the current user count.
    pub fn print(&self, users: u64) {
        println!("Stats:");
        // Table-driven output: every line has the shape "<label> <value>".
        let counters: [(&str, u64); 23] = [
            ("Users", users),
            ("Mints", self.mints),
            ("Spl transfers", self.spl_transfers),
            ("Spl decompress", self.spl_decompress),
            ("Spl compress", self.spl_compress),
            ("Sol transfers", self.sol_transfers),
            ("Sol decompress", self.sol_decompress),
            ("Sol compress", self.sol_compress),
            ("Create address", self.create_address),
            ("Create pda", self.create_pda),
            ("Create state mt", self.create_state_mt),
            ("Create address mt", self.create_address_mt),
            ("Rolled over state trees", self.rolledover_state_trees),
            ("Rolled over address trees", self.rolledover_address_trees),
            ("Spl approved", self.spl_approved),
            ("Spl revoked", self.spl_revoked),
            ("Spl burned", self.spl_burned),
            ("Spl frozen", self.spl_frozen),
            ("Spl thawed", self.spl_thawed),
            ("Registered foresters", self.registered_foresters),
            ("Created foresters", self.created_foresters),
            ("Work reported", self.work_reported),
            ("Finalized registrations", self.finalized_registrations),
        ];
        for (label, value) in counters.iter() {
            println!("{} {}", label, value);
        }
    }
}
/// Builds an `E2ETestEnv` backed by a `ProgramTestRpcConnection`.
///
/// Initializes a `TestIndexer` from the environment accounts with an
/// rpc-mode prover, then constructs the environment with default action
/// configs, 10 rounds, and a random seed.
pub async fn init_program_test_env(
    rpc: ProgramTestRpcConnection,
    env_accounts: &EnvAccounts,
) -> E2ETestEnv<ProgramTestRpcConnection, TestIndexer<ProgramTestRpcConnection>> {
    let prover_config = ProverConfig {
        run_mode: Some(ProverMode::Rpc),
        circuits: vec![],
    };
    let indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env(
        &env_accounts.forester.insecure_clone(),
        env_accounts,
        Some(prover_config),
    )
    .await;
    E2ETestEnv::new(
        rpc,
        indexer,
        env_accounts,
        KeypairActionConfig::all_default(),
        GeneralActionConfig::default(),
        10,
        None,
    )
    .await
}
/// A forester participating in the test run: its signing keypair, the
/// epoch-tracking state, and the epoch in which it first registered.
#[derive(Debug, PartialEq)]
pub struct TestForester {
    keypair: Keypair,
    forester: Forester,
    // `Some(first_registration_epoch)` once registered; `None` before.
    is_registered: Option<u64>,
}
/// Bundles the rpc connection, indexer, users, trees, foresters and
/// randomized action configs for one end-to-end test run.
pub struct E2ETestEnv<R: RpcConnection, I: Indexer<R>> {
    pub payer: Keypair,
    pub governance_keypair: Keypair,
    pub indexer: I,
    pub users: Vec<User>,
    pub mints: Vec<Pubkey>,
    pub foresters: Vec<TestForester>,
    pub rpc: R,
    // Per-user action probabilities.
    pub keypair_action_config: KeypairActionConfig,
    // Global (non-user) action probabilities.
    pub general_action_config: GeneralActionConfig,
    pub round: u64,
    pub rounds: u64,
    // Seeded rng so failing runs can be reproduced from the printed seed.
    pub rng: StdRng,
    pub stats: Stats,
    pub epoch: u64,
    // Current light slot (protocol slot, not solana slot).
    pub slot: u64,
    /// Forester struct is reused but not used for foresting here
    /// Epoch config keeps track of the ongoing epochs.
    pub epoch_config: Forester,
    pub protocol_config: ProtocolConfig,
    // Latest registration epoch that foresters were registered for.
    pub registration_epoch: u64,
}
impl<R: RpcConnection, I: Indexer<R>> E2ETestEnv<R, I>
where
R: RpcConnection,
I: Indexer<R>,
{
pub async fn new(
mut rpc: R,
mut indexer: I,
env_accounts: &EnvAccounts,
keypair_action_config: KeypairActionConfig,
general_action_config: GeneralActionConfig,
rounds: u64,
seed: Option<u64>,
) -> Self {
let payer = rpc.get_payer().insecure_clone();
airdrop_lamports(&mut rpc, &payer.pubkey(), 1_000_000_000_000)
.await
.unwrap();
airdrop_lamports(&mut rpc, &env_accounts.forester.pubkey(), 1_000_000_000_000)
.await
.unwrap();
let mut thread_rng = ThreadRng::default();
let random_seed = thread_rng.next_u64();
let seed: u64 = seed.unwrap_or(random_seed);
// Keep this print so that in case the test fails
// we can use the seed to reproduce the error.
println!("\n\ne2e test seed {}\n\n", seed);
let mut rng = StdRng::seed_from_u64(seed);
let user = Self::create_user(&mut rng, &mut rpc).await;
let mint = create_mint_helper(&mut rpc, &payer).await;
mint_tokens_helper(
&mut rpc,
&mut indexer,
&env_accounts.merkle_tree_pubkey,
&payer,
&mint,
vec![100_000_000; 1],
vec![user.keypair.pubkey()],
)
.await;
let protocol_config_pda_address = get_protocol_config_pda_address().0;
println!("here");
let protocol_config = rpc
.get_anchor_account::<ProtocolConfigPda>(&protocol_config_pda_address)
.await
.unwrap()
.unwrap()
.config;
// TODO: add clear test env enum
// register foresters is only compatible with ProgramTest environment
let (foresters, epoch_config) =
if let Some(registered_epoch) = env_accounts.forester_epoch.as_ref() {
let _forester = Forester {
registration: registered_epoch.clone(),
active: registered_epoch.clone(),
..Default::default()
};
// Forester epoch account is assumed to exist (is inited with test program deployment)
let forester = TestForester {
keypair: env_accounts.forester.insecure_clone(),
forester: _forester.clone(),
is_registered: Some(0),
};
(vec![forester], _forester)
} else {
(Vec::<TestForester>::new(), Forester::default())
};
Self {
payer,
indexer,
users: vec![user],
rpc,
keypair_action_config,
general_action_config,
round: 0,
rounds,
rng,
mints: vec![],
stats: Stats::default(),
foresters,
registration_epoch: 0,
epoch: 0,
slot: 0,
epoch_config,
protocol_config,
governance_keypair: env_accounts.governance_authority.insecure_clone(),
}
}
/// Creates a new user with a random keypair and 100 sol
pub async fn create_user(rng: &mut StdRng, rpc: &mut R) -> User {
let keypair: Keypair = Keypair::from_seed(&[rng.gen_range(0..255); 32]).unwrap();
rpc.airdrop_lamports(&keypair.pubkey(), LAMPORTS_PER_SOL * 5000)
.await
.unwrap();
User {
keypair,
token_accounts: vec![],
}
}
pub async fn get_balance(&mut self, pubkey: &Pubkey) -> u64 {
self.rpc.get_balance(pubkey).await.unwrap()
}
pub async fn execute_rounds(&mut self) {
for _ in 0..=self.rounds {
self.execute_round().await;
}
}
pub async fn execute_round(&mut self) {
println!("\n------------------------------------------------------\n");
println!("Round: {}", self.round);
self.stats.print(self.users.len() as u64);
// TODO: check at the beginning of the round that the Merkle trees are in sync
let len = self.users.len();
for i in 0..len {
self.activate_keypair_actions(&self.users[i].keypair.pubkey())
.await;
}
self.activate_general_actions().await;
self.round += 1;
}
    /// Runs one randomized pass of all general (non-user) actions.
    ///
    /// Each action fires with the probability configured in
    /// `self.general_action_config`:
    /// 1. add a new funded user keypair
    /// 2. create a new state / address Merkle tree
    /// 3. nullify compressed accounts / empty address queues (forester work)
    /// 4. roll over state / address trees that are ready
    /// 5. register an additional forester
    /// 6. advance the light slot and perform epoch bookkeeping
    ///    (forester registration, work reporting, registration finalization)
    pub async fn activate_general_actions(&mut self) {
        // If we want to test rollovers we set the threshold to 0 for all newly created trees
        let rollover_threshold = if self.general_action_config.rollover.is_some() {
            Some(0)
        } else {
            None
        };
        // 1. Add a new user keypair.
        if self
            .rng
            .gen_bool(self.general_action_config.add_keypair.unwrap_or_default())
        {
            let user = Self::create_user(&mut self.rng, &mut self.rpc).await;
            self.users.push(user);
        }
        // 2a. Create a new state Merkle tree.
        if self.rng.gen_bool(
            self.general_action_config
                .create_state_mt
                .unwrap_or_default(),
        ) {
            self.create_state_tree(rollover_threshold).await;
            self.stats.create_state_mt += 1;
        }
        // 2b. Create a new address Merkle tree.
        if self.rng.gen_bool(
            self.general_action_config
                .create_address_mt
                .unwrap_or_default(),
        ) {
            self.create_address_tree(rollover_threshold).await;
            self.stats.create_address_mt += 1;
        }
        // 3a. Nullify compressed accounts in every state tree for which an
        // eligible forester exists in the current light slot.
        if self.rng.gen_bool(
            self.general_action_config
                .nullify_compressed_accounts
                .unwrap_or_default(),
        ) {
            for state_tree_bundle in self.indexer.get_state_merkle_trees_mut().iter_mut() {
                println!("\n --------------------------------------------------\n\t\t NULLIFYING LEAVES\n --------------------------------------------------");
                // find forester which is eligible this slot for this tree
                if let Some(payer) = Self::get_eligible_forester_for_queue(
                    &state_tree_bundle.accounts.nullifier_queue,
                    &self.foresters,
                    self.slot,
                ) {
                    // TODO: add newly added trees to foresters
                    nullify_compressed_accounts(
                        &mut self.rpc,
                        &payer,
                        state_tree_bundle,
                        self.epoch,
                        false,
                    )
                    .await
                    .unwrap();
                } else {
                    println!("No forester found for nullifier queue");
                };
            }
        }
        // 3b. Empty the address queue of every address tree for which an
        // eligible forester exists.
        if self.rng.gen_bool(
            self.general_action_config
                .empty_address_queue
                .unwrap_or_default(),
        ) {
            for address_merkle_tree_bundle in self.indexer.get_address_merkle_trees_mut().iter_mut()
            {
                // find forester which is eligible this slot for this tree
                if let Some(payer) = Self::get_eligible_forester_for_queue(
                    &address_merkle_tree_bundle.accounts.queue,
                    &self.foresters,
                    self.slot,
                ) {
                    println!("\n --------------------------------------------------\n\t\t Empty Address Queue\n --------------------------------------------------");
                    println!("epoch {}", self.epoch);
                    println!("forester {}", payer.pubkey());
                    // TODO: add newly added trees to foresters
                    empty_address_queue_test(
                        &payer,
                        &mut self.rpc,
                        address_merkle_tree_bundle,
                        false,
                        self.epoch,
                        false,
                    )
                    .await
                    .unwrap();
                } else {
                    println!("No forester found for address queue");
                };
            }
        }
        // 4a. Roll over any state tree that is ready and has an eligible
        // forester.
        for index in 0..self.indexer.get_state_merkle_trees().len() {
            let is_read_for_rollover = state_tree_ready_for_rollover(
                &mut self.rpc,
                self.indexer.get_state_merkle_trees()[index]
                    .accounts
                    .merkle_tree,
            )
            .await;
            if self
                .rng
                .gen_bool(self.general_action_config.rollover.unwrap_or_default())
                && is_read_for_rollover
            {
                println!("\n --------------------------------------------------\n\t\t Rollover State Merkle Tree\n --------------------------------------------------");
                // find forester which is eligible this slot for this tree
                if let Some(payer) = Self::get_eligible_forester_for_queue(
                    &self.indexer.get_state_merkle_trees()[index]
                        .accounts
                        .nullifier_queue,
                    &self.foresters,
                    self.slot,
                ) {
                    self.rollover_state_merkle_tree_and_queue(index, &payer, self.epoch)
                        .await
                        .unwrap();
                    self.stats.rolledover_state_trees += 1;
                }
            }
        }
        // 4b. Roll over any address tree that is ready and has an eligible
        // forester.
        for index in 0..self.indexer.get_address_merkle_trees().len() {
            let is_read_for_rollover = address_tree_ready_for_rollover(
                &mut self.rpc,
                self.indexer.get_address_merkle_trees()[index]
                    .accounts
                    .merkle_tree,
            )
            .await;
            if self
                .rng
                .gen_bool(self.general_action_config.rollover.unwrap_or_default())
                && is_read_for_rollover
            {
                // find forester which is eligible this slot for this tree
                if let Some(payer) = Self::get_eligible_forester_for_queue(
                    &self.indexer.get_address_merkle_trees()[index]
                        .accounts
                        .queue,
                    &self.foresters,
                    self.slot,
                ) {
                    println!("\n --------------------------------------------------\n\t\t Rollover Address Merkle Tree\n --------------------------------------------------");
                    self.rollover_address_merkle_tree_and_queue(index, &payer, self.epoch)
                        .await
                        .unwrap();
                    self.stats.rolledover_address_trees += 1;
                }
            }
        }
        // 5. Register an additional forester with a random fee.
        if self
            .rng
            .gen_bool(self.general_action_config.add_forester.unwrap_or_default())
        {
            println!("\n --------------------------------------------------\n\t\t Add Forester\n --------------------------------------------------");
            let forester = TestForester {
                keypair: Keypair::new(),
                forester: Forester::default(),
                is_registered: None,
            };
            let forester_config = ForesterConfig {
                fee: self.rng.gen_range(0..=100),
            };
            register_test_forester(
                &mut self.rpc,
                &self.governance_keypair,
                &forester.keypair.pubkey(),
                forester_config,
            )
            .await
            .unwrap();
            self.foresters.push(forester);
            self.stats.created_foresters += 1;
        }
        // advance to next light slot and perform forester epoch actions
        if !self.general_action_config.disable_epochs {
            println!("\n --------------------------------------------------\n\t\t Start Epoch Actions \n --------------------------------------------------");
            let current_solana_slot = self.rpc.get_slot().await.unwrap();
            let current_light_slot = self
                .protocol_config
                .get_current_active_epoch_progress(current_solana_slot)
                / self.protocol_config.slot_length;
            // If slot didn't change, advance to next slot
            // if current_light_slot != self.slot {
            // NOTE(review): the guard above is disabled, so the slot always
            // advances by one slot_length per round. The log below prints
            // `current_light_slot` although `self.slot` is then set to
            // `current_light_slot + 1` — presumably off by one in the log;
            // confirm intent.
            let new_slot = current_solana_slot + self.protocol_config.slot_length;
            println!("advanced slot from {} to {}", self.slot, current_light_slot);
            println!("solana slot from {} to {}", current_solana_slot, new_slot);
            self.rpc.warp_to_slot(new_slot).await.unwrap();
            self.slot = current_light_slot + 1;
            let current_solana_slot = self.rpc.get_slot().await.unwrap();
            // need to detect whether new registration phase started
            let current_registration_epoch = self
                .protocol_config
                .get_latest_register_epoch(current_solana_slot)
                .unwrap();
            // If reached new registration phase register all foresters
            if current_registration_epoch != self.registration_epoch {
                println!("\n --------------------------------------------------\n\t\t Register Foresters for new Epoch \n --------------------------------------------------");
                self.registration_epoch = current_registration_epoch;
                println!("new register epoch {}", self.registration_epoch);
                println!("num foresters {}", self.foresters.len());
                for forester in self.foresters.iter_mut() {
                    println!(
                        "registered forester {} for epoch {}",
                        forester.keypair.pubkey(),
                        self.registration_epoch
                    );
                    let registered_epoch = Epoch::register(
                        &mut self.rpc,
                        &self.protocol_config,
                        &forester.keypair,
                        &forester.keypair.pubkey(),
                    )
                    .await
                    .unwrap()
                    .unwrap();
                    println!("registered_epoch {:?}", registered_epoch.phases);
                    forester.forester.registration = registered_epoch;
                    // Remember the first epoch this forester registered in.
                    if forester.is_registered.is_none() {
                        forester.is_registered = Some(self.registration_epoch);
                    }
                    self.stats.registered_foresters += 1;
                }
            }
            let current_active_epoch = self
                .protocol_config
                .get_current_active_epoch(current_solana_slot)
                .unwrap();
            // If reached new active epoch
            // 1. move epoch in every forester to report work epoch
            // 2. report work for every forester
            // 3. finalize registration for every forester
            #[allow(clippy::comparison_chain)]
            if current_active_epoch > self.epoch {
                self.slot = current_light_slot;
                self.epoch = current_active_epoch;
                // 1. move epoch in every forester to report work epoch
                for forester in self.foresters.iter_mut() {
                    if forester.is_registered.is_none() {
                        continue;
                    }
                    forester.forester.switch_to_report_work();
                }
                println!("\n --------------------------------------------------\n\t\t Report Work \n --------------------------------------------------");
                // 2. report work for every forester
                for forester in self.foresters.iter_mut() {
                    if forester.is_registered.is_none() {
                        continue;
                    }
                    println!("report work for forester {}", forester.keypair.pubkey());
                    println!(
                        "forester.forester.report_work.forester_epoch_pda {}",
                        forester.forester.report_work.forester_epoch_pda
                    );
                    println!(
                        "forester.forester.report_work.epoch_pda {}",
                        forester.forester.report_work.epoch_pda
                    );
                    // Snapshot the pdas so the post-report state can be
                    // asserted against them.
                    let (pre_forester_epoch_pda, pre_epoch_pda) = fetch_epoch_and_forester_pdas(
                        &mut self.rpc,
                        &forester.forester.report_work.forester_epoch_pda,
                        &forester.forester.report_work.epoch_pda,
                    )
                    .await;
                    forester
                        .forester
                        .report_work(&mut self.rpc, &forester.keypair, &forester.keypair.pubkey())
                        .await
                        .unwrap();
                    println!("reported work");
                    assert_report_work(
                        &mut self.rpc,
                        &forester.forester.report_work.forester_epoch_pda,
                        &forester.forester.report_work.epoch_pda,
                        pre_forester_epoch_pda,
                        pre_epoch_pda,
                    )
                    .await;
                    self.stats.work_reported += 1;
                }
                // 3. finalize registration for every forester
                println!("\n --------------------------------------------------\n\t\t Finalize Registration \n --------------------------------------------------");
                // 3.1 get tree accounts
                // TODO: use TreeAccounts in TestIndexer
                let mut tree_accounts = self
                    .indexer
                    .get_state_merkle_trees()
                    .iter()
                    .map(|state_merkle_tree_bundle| TreeAccounts {
                        tree_type: TreeType::State,
                        merkle_tree: state_merkle_tree_bundle.accounts.merkle_tree,
                        queue: state_merkle_tree_bundle.accounts.nullifier_queue,
                        is_rolledover: false,
                    })
                    .collect::<Vec<TreeAccounts>>();
                self.indexer.get_address_merkle_trees().iter().for_each(
                    |address_merkle_tree_bundle| {
                        tree_accounts.push(TreeAccounts {
                            tree_type: TreeType::Address,
                            merkle_tree: address_merkle_tree_bundle.accounts.merkle_tree,
                            queue: address_merkle_tree_bundle.accounts.queue,
                            is_rolledover: false,
                        });
                    },
                );
                // 3.2 finalize registration for every forester
                for forester in self.foresters.iter_mut() {
                    if forester.is_registered.is_none() {
                        continue;
                    }
                    println!(
                        "registered forester {} for epoch {}",
                        forester.keypair.pubkey(),
                        self.epoch
                    );
                    println!(
                        "forester.forester registration epoch {:?}",
                        forester.forester.registration.epoch
                    );
                    println!(
                        "forester.forester active epoch {:?}",
                        forester.forester.active.epoch
                    );
                    println!(
                        "forester.forester report_work epoch {:?}",
                        forester.forester.report_work.epoch
                    );
                    forester
                        .forester
                        .active
                        .fetch_account_and_add_trees_with_schedule(&mut self.rpc, &tree_accounts)
                        .await
                        .unwrap();
                    let ix = create_finalize_registration_instruction(
                        &forester.keypair.pubkey(),
                        &forester.keypair.pubkey(),
                        forester.forester.active.epoch,
                    );
                    self.rpc
                        .create_and_send_transaction(
                            &[ix],
                            &forester.keypair.pubkey(),
                            &[&forester.keypair],
                        )
                        .await
                        .unwrap();
                    assert_finalized_epoch_registration(
                        &mut self.rpc,
                        &forester.forester.active.forester_epoch_pda,
                        &forester.forester.active.epoch_pda,
                    )
                    .await;
                    self.stats.finalized_registrations += 1;
                }
            } else if current_active_epoch < self.epoch {
                panic!(
                    "current_active_epoch {} is less than self.epoch {}",
                    current_active_epoch, self.epoch
                );
            }
        }
    }
pub async fn create_state_tree(&mut self, rollover_threshold: Option<u64>) {
let merkle_tree_keypair = Keypair::new(); //from_seed(&[self.rng.gen_range(0..255); 32]).unwrap();
let nullifier_queue_keypair = Keypair::new(); //from_seed(&[self.rng.gen_range(0..255); 32]).unwrap();
let cpi_context_keypair = Keypair::new();
let rollover_threshold = if let Some(rollover_threshold) = rollover_threshold {
Some(rollover_threshold)
} else if self.rng.gen_bool(0.5) && !self.keypair_action_config.fee_assert {
Some(self.rng.gen_range(1..100))
} else {
None
};
let merkle_tree_config = if !self.keypair_action_config.fee_assert {
StateMerkleTreeConfig {
height: 26,
changelog_size: self.rng.gen_range(1..5000),
roots_size: self.rng.gen_range(1..10000),
canopy_depth: 10,
network_fee: Some(5000),
close_threshold: None,
rollover_threshold,
}
} else {
StateMerkleTreeConfig::default()
};
println!("merkle tree config: {:?}", merkle_tree_config);
let queue_config = if !self.keypair_action_config.fee_assert {
let capacity: u32 = gen_prime(&mut self.rng, 1..10000).unwrap();
NullifierQueueConfig {
capacity: capacity as u16,
sequence_threshold: merkle_tree_config.roots_size + SAFETY_MARGIN,
network_fee: None,
}
} else if rollover_threshold.is_some() {
panic!("rollover_threshold should not be set when fee_assert is set (keypair_action_config.fee_assert)");
} else {
NullifierQueueConfig::default()
};
let forester = Pubkey::new_unique();
println!("queue config: {:?}", queue_config);
create_state_merkle_tree_and_queue_account(
&self.payer,
true,
&mut self.rpc,
&merkle_tree_keypair,
&nullifier_queue_keypair,
Some(&cpi_context_keypair),
None,
Some(forester),
1,
&merkle_tree_config,
&queue_config,
)
.await
.unwrap();
let merkle_tree = Box::new(light_merkle_tree_reference::MerkleTree::<Poseidon>::new(
STATE_MERKLE_TREE_HEIGHT as usize,
STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
));
let state_tree_account =
AccountZeroCopy::<account_compression::StateMerkleTreeAccount>::new(
&mut self.rpc,
nullifier_queue_keypair.pubkey(),
)
.await;
self.indexer
.get_state_merkle_trees_mut()
.push(StateMerkleTreeBundle {
rollover_fee: state_tree_account
.deserialized()
.metadata
.rollover_metadata
.rollover_fee as i64,
accounts: StateMerkleTreeAccounts {
merkle_tree: merkle_tree_keypair.pubkey(),
nullifier_queue: nullifier_queue_keypair.pubkey(),
cpi_context: cpi_context_keypair.pubkey(),
},
merkle_tree,
});
// TODO: Add assert
}
    /// Creates a new address Merkle tree and address queue account pair and
    /// registers the bundle — including a local reference indexed Merkle tree
    /// seeded with the init address — with the test indexer.
    pub async fn create_address_tree(&mut self, rollover_threshold: Option<u64>) {
        let merkle_tree_keypair = Keypair::new();
        // NOTE(review): despite the name, this is the *address* queue keypair.
        let nullifier_queue_keypair = Keypair::new();
        let rollover_threshold = if let Some(rollover_threshold) = rollover_threshold {
            Some(rollover_threshold)
        } else if self.rng.gen_bool(0.5) && !self.keypair_action_config.fee_assert {
            Some(self.rng.gen_range(1..100))
        } else {
            None
        };
        // Randomize configs unless fee assertions require the defaults.
        let (config, address_config) = if !self.keypair_action_config.fee_assert {
            let root_history = self.rng.gen_range(1..10000);
            (
                AddressMerkleTreeConfig {
                    height: 26,
                    changelog_size: self.rng.gen_range(1..5000),
                    roots_size: root_history,
                    canopy_depth: 10,
                    address_changelog_size: self.rng.gen_range(1..5000),
                    rollover_threshold,
                    network_fee: Some(5000),
                    close_threshold: None,
                    // TODO: double check that close threshold cannot be set
                },
                AddressQueueConfig {
                    sequence_threshold: root_history + SAFETY_MARGIN,
                    ..Default::default()
                },
            )
        } else if rollover_threshold.is_some() {
            panic!("rollover_threshold should not be set when fee_assert is set (keypair_action_config.fee_assert)");
        } else {
            (
                AddressMerkleTreeConfig::default(),
                AddressQueueConfig::default(),
            )
        };
        create_address_merkle_tree_and_queue_account_with_assert(
            &self.payer,
            true,
            &mut self.rpc,
            &merkle_tree_keypair,
            &nullifier_queue_keypair,
            None,
            None,
            &config,
            &address_config,
            0,
        )
        .await
        .unwrap();
        // Seed the local reference tree with the init address so it matches
        // the on-chain indexed tree's initial state.
        let init_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10).unwrap();
        // NOTE(review): the reference tree reuses the STATE tree
        // height/canopy constants — assumes they equal the address tree's
        // values (26/10 above); confirm if the configs ever diverge.
        let mut merkle_tree = Box::new(
            IndexedMerkleTree::<Poseidon, usize>::new(
                STATE_MERKLE_TREE_HEIGHT as usize,
                STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
            )
            .unwrap(),
        );
        let mut indexed_array = Box::<IndexedArray<Poseidon, usize>>::default();
        merkle_tree.append(&init_value, &mut indexed_array).unwrap();
        // The rollover fee is stored on the queue account's metadata.
        let queue_account = AccountZeroCopy::<account_compression::QueueAccount>::new(
            &mut self.rpc,
            nullifier_queue_keypair.pubkey(),
        )
        .await;
        self.indexer
            .get_address_merkle_trees_mut()
            .push(AddressMerkleTreeBundle {
                rollover_fee: queue_account
                    .deserialized()
                    .metadata
                    .rollover_metadata
                    .rollover_fee as i64,
                accounts: AddressMerkleTreeAccounts {
                    merkle_tree: merkle_tree_keypair.pubkey(),
                    queue: nullifier_queue_keypair.pubkey(),
                },
                merkle_tree,
                indexed_array,
            });
        // TODO: Add assert
    }
pub fn safe_gen_range<T, RR>(rng: &mut StdRng, range: RR, empty_fallback: T) -> T
where
T: SampleUniform + Copy,
RR: SampleRange<T> + Sized,
{
if range.is_empty() {
return empty_fallback;
}
rng.gen_range(range)
}
/// 1. Transfer spl tokens between random users
pub async fn activate_keypair_actions(&mut self, user: &Pubkey) {
let user_index = self
.users
.iter()
.position(|u| &u.keypair.pubkey() == user)
.unwrap();
// compress spl
// check sufficient spl balance
if self
.rng
.gen_bool(self.keypair_action_config.compress_spl.unwrap_or(0.0))
&& self.users[user_index].token_accounts.is_empty()
// TODO: enable compress spl test
{
self.compress_spl(user_index).await;
}
// decompress spl
// check sufficient compressed spl balance
if self
.rng
.gen_bool(self.keypair_action_config.decompress_spl.unwrap_or(0.0))
{
self.decompress_spl(user_index).await;
}
// transfer spl
// check sufficient compressed spl balance
if self
.rng
.gen_bool(self.keypair_action_config.transfer_spl.unwrap_or(0.0))
{
self.transfer_spl(user_index).await;
}
// create address
if self
.rng
.gen_bool(self.keypair_action_config.create_address.unwrap_or(0.0))
{
self.create_address(None, None).await;
}
// compress sol
// check sufficient sol balance
let balance = self
.rpc
.get_balance(&self.users[user_index].keypair.pubkey())
.await
.unwrap();
if self
.rng
.gen_bool(self.keypair_action_config.compress_sol.unwrap_or(0.0))
&& balance > 1000
{
self.compress_sol(user_index, balance).await;
} else {
println!("Not enough balance to compress sol. Balance: {}", balance);
}
// decompress sol
// check sufficient compressed sol balance
if self
.rng
.gen_bool(self.keypair_action_config.decompress_sol.unwrap_or(0.0))
{
self.decompress_sol(user_index).await;
}
// transfer sol
if self
.rng
.gen_bool(self.keypair_action_config.transfer_sol.unwrap_or(0.0))
{
self.transfer_sol(user_index).await;
}
// approve spl
if self
.rng
.gen_bool(self.keypair_action_config.approve_spl.unwrap_or(0.0))
&& !self.users[user_index].token_accounts.is_empty()
{
self.approve_spl(user_index).await;
}
// revoke spl
if self
.rng
.gen_bool(self.keypair_action_config.revoke_spl.unwrap_or(0.0))
&& !self.users[user_index].token_accounts.is_empty()
{
self.revoke_spl(user_index).await;
}
// burn spl
if self
.rng
.gen_bool(self.keypair_action_config.burn_spl.unwrap_or(0.0))
&& !self.users[user_index].token_accounts.is_empty()
{
self.burn_spl(user_index).await;
}
// freeze spl
if self
.rng
.gen_bool(self.keypair_action_config.freeze_spl.unwrap_or(0.0))
&& !self.users[user_index].token_accounts.is_empty()
{
self.freeze_spl(user_index).await;
}
// thaw spl
if self
.rng
.gen_bool(self.keypair_action_config.thaw_spl.unwrap_or(0.0))
&& !self.users[user_index].token_accounts.is_empty()
{
self.thaw_spl(user_index).await;
}
}
pub fn get_eligible_forester_for_queue(
queue_pubkey: &Pubkey,
foresters: &[TestForester],
light_slot: u64,
) -> Option<Keypair> {
for f in foresters.iter() {
let tree = f
.forester
.active
.merkle_trees
.iter()
.find(|mt| mt.tree_accounts.queue == *queue_pubkey);
if let Some(tree) = tree {
if tree.is_eligible(light_slot) {
return Some(f.keypair.insecure_clone());
}
}
}
None
}
pub async fn transfer_sol_deterministic(
&mut self,
from: &Keypair,
to: &Pubkey,
tree_index: Option<usize>,
) -> Result<Signature, RpcError> {
let input_compressed_accounts = self.get_compressed_sol_accounts(&from.pubkey());
let output_merkle_tree = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)]
.accounts
.merkle_tree;
let recipients = vec![*to];
let transaction_params = if self.keypair_action_config.fee_assert {
Some(TransactionParams {
num_new_addresses: 0,
num_input_compressed_accounts: input_compressed_accounts.len() as u8,
num_output_compressed_accounts: 1u8,
compress: 0,
fee_config: FeeConfig::default(),
})
} else {
None
};
transfer_compressed_sol_test(
&mut self.rpc,
&mut self.indexer,
from,
input_compressed_accounts.as_slice(),
recipients.as_slice(),
&[output_merkle_tree],
transaction_params,
)
.await
}
    /// Transfers a random subset of the user's compressed sol accounts to a
    /// random set of recipients (one output per recipient), each output in a
    /// randomly chosen state Merkle tree. No-op if the user has no
    /// compressed sol accounts.
    pub async fn transfer_sol(&mut self, user_index: usize) {
        let input_compressed_accounts = self.get_random_compressed_sol_accounts(user_index);
        if !input_compressed_accounts.is_empty() {
            println!("\n --------------------------------------------------\n\t\t Transfer Sol\n --------------------------------------------------");
            let recipients = self
                .users
                .iter()
                .map(|u| u.keypair.pubkey())
                .collect::<Vec<Pubkey>>();
            // Number of outputs; doubles as the number of recipients and the
            // number of output Merkle trees below.
            let num_output_merkle_trees = Self::safe_gen_range(
                &mut self.rng,
                1..std::cmp::min(
                    self.keypair_action_config
                        .max_output_accounts
                        .unwrap_or(recipients.len() as u64),
                    recipients.len() as u64,
                ),
                1,
            );
            let recipients = recipients
                .choose_multiple(&mut self.rng, num_output_merkle_trees as usize)
                .copied()
                .collect::<Vec<_>>();
            let output_merkle_trees = self.get_merkle_tree_pubkeys(num_output_merkle_trees);
            // Fee assertions need the exact account counts of the transaction.
            let transaction_parameters = if self.keypair_action_config.fee_assert {
                Some(TransactionParams {
                    num_new_addresses: 0,
                    num_input_compressed_accounts: input_compressed_accounts.len() as u8,
                    num_output_compressed_accounts: num_output_merkle_trees as u8,
                    compress: 0,
                    fee_config: FeeConfig::default(),
                })
            } else {
                None
            };
            transfer_compressed_sol_test(
                &mut self.rpc,
                &mut self.indexer,
                &self.users[user_index].keypair,
                input_compressed_accounts.as_slice(),
                recipients.as_slice(),
                output_merkle_trees.as_slice(),
                transaction_parameters,
            )
            .await
            .unwrap();
            self.stats.sol_transfers += 1;
        }
    }
pub async fn decompress_sol(&mut self, user_index: usize) {
let input_compressed_accounts = self.get_random_compressed_sol_accounts(user_index);
if !input_compressed_accounts.is_empty() {
println!("\n --------------------------------------------------\n\t\t Decompress Sol\n --------------------------------------------------");
let output_merkle_tree = self.get_merkle_tree_pubkeys(1)[0];
let recipient = self.users
[Self::safe_gen_range(&mut self.rng, 0..std::cmp::min(self.users.len(), 6), 0)]
.keypair
.pubkey();
let balance = input_compressed_accounts
.iter()
.map(|x| x.compressed_account.lamports)
.sum::<u64>();
let decompress_amount = Self::safe_gen_range(&mut self.rng, 1000..balance, balance / 2);
let transaction_paramets = if self.keypair_action_config.fee_assert {
Some(TransactionParams {
num_new_addresses: 0,
num_input_compressed_accounts: input_compressed_accounts.len() as u8,
num_output_compressed_accounts: 1u8,
compress: 0,
fee_config: FeeConfig::default(),
})
} else {
None
};
decompress_sol_test(
&mut self.rpc,
&mut self.indexer,
&self.users[user_index].keypair,
&input_compressed_accounts,
&recipient,
decompress_amount,
&output_merkle_tree,
transaction_paramets,
)
.await
.unwrap();
self.stats.sol_decompress += 1;
}
}
pub async fn compress_sol_deterministic(
&mut self,
from: &Keypair,
amount: u64,
tree_index: Option<usize>,
) {
let input_compressed_accounts = self.get_compressed_sol_accounts(&from.pubkey());
let output_merkle_tree = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)]
.accounts
.merkle_tree;
let transaction_parameters = if self.keypair_action_config.fee_assert {
Some(TransactionParams {
num_new_addresses: 0,
num_input_compressed_accounts: input_compressed_accounts.len() as u8,
num_output_compressed_accounts: 1u8,
compress: amount as i64,
fee_config: FeeConfig::default(),
})
} else {
None
};
compress_sol_test(
&mut self.rpc,
&mut self.indexer,
from,
input_compressed_accounts.as_slice(),
false,
amount,
&output_merkle_tree,
transaction_parameters,
)
.await
.unwrap();
}
    /// Compresses a random amount (capped at 1 SOL) of the user's balance
    /// and then airdrops the same amount back so the user's lamports are
    /// not depleted over many iterations.
    pub async fn compress_sol(&mut self, user_index: usize, balance: u64) {
        println!("\n --------------------------------------------------\n\t\t Compress Sol\n --------------------------------------------------");
        // Limit max compress amount to 1 sol so that context.payer doesn't get depleted by airdrops.
        let max_amount = std::cmp::min(balance, 1_000_000_000);
        let amount = Self::safe_gen_range(&mut self.rng, 1000..max_amount, max_amount / 2);
        let input_compressed_accounts = self.get_random_compressed_sol_accounts(user_index);
        let create_output_compressed_accounts_for_input_accounts = false;
        // TODO: debug Merkle trees in wrong order
        // if input_compressed_accounts.is_empty() {
        //     false
        // } else {
        //     self.rng.gen_bool(0.5)
        // };
        let output_merkle_tree = self.get_merkle_tree_pubkeys(1)[0];
        let transaction_parameters = if self.keypair_action_config.fee_assert {
            Some(TransactionParams {
                num_new_addresses: 0,
                num_input_compressed_accounts: input_compressed_accounts.len() as u8,
                num_output_compressed_accounts: 1u8,
                compress: amount as i64,
                fee_config: FeeConfig::default(),
            })
        } else {
            None
        };
        compress_sol_test(
            &mut self.rpc,
            &mut self.indexer,
            &self.users[user_index].keypair,
            input_compressed_accounts.as_slice(),
            create_output_compressed_accounts_for_input_accounts,
            amount,
            &output_merkle_tree,
            transaction_parameters,
        )
        .await
        .unwrap();
        // Refund the compressed amount so repeated compressions don't drain
        // the user's uncompressed balance.
        airdrop_lamports(
            &mut self.rpc,
            &self.users[user_index].keypair.pubkey(),
            amount,
        )
        .await
        .unwrap();
        self.stats.sol_compress += 1;
    }
pub async fn create_address(
&mut self,
optional_addresses: Option<Vec<Pubkey>>,
address_tree_index: Option<usize>,
) -> Vec<Pubkey> {
println!("\n --------------------------------------------------\n\t\t Create Address\n --------------------------------------------------");
// select number of addresses to create
let num_addresses = self.rng.gen_range(1..=2);
let (address_merkle_tree_pubkeys, address_queue_pubkeys) =
if let Some(address_tree_index) = address_tree_index {
(
vec![
self.indexer.get_address_merkle_trees()[address_tree_index]
.accounts
.merkle_tree;
num_addresses as usize
],
vec![
self.indexer.get_address_merkle_trees()[address_tree_index]
.accounts
.queue;
num_addresses as usize
],
)
} else {
// select random address Merkle tree(s)
self.get_address_merkle_tree_pubkeys(num_addresses)
};
let mut address_seeds = Vec::new();
let mut created_addresses = Vec::new();
if let Some(addresses) = optional_addresses {
for address in addresses {
let address_seed: [u8; 32] = address.to_bytes();
address_seeds.push(address_seed);
created_addresses.push(address);
}
} else {
for _ in 0..num_addresses {
let address_seed: [u8; 32] =
bigint_to_be_bytes_array::<32>(&self.rng.gen_biguint(256)).unwrap();
address_seeds.push(address_seed);
created_addresses.push(Pubkey::from(address_seed));
}
}
let output_compressed_accounts = self.get_merkle_tree_pubkeys(num_addresses);
let transaction_parameters = if self.keypair_action_config.fee_assert {
Some(TransactionParams {
num_new_addresses: num_addresses as u8,
num_input_compressed_accounts: 0u8,
num_output_compressed_accounts: num_addresses as u8,
compress: 0,
fee_config: FeeConfig::default(),
})
} else {
None
};
// TODO: add other input compressed accounts
// (to test whether the address generation degrades performance)
create_addresses_test(
&mut self.rpc,
&mut self.indexer,
address_merkle_tree_pubkeys.as_slice(),
address_queue_pubkeys.as_slice(),
output_compressed_accounts,
address_seeds.as_slice(),
&Vec::new(),
false,
transaction_parameters,
)
.await
.unwrap();
self.stats.create_address += num_addresses;
created_addresses
}
pub async fn transfer_spl(&mut self, user_index: usize) {
let user = &self.users[user_index].keypair.pubkey();
println!("\n --------------------------------------------------\n\t\t Tranfer Spl\n --------------------------------------------------");
let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await;
if token_accounts.is_empty() {
let mt_pubkeys = self.get_merkle_tree_pubkeys(1);
mint_tokens_helper(
&mut self.rpc,
&mut self.indexer,
&mt_pubkeys[0],
&self.payer,
&mint,
vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1],
vec![*user; 1],
)
.await;
let (_, _token_accounts) = self.select_random_compressed_token_accounts(user).await;
token_accounts = _token_accounts;
}
let recipients = token_accounts
.iter()
.map(|_| {
self.users
[Self::safe_gen_range(&mut self.rng, 0..std::cmp::min(self.users.len(), 6), 0)]
.keypair
.pubkey()
})
.collect::<Vec<_>>();
println!("Recipients: {:?}", recipients.len());
let max_amount = token_accounts
.iter()
.map(|token_account| token_account.token_data.amount)
.sum::<u64>();
let amount = Self::safe_gen_range(&mut self.rng, 1000..max_amount, max_amount / 2);
let equal_amount = amount / recipients.len() as u64;
let num_output_compressed_accounts = if max_amount - amount != 0 {
recipients.len() + 1
} else {
recipients.len()
};
// get different amounts for each recipient so that every compressed account is unique
let amounts = recipients
.iter()
.enumerate()
.map(|(i, _)| equal_amount - i as u64)
.collect::<Vec<u64>>();
let output_merkle_tree_pubkeys =
self.get_merkle_tree_pubkeys(num_output_compressed_accounts as u64);
let transaction_paramets = if self.keypair_action_config.fee_assert {
Some(TransactionParams {
num_new_addresses: 0u8,
num_input_compressed_accounts: token_accounts.len() as u8,
num_output_compressed_accounts: output_merkle_tree_pubkeys.len() as u8,
compress: 0,
fee_config: FeeConfig::default(),
})
} else {
None
};
compressed_transfer_test(
&self.rpc.get_payer().insecure_clone(),
&mut self.rpc,
&mut self.indexer,
&mint,
&self.users[user_index].keypair.insecure_clone(),
&recipients,
&amounts,
None,
&token_accounts,
&output_merkle_tree_pubkeys,
None,
false,
transaction_paramets,
)
.await;
self.stats.spl_transfers += 1;
}
    /// Approves a random delegate for a random amount of the user's
    /// compressed tokens, minting fresh tokens first when the user holds
    /// none.
    pub async fn approve_spl(&mut self, user_index: usize) {
        let user = &self.users[user_index].keypair.pubkey();
        println!("\n --------------------------------------------------\n\t\t Approve Spl\n --------------------------------------------------");
        let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await;
        if token_accounts.is_empty() {
            let mt_pubkeys = self.get_merkle_tree_pubkeys(1);
            mint_tokens_helper(
                &mut self.rpc,
                &mut self.indexer,
                &mt_pubkeys[0],
                &self.payer,
                &mint,
                vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1],
                vec![*user; 1],
            )
            .await;
            let (_, _token_accounts) = self.select_random_compressed_token_accounts(user).await;
            token_accounts = _token_accounts;
        }
        println!("token_accounts: {:?}", token_accounts);
        // Any user (including the owner) can be picked as delegate.
        let rnd_user_index = self.rng.gen_range(0..self.users.len());
        let delegate = self.users[rnd_user_index].keypair.pubkey();
        let max_amount = token_accounts
            .iter()
            .map(|token_account| token_account.token_data.amount)
            .sum::<u64>();
        let delegate_amount = Self::safe_gen_range(&mut self.rng, 0..max_amount, max_amount / 2);
        // A second output account holds the undelegated change.
        let num_output_compressed_accounts = if delegate_amount != max_amount { 2 } else { 1 };
        let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(2);
        let transaction_paramets = if self.keypair_action_config.fee_assert {
            Some(TransactionParams {
                num_new_addresses: 0u8,
                num_input_compressed_accounts: token_accounts.len() as u8,
                num_output_compressed_accounts,
                compress: 0,
                fee_config: FeeConfig::default(),
            })
        } else {
            None
        };
        approve_test(
            &self.users[user_index].keypair,
            &mut self.rpc,
            &mut self.indexer,
            token_accounts,
            delegate_amount,
            None,
            &delegate,
            &output_merkle_tree_pubkeys[0],
            &output_merkle_tree_pubkeys[1],
            transaction_paramets,
        )
        .await;
        self.stats.spl_approved += 1;
    }
    /// Revokes delegation on a random subset of the user's delegated
    /// compressed token accounts. If the user has no delegated accounts,
    /// tokens are minted and an approval is performed first.
    pub async fn revoke_spl(&mut self, user_index: usize) {
        let user = &self.users[user_index].keypair.pubkey();
        println!("\n --------------------------------------------------\n\t\t Revoke Spl\n --------------------------------------------------");
        let (mint, mut token_accounts) = self
            .select_random_compressed_token_accounts_delegated(user, true, None, false)
            .await;
        if token_accounts.is_empty() {
            let mt_pubkeys = self.get_merkle_tree_pubkeys(1);
            mint_tokens_helper(
                &mut self.rpc,
                &mut self.indexer,
                &mt_pubkeys[0],
                &self.payer,
                &mint,
                vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1],
                vec![*user; 1],
            )
            .await;
            // Create a delegation so there is something to revoke.
            self.approve_spl(user_index).await;
            let (_, _token_accounts) = self
                .select_random_compressed_token_accounts_delegated(user, true, None, false)
                .await;
            token_accounts = _token_accounts;
        }
        // Revoking merges all inputs into a single output account.
        let num_output_compressed_accounts = 1;
        let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(1);
        let transaction_paramets = if self.keypair_action_config.fee_assert {
            Some(TransactionParams {
                num_new_addresses: 0u8,
                num_input_compressed_accounts: token_accounts.len() as u8,
                num_output_compressed_accounts,
                compress: 0,
                fee_config: FeeConfig::default(),
            })
        } else {
            None
        };
        revoke_test(
            &self.users[user_index].keypair,
            &mut self.rpc,
            &mut self.indexer,
            token_accounts,
            &output_merkle_tree_pubkeys[0],
            transaction_paramets,
        )
        .await;
        self.stats.spl_revoked += 1;
    }
    /// Burns a random amount of the user's compressed tokens, minting fresh
    /// tokens first when the user holds none.
    pub async fn burn_spl(&mut self, user_index: usize) {
        let user = &self.users[user_index].keypair.pubkey();
        println!("\n --------------------------------------------------\n\t\t Burn Spl\n --------------------------------------------------");
        let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await;
        if token_accounts.is_empty() {
            let mt_pubkeys = self.get_merkle_tree_pubkeys(1);
            mint_tokens_helper(
                &mut self.rpc,
                &mut self.indexer,
                &mt_pubkeys[0],
                &self.payer,
                &mint,
                vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1],
                vec![*user; 1],
            )
            .await;
            let (_, _token_accounts) = self.select_random_compressed_token_accounts(user).await;
            token_accounts = _token_accounts;
        }
        let max_amount = token_accounts
            .iter()
            .map(|token_account| token_account.token_data.amount)
            .sum::<u64>();
        let burn_amount = Self::safe_gen_range(&mut self.rng, 0..max_amount, max_amount / 2);
        // A full burn leaves no output account; a partial burn leaves one
        // change account.
        let num_output_compressed_accounts = if burn_amount != max_amount { 1 } else { 0 };
        let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(1);
        let transaction_paramets = if self.keypair_action_config.fee_assert {
            Some(TransactionParams {
                num_new_addresses: 0u8,
                num_input_compressed_accounts: token_accounts.len() as u8,
                num_output_compressed_accounts,
                compress: 0,
                fee_config: FeeConfig::default(),
            })
        } else {
            None
        };
        burn_test(
            &self.users[user_index].keypair,
            &mut self.rpc,
            &mut self.indexer,
            token_accounts,
            &output_merkle_tree_pubkeys[0],
            burn_amount,
            false,
            transaction_paramets,
            false,
        )
        .await;
        self.stats.spl_burned += 1;
    }
    /// Freezes a random subset of the user's compressed token accounts,
    /// minting fresh tokens first when the user holds none. The freeze is
    /// signed by the rpc payer (assumed to hold freeze authority --
    /// TODO confirm against the test setup).
    pub async fn freeze_spl(&mut self, user_index: usize) {
        let user = &self.users[user_index].keypair.pubkey();
        println!("\n --------------------------------------------------\n\t\t Freeze Spl\n --------------------------------------------------");
        let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await;
        if token_accounts.is_empty() {
            let mt_pubkeys = self.get_merkle_tree_pubkeys(1);
            mint_tokens_helper(
                &mut self.rpc,
                &mut self.indexer,
                &mt_pubkeys[0],
                &self.payer,
                &mint,
                vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1],
                vec![*user; 1],
            )
            .await;
            let (_, _token_accounts) = self
                .select_random_compressed_token_accounts_delegated(user, false, None, false)
                .await;
            token_accounts = _token_accounts;
        }
        let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(1);
        // Freezing produces one output account per input account.
        let transaction_paramets = if self.keypair_action_config.fee_assert {
            Some(TransactionParams {
                num_new_addresses: 0u8,
                num_input_compressed_accounts: token_accounts.len() as u8,
                num_output_compressed_accounts: token_accounts.len() as u8,
                compress: 0,
                fee_config: FeeConfig::default(),
            })
        } else {
            None
        };
        freeze_test(
            &self.rpc.get_payer().insecure_clone(),
            &mut self.rpc,
            &mut self.indexer,
            token_accounts,
            &output_merkle_tree_pubkeys[0],
            transaction_paramets,
        )
        .await;
        self.stats.spl_frozen += 1;
    }
    /// Thaws a random subset of the user's frozen compressed token accounts,
    /// freezing some first when none are frozen. Signed by the rpc payer,
    /// mirroring `freeze_spl`.
    pub async fn thaw_spl(&mut self, user_index: usize) {
        let user = &self.users[user_index].keypair.pubkey();
        println!("\n --------------------------------------------------\n\t\t Thaw Spl\n --------------------------------------------------");
        let (_, mut token_accounts) = self
            .select_random_compressed_token_accounts_frozen(user)
            .await;
        if token_accounts.is_empty() {
            // Ensure there is at least one frozen account to thaw.
            self.freeze_spl(user_index).await;
            let (_, _token_accounts) = self
                .select_random_compressed_token_accounts_frozen(user)
                .await;
            token_accounts = _token_accounts;
        }
        let output_merkle_tree_pubkeys = self.get_merkle_tree_pubkeys(1);
        // Thawing produces one output account per input account.
        let transaction_paramets = if self.keypair_action_config.fee_assert {
            Some(TransactionParams {
                num_new_addresses: 0u8,
                num_input_compressed_accounts: token_accounts.len() as u8,
                num_output_compressed_accounts: token_accounts.len() as u8,
                compress: 0,
                fee_config: FeeConfig::default(),
            })
        } else {
            None
        };
        thaw_test(
            &self.rpc.get_payer().insecure_clone(),
            &mut self.rpc,
            &mut self.indexer,
            token_accounts,
            &output_merkle_tree_pubkeys[0],
            transaction_paramets,
        )
        .await;
        self.stats.spl_thawed += 1;
    }
    /// Compresses a random amount from one of the user's SPL token accounts
    /// that has a non-zero balance. No-op when all of the user's token
    /// accounts are empty.
    pub async fn compress_spl(&mut self, user_index: usize) {
        println!("\n --------------------------------------------------\n\t\t Compress Spl\n --------------------------------------------------");
        let mut balance = 0;
        let mut mint = Pubkey::default();
        let mut token_account = Pubkey::default();
        // Sample token accounts (with replacement) until one with a balance
        // is found, bounded by the number of accounts.
        for _ in 0..self.users[user_index].token_accounts.len() {
            let (_mint, _token_account) = self.users[user_index].token_accounts[self
                .rng
                .gen_range(0..self.users[user_index].token_accounts.len())];
            token_account = _token_account;
            mint = _mint;
            self.rpc.get_account(_token_account).await.unwrap();
            use solana_sdk::program_pack::Pack;
            let account = spl_token::state::Account::unpack(
                &self
                    .rpc
                    .get_account(_token_account)
                    .await
                    .unwrap()
                    .unwrap()
                    .data,
            )
            .unwrap();
            balance = account.amount;
            if balance != 0 {
                break;
            }
        }
        if balance != 0 {
            // NOTE(review): this pushes an account that was selected FROM
            // `token_accounts`, creating a duplicate entry each call --
            // confirm whether the duplication is intentional weighting.
            self.users[user_index]
                .token_accounts
                .push((mint, token_account));
            let output_merkle_tree_account = self.get_merkle_tree_pubkeys(1);
            let amount = Self::safe_gen_range(&mut self.rng, 1000..balance, balance / 2);
            let transaction_paramets = if self.keypair_action_config.fee_assert {
                Some(TransactionParams {
                    num_new_addresses: 0u8,
                    num_input_compressed_accounts: 0u8,
                    num_output_compressed_accounts: 1u8,
                    compress: 0, // sol amount this is a spl compress test
                    fee_config: FeeConfig::default(),
                })
            } else {
                None
            };
            compress_test(
                &self.users[user_index].keypair,
                &mut self.rpc,
                &mut self.indexer,
                amount,
                &mint,
                &output_merkle_tree_account[0],
                &token_account,
                transaction_paramets,
                false,
            )
            .await;
            self.stats.spl_compress += 1;
        }
    }
pub async fn decompress_spl(&mut self, user_index: usize) {
let user = &self.users[user_index].keypair.pubkey();
println!("\n --------------------------------------------------\n\t\t Decompress Spl\n --------------------------------------------------");
let (mint, mut token_accounts) = self.select_random_compressed_token_accounts(user).await;
if token_accounts.is_empty() {
let mt_pubkeys = self.get_merkle_tree_pubkeys(1);
mint_tokens_helper(
&mut self.rpc,
&mut self.indexer,
&mt_pubkeys[0],
&self.payer,
&mint,
vec![Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000); 1],
vec![*user; 1],
)
.await;
let (_, _token_accounts) = self.select_random_compressed_token_accounts(user).await;
token_accounts = _token_accounts;
}
let token_account = match self.users[user_index]
.token_accounts
.iter()
.find(|t| t.0 == mint)
{
Some(token_account) => token_account.1,
None => {
let token_account_keypair = Keypair::new();
create_token_account(
&mut self.rpc,
&mint,
&token_account_keypair,
&self.users[user_index].keypair,
)
.await
.unwrap();
token_account_keypair.pubkey()
}
};
self.users[user_index]
.token_accounts
.push((mint, token_account));
let output_merkle_tree_account = self.get_merkle_tree_pubkeys(1);
let max_amount = token_accounts
.iter()
.map(|token_account| token_account.token_data.amount)
.sum::<u64>();
let amount = Self::safe_gen_range(&mut self.rng, 1000..max_amount, max_amount / 2);
let transaction_paramets = if self.keypair_action_config.fee_assert {
Some(TransactionParams {
num_new_addresses: 0u8,
num_input_compressed_accounts: token_accounts.len() as u8,
num_output_compressed_accounts: 1u8,
compress: 0,
fee_config: FeeConfig::default(),
})
} else {
None
};
// decompress
decompress_test(
&self.users[user_index].keypair,
&mut self.rpc,
&mut self.indexer,
token_accounts.clone(),
amount,
&output_merkle_tree_account[0],
&token_account,
transaction_paramets,
false,
)
.await;
self.stats.spl_decompress += 1;
}
    /// Rolls over the state Merkle tree and nullifier queue at `index` into
    /// freshly created accounts, asserts the rollover, and registers the new
    /// tree bundle with the indexer.
    pub async fn rollover_state_merkle_tree_and_queue(
        &mut self,
        index: usize,
        payer: &Keypair,
        epoch: u64,
    ) -> Result<(), RpcError> {
        let bundle = self.indexer.get_state_merkle_trees()[index].accounts;
        let new_nullifier_queue_keypair = Keypair::new();
        let new_merkle_tree_keypair = Keypair::new();
        // TODO: move into registry program
        let new_cpi_signature_keypair = Keypair::new();
        // Snapshot the fee payer balance so the rollover assertion can
        // verify the lamport accounting.
        let fee_payer_balance = self
            .rpc
            .get_balance(&self.indexer.get_payer().pubkey())
            .await
            .unwrap();
        let rollover_signature_and_slot = perform_state_merkle_tree_roll_over_forester(
            payer,
            &mut self.rpc,
            &new_nullifier_queue_keypair,
            &new_merkle_tree_keypair,
            &new_cpi_signature_keypair,
            &bundle.merkle_tree,
            &bundle.nullifier_queue,
            epoch,
            false,
        )
        .await
        .unwrap();
        info!("Rollover signature: {:?}", rollover_signature_and_slot.0);
        // Rent for the cpi context account created alongside the new pair.
        let additional_rent = self
            .rpc
            .get_minimum_balance_for_rent_exemption(
                ProtocolConfig::default().cpi_context_size as usize,
            )
            .await
            .unwrap();
        info!("additional_rent: {:?}", additional_rent);
        assert_rolled_over_pair(
            &self.indexer.get_payer().pubkey(),
            &mut self.rpc,
            &fee_payer_balance,
            &bundle.merkle_tree,
            &bundle.nullifier_queue,
            &new_merkle_tree_keypair.pubkey(),
            &new_nullifier_queue_keypair.pubkey(),
            rollover_signature_and_slot.1,
            additional_rent,
            4,
        )
        .await;
        // Track the new tree so subsequent actions can select it.
        self.indexer
            .get_state_merkle_trees_mut()
            .push(StateMerkleTreeBundle {
                // TODO: fetch correct fee when this property is used
                rollover_fee: 0,
                accounts: StateMerkleTreeAccounts {
                    merkle_tree: new_merkle_tree_keypair.pubkey(),
                    nullifier_queue: new_nullifier_queue_keypair.pubkey(),
                    cpi_context: new_cpi_signature_keypair.pubkey(),
                },
                merkle_tree: Box::new(light_merkle_tree_reference::MerkleTree::<Poseidon>::new(
                    STATE_MERKLE_TREE_HEIGHT as usize,
                    STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
                )),
            });
        Ok(())
    }
    /// Rolls over the address Merkle tree and queue at `index` into freshly
    /// created accounts, asserts the rollover, and registers the new
    /// accounts with the indexer.
    pub async fn rollover_address_merkle_tree_and_queue(
        &mut self,
        index: usize,
        payer: &Keypair,
        epoch: u64,
    ) -> Result<(), RpcError> {
        let bundle = self.indexer.get_address_merkle_trees()[index].accounts;
        let new_nullifier_queue_keypair = Keypair::new();
        let new_merkle_tree_keypair = Keypair::new();
        // Snapshot the fee payer balance so the rollover assertion can
        // verify the lamport accounting.
        let fee_payer_balance = self
            .rpc
            .get_balance(&self.indexer.get_payer().pubkey())
            .await
            .unwrap();
        println!("prior balance {}", fee_payer_balance);
        perform_address_merkle_tree_roll_over_forester(
            payer,
            &mut self.rpc,
            &new_nullifier_queue_keypair,
            &new_merkle_tree_keypair,
            &bundle.merkle_tree,
            &bundle.queue,
            epoch,
            false,
        )
        .await?;
        assert_rolled_over_address_merkle_tree_and_queue(
            &self.indexer.get_payer().pubkey(),
            &mut self.rpc,
            &fee_payer_balance,
            &bundle.merkle_tree,
            &bundle.queue,
            &new_merkle_tree_keypair.pubkey(),
            &new_nullifier_queue_keypair.pubkey(),
        )
        .await;
        // Track the new tree so subsequent actions can select it.
        self.indexer.add_address_merkle_tree_accounts(
            &new_merkle_tree_keypair,
            &new_nullifier_queue_keypair,
            None,
        );
        Ok(())
    }
pub fn get_random_compressed_sol_accounts(
&mut self,
user_index: usize,
) -> Vec<CompressedAccountWithMerkleContext> {
let input_compressed_accounts = self
.indexer
.get_compressed_accounts_by_owner(&self.users[user_index].keypair.pubkey());
let range = std::cmp::min(input_compressed_accounts.len(), 4);
let number_of_compressed_accounts = Self::safe_gen_range(&mut self.rng, 0..=range, 0);
input_compressed_accounts[0..number_of_compressed_accounts].to_vec()
}
    /// Returns all compressed sol accounts owned by `pubkey`.
    pub fn get_compressed_sol_accounts(
        &self,
        pubkey: &Pubkey,
    ) -> Vec<CompressedAccountWithMerkleContext> {
        self.indexer.get_compressed_accounts_by_owner(pubkey)
    }
pub fn get_merkle_tree_pubkeys(&mut self, num: u64) -> Vec<Pubkey> {
let mut pubkeys = vec![];
for _ in 0..num {
let range_max: usize = std::cmp::min(
self.keypair_action_config
.max_output_accounts
.unwrap_or(self.indexer.get_state_merkle_trees().len() as u64),
self.indexer.get_state_merkle_trees().len() as u64,
) as usize;
let index = Self::safe_gen_range(&mut self.rng, 0..range_max, 0);
pubkeys.push(
self.indexer.get_state_merkle_trees()[index]
.accounts
.merkle_tree,
);
}
pubkeys.sort();
pubkeys
}
pub fn get_address_merkle_tree_pubkeys(&mut self, num: u64) -> (Vec<Pubkey>, Vec<Pubkey>) {
let mut pubkeys = vec![];
let mut queue_pubkeys = vec![];
for _ in 0..num {
let index = Self::safe_gen_range(
&mut self.rng,
0..self.indexer.get_address_merkle_trees().len(),
0,
);
pubkeys.push(
self.indexer.get_address_merkle_trees()[index]
.accounts
.merkle_tree,
);
queue_pubkeys.push(
self.indexer.get_address_merkle_trees()[index]
.accounts
.queue,
);
}
(pubkeys, queue_pubkeys)
}
    /// Convenience wrapper: selects random initialized (non-delegated
    /// filtering, non-frozen) compressed token accounts for `user`.
    pub async fn select_random_compressed_token_accounts(
        &mut self,
        user: &Pubkey,
    ) -> (Pubkey, Vec<TokenDataWithContext>) {
        self.select_random_compressed_token_accounts_delegated(user, false, None, false)
            .await
    }
    /// Convenience wrapper: selects random frozen compressed token accounts
    /// for `user`.
    pub async fn select_random_compressed_token_accounts_frozen(
        &mut self,
        user: &Pubkey,
    ) -> (Pubkey, Vec<TokenDataWithContext>) {
        self.select_random_compressed_token_accounts_delegated(user, false, None, true)
            .await
    }
pub async fn select_random_compressed_token_accounts_delegated(
&mut self,
user: &Pubkey,
delegated: bool,
delegate: Option<Pubkey>,
frozen: bool,
) -> (Pubkey, Vec<TokenDataWithContext>) {
let user_token_accounts = &mut self.indexer.get_compressed_token_accounts_by_owner(user);
// clean up dust so that we don't run into issues that account balances are too low
user_token_accounts.retain(|t| t.token_data.amount > 1000);
let mut token_accounts_with_mint;
let mint;
if user_token_accounts.is_empty() {
mint = self.indexer.get_token_compressed_accounts()[self
.rng
.gen_range(0..self.indexer.get_token_compressed_accounts().len())]
.token_data
.mint;
let number_of_compressed_accounts = Self::safe_gen_range(&mut self.rng, 1..8, 1);
let mt_pubkey = self.indexer.get_state_merkle_trees()[0]
.accounts
.merkle_tree;
mint_tokens_helper(
&mut self.rpc,
&mut self.indexer,
&mt_pubkey,
&self.payer,
&mint,
vec![
Self::safe_gen_range(&mut self.rng, 100_000..1_000_000, 100_000);
number_of_compressed_accounts
],
vec![*user; number_of_compressed_accounts],
)
.await;
token_accounts_with_mint = self
.indexer
.get_compressed_token_accounts_by_owner(user)
.iter()
.filter(|token_account| token_account.token_data.mint == mint)
.cloned()
.collect::<Vec<_>>();
} else {
mint = user_token_accounts
[Self::safe_gen_range(&mut self.rng, 0..user_token_accounts.len(), 0)]
.token_data
.mint;
token_accounts_with_mint = user_token_accounts
.iter()
.filter(|token_account| token_account.token_data.mint == mint)
.map(|token_account| (*token_account).clone())
.collect::<Vec<TokenDataWithContext>>();
}
if delegated {
token_accounts_with_mint = token_accounts_with_mint
.iter()
.filter(|token_account| token_account.token_data.delegate.is_some())
.map(|token_account| (*token_account).clone())
.collect::<Vec<TokenDataWithContext>>();
if token_accounts_with_mint.is_empty() {
return (mint, Vec::new());
}
}
if let Some(delegate) = delegate {
token_accounts_with_mint = token_accounts_with_mint
.iter()
.filter(|token_account| token_account.token_data.delegate.unwrap() == delegate)
.map(|token_account| (*token_account).clone())
.collect::<Vec<TokenDataWithContext>>();
}
if frozen {
token_accounts_with_mint = token_accounts_with_mint
.iter()
.filter(|token_account| token_account.token_data.state == AccountState::Frozen)
.map(|token_account| (*token_account).clone())
.collect::<Vec<TokenDataWithContext>>();
if token_accounts_with_mint.is_empty() {
return (mint, Vec::new());
}
} else {
token_accounts_with_mint = token_accounts_with_mint
.iter()
.filter(|token_account| token_account.token_data.state == AccountState::Initialized)
.map(|token_account| (*token_account).clone())
.collect::<Vec<TokenDataWithContext>>();
}
let range_end = if token_accounts_with_mint.len() == 1 {
1
} else if !token_accounts_with_mint.is_empty() {
self.rng
.gen_range(1..std::cmp::min(token_accounts_with_mint.len(), 4))
} else {
return (mint, Vec::new());
};
let mut get_random_subset_of_token_accounts =
token_accounts_with_mint[0..range_end].to_vec();
// Sorting input and output Merkle tree pubkeys the same way so the pubkey indices do not get out of order
get_random_subset_of_token_accounts.sort_by(|a, b| {
a.compressed_account
.merkle_context
.merkle_tree_pubkey
.cmp(&b.compressed_account.merkle_context.merkle_tree_pubkey)
});
(mint, get_random_subset_of_token_accounts)
}
}
/// Configures probabilities for keypair actions.
///
/// Each `Option<f64>` field is the weight for the corresponding action
/// (presumably the probability it is executed per round -- interpreted by
/// the test runner; `None` disables the action). The default sol
/// configuration enables all sol actions with 0.5 probability.
pub struct KeypairActionConfig {
    pub compress_sol: Option<f64>,
    pub decompress_sol: Option<f64>,
    pub transfer_sol: Option<f64>,
    pub create_address: Option<f64>,
    pub compress_spl: Option<f64>,
    pub decompress_spl: Option<f64>,
    pub mint_spl: Option<f64>,
    pub transfer_spl: Option<f64>,
    // Caps how many state Merkle trees outputs are distributed over.
    pub max_output_accounts: Option<u64>,
    // When true, every action asserts transaction fees via TransactionParams.
    pub fee_assert: bool,
    pub approve_spl: Option<f64>,
    pub revoke_spl: Option<f64>,
    pub freeze_spl: Option<f64>,
    pub thaw_spl: Option<f64>,
    pub burn_spl: Option<f64>,
}
impl KeypairActionConfig {
    /// Builds the prover configuration required by the enabled actions:
    /// inclusion proofs for transfers, non-inclusion proofs for address
    /// creation.
    pub fn prover_config(&self) -> ProverConfig {
        let mut config = ProverConfig {
            run_mode: None,
            circuits: vec![],
        };
        if self.inclusion() {
            config.circuits.push(ProofType::Inclusion);
        }
        if self.non_inclusion() {
            config.circuits.push(ProofType::NonInclusion);
        }
        config
    }
    /// True when any action needing inclusion proofs is enabled.
    pub fn inclusion(&self) -> bool {
        self.transfer_sol.is_some() || self.transfer_spl.is_some()
    }
    /// True when any action needing non-inclusion proofs is enabled.
    pub fn non_inclusion(&self) -> bool {
        self.create_address.is_some()
    }
    /// Only sol actions, each with 0.5 probability; fee assertions on.
    pub fn sol_default() -> Self {
        Self {
            compress_sol: Some(0.5),
            decompress_sol: Some(0.5),
            transfer_sol: Some(0.5),
            create_address: None,
            compress_spl: None,
            decompress_spl: None,
            mint_spl: None,
            transfer_spl: None,
            max_output_accounts: None,
            fee_assert: true,
            approve_spl: None,
            revoke_spl: None,
            freeze_spl: None,
            thaw_spl: None,
            burn_spl: None,
        }
    }
    /// Only spl actions; fee assertions on.
    pub fn spl_default() -> Self {
        Self {
            compress_sol: None,
            decompress_sol: None,
            transfer_sol: None,
            create_address: None,
            compress_spl: Some(0.7),
            decompress_spl: Some(0.5),
            mint_spl: None,
            transfer_spl: Some(0.5),
            max_output_accounts: Some(10),
            fee_assert: true,
            approve_spl: Some(0.5),
            revoke_spl: Some(0.5),
            freeze_spl: Some(0.5),
            thaw_spl: Some(0.5),
            burn_spl: Some(0.5),
        }
    }
    /// All sol and spl actions enabled; fee assertions on.
    pub fn all_default() -> Self {
        Self {
            compress_sol: Some(0.5),
            decompress_sol: Some(1.0),
            transfer_sol: Some(1.0),
            create_address: Some(0.2),
            compress_spl: Some(0.7),
            decompress_spl: Some(0.5),
            mint_spl: None,
            transfer_spl: Some(0.5),
            max_output_accounts: Some(10),
            fee_assert: true,
            approve_spl: Some(0.7),
            revoke_spl: Some(0.7),
            freeze_spl: Some(0.7),
            thaw_spl: Some(0.7),
            burn_spl: Some(0.7),
        }
    }
    /// Same as [`Self::all_default`] but with fee assertions disabled.
    pub fn all_default_no_fee_assert() -> Self {
        Self {
            compress_sol: Some(0.5),
            decompress_sol: Some(1.0),
            transfer_sol: Some(1.0),
            create_address: Some(0.2),
            compress_spl: Some(0.7),
            decompress_spl: Some(0.5),
            mint_spl: None,
            transfer_spl: Some(0.5),
            max_output_accounts: Some(10),
            fee_assert: false,
            approve_spl: Some(0.7),
            revoke_spl: Some(0.7),
            freeze_spl: Some(0.7),
            thaw_spl: Some(0.7),
            burn_spl: Some(0.7),
        }
    }
    /// Deterministic test setup: sol actions always fire, spl actions never.
    pub fn test_default() -> Self {
        Self {
            compress_sol: Some(1.0),
            decompress_sol: Some(1.0),
            transfer_sol: Some(1.0),
            create_address: Some(1.0),
            compress_spl: Some(0.0),
            decompress_spl: Some(0.0),
            mint_spl: None,
            transfer_spl: Some(0.0),
            max_output_accounts: Some(10),
            fee_assert: true,
            approve_spl: None,
            revoke_spl: None,
            freeze_spl: None,
            thaw_spl: None,
            burn_spl: None,
        }
    }
    /// Forester test setup: sol transfers only, small output fan-out.
    pub fn test_forester_default() -> Self {
        Self {
            compress_sol: Some(0.0),
            decompress_sol: Some(0.0),
            transfer_sol: Some(1.0),
            create_address: None,
            compress_spl: None,
            decompress_spl: None,
            mint_spl: None,
            transfer_spl: None,
            max_output_accounts: Some(3),
            fee_assert: true,
            approve_spl: None,
            revoke_spl: None,
            freeze_spl: None,
            thaw_spl: None,
            burn_spl: None,
        }
    }
}
/// Configures probabilities for general (environment-level) actions, such
/// as creating Merkle trees, nullifying accounts, and rollovers. `None`
/// disables the corresponding action.
pub struct GeneralActionConfig {
    pub add_keypair: Option<f64>,
    pub create_state_mt: Option<f64>,
    pub create_address_mt: Option<f64>,
    pub nullify_compressed_accounts: Option<f64>,
    pub empty_address_queue: Option<f64>,
    pub rollover: Option<f64>,
    pub add_forester: Option<f64>,
    /// TODO: add this
    /// Creates one infinite epoch
    pub disable_epochs: bool,
}
impl Default for GeneralActionConfig {
    /// Default mix: tree creation always fires, housekeeping actions
    /// (nullify, empty queue) fire occasionally; no rollovers or foresters.
    fn default() -> Self {
        Self {
            add_keypair: Some(0.3),
            create_state_mt: Some(1.0),
            create_address_mt: Some(1.0),
            nullify_compressed_accounts: Some(0.2),
            empty_address_queue: Some(0.2),
            rollover: None,
            add_forester: None,
            disable_epochs: false,
        }
    }
}
impl GeneralActionConfig {
    /// Forester test setup: all general actions disabled.
    pub fn test_forester_default() -> Self {
        Self {
            add_keypair: None,
            create_state_mt: None,
            create_address_mt: None,
            nullify_compressed_accounts: None,
            empty_address_queue: None,
            rollover: None,
            add_forester: None,
            disable_epochs: false,
        }
    }
    /// Like [`Default`] but with tree rollovers enabled at 0.5 probability.
    pub fn test_with_rollover() -> Self {
        Self {
            add_keypair: Some(0.3),
            create_state_mt: Some(1.0),
            create_address_mt: Some(1.0),
            nullify_compressed_accounts: Some(0.2),
            empty_address_queue: Some(0.2),
            rollover: Some(0.5),
            add_forester: None,
            disable_epochs: false,
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/state_tree_rollover.rs
|
#![allow(clippy::await_holding_refcell_ref)]
use crate::assert_rollover::{
assert_rolledover_merkle_trees, assert_rolledover_merkle_trees_metadata,
assert_rolledover_queues_metadata,
};
use account_compression::NullifierQueueConfig;
use account_compression::{
self, initialize_address_merkle_tree::AccountLoader, state::QueueAccount,
StateMerkleTreeAccount, StateMerkleTreeConfig, ID,
};
use anchor_lang::{InstructionData, Lamports, ToAccountMetas};
use forester_utils::{create_account_instruction, get_hash_set};
use light_client::rpc::errors::RpcError;
use light_client::rpc::RpcConnection;
use light_concurrent_merkle_tree::{
copy::ConcurrentMerkleTreeCopy, zero_copy::ConcurrentMerkleTreeZeroCopyMut,
};
use light_hasher::Poseidon;
use solana_sdk::clock::Slot;
use solana_sdk::{
account::AccountSharedData,
account_info::AccountInfo,
instruction::{AccountMeta, Instruction},
signature::{Keypair, Signer},
transaction::Transaction,
};
use solana_sdk::{account::WritableAccount, pubkey::Pubkey};
use std::mem;
/// Failure-injection modes for state Merkle tree rollover tests: each
/// inflates the corresponding new account by one byte so the program's
/// size checks can be exercised.
pub enum StateMerkleTreeRolloverMode {
    // New nullifier queue account is created one byte too large.
    QueueInvalidSize,
    // New state Merkle tree account is created one byte too large.
    TreeInvalidSize,
}
#[allow(clippy::too_many_arguments)]
/// Creates the new nullifier queue and state Merkle tree accounts and sends
/// the `RolloverStateMerkleTreeAndNullifierQueue` instruction in a single
/// transaction, signed and paid for by the rpc payer.
///
/// `mode` optionally inflates one of the new accounts by a byte to trigger
/// the program's size validation (see [`StateMerkleTreeRolloverMode`]).
///
/// # Errors
/// Returns the rpc error of the rollover transaction. Account creation
/// setup unwraps on rpc failures.
pub async fn perform_state_merkle_tree_roll_over<R: RpcConnection>(
    rpc: &mut R,
    new_nullifier_queue_keypair: &Keypair,
    new_state_merkle_tree_keypair: &Keypair,
    merkle_tree_pubkey: &Pubkey,
    nullifier_queue_pubkey: &Pubkey,
    merkle_tree_config: &StateMerkleTreeConfig,
    queue_config: &NullifierQueueConfig,
    mode: Option<StateMerkleTreeRolloverMode>,
) -> Result<(solana_sdk::signature::Signature, Slot), RpcError> {
    let payer_pubkey = rpc.get_payer().pubkey();
    let mut size = QueueAccount::size(queue_config.capacity as usize).unwrap();
    if let Some(StateMerkleTreeRolloverMode::QueueInvalidSize) = mode {
        size += 1;
    }
    let create_nullifier_queue_instruction = create_account_instruction(
        &payer_pubkey,
        size,
        rpc.get_minimum_balance_for_rent_exemption(size)
            .await
            .unwrap(),
        &ID,
        Some(new_nullifier_queue_keypair),
    );
    let mut state_tree_size = account_compression::state::StateMerkleTreeAccount::size(
        merkle_tree_config.height as usize,
        merkle_tree_config.changelog_size as usize,
        merkle_tree_config.roots_size as usize,
        merkle_tree_config.canopy_depth as usize,
    );
    if let Some(StateMerkleTreeRolloverMode::TreeInvalidSize) = mode {
        state_tree_size += 1;
    }
    let create_state_merkle_tree_instruction = create_account_instruction(
        &payer_pubkey,
        state_tree_size,
        rpc.get_minimum_balance_for_rent_exemption(state_tree_size)
            .await
            .unwrap(),
        &ID,
        Some(new_state_merkle_tree_keypair),
    );
    let instruction_data =
        account_compression::instruction::RolloverStateMerkleTreeAndNullifierQueue {};
    let accounts = account_compression::accounts::RolloverStateMerkleTreeAndNullifierQueue {
        fee_payer: rpc.get_payer().pubkey(),
        authority: rpc.get_payer().pubkey(),
        registered_program_pda: None,
        new_state_merkle_tree: new_state_merkle_tree_keypair.pubkey(),
        new_nullifier_queue: new_nullifier_queue_keypair.pubkey(),
        old_state_merkle_tree: *merkle_tree_pubkey,
        old_nullifier_queue: *nullifier_queue_pubkey,
    };
    // NOTE(review): the extra writable old-tree meta is appended after the
    // anchor-derived metas -- presumably a remaining account the program
    // expects; confirm against the on-chain instruction definition.
    let instruction = Instruction {
        program_id: account_compression::ID,
        accounts: [
            accounts.to_account_metas(Some(true)),
            vec![AccountMeta::new(*merkle_tree_pubkey, false)],
        ]
        .concat(),
        data: instruction_data.data(),
    };
    let blockhash = rpc.get_latest_blockhash().await.unwrap();
    // All three instructions are bundled so account creation and rollover
    // land atomically.
    let transaction = Transaction::new_signed_with_payer(
        &[
            create_nullifier_queue_instruction,
            create_state_merkle_tree_instruction,
            instruction,
        ],
        Some(&rpc.get_payer().pubkey()),
        &vec![
            &rpc.get_payer(),
            &new_nullifier_queue_keypair,
            &new_state_merkle_tree_keypair,
        ],
        blockhash,
    );
    rpc.process_transaction_with_context(transaction).await
}
/// Test-only backdoor: overwrites `next_index` of a state Merkle tree
/// directly in its account data and pins the account's lamports.
///
/// This bypasses the program entirely (e.g. to simulate a nearly full tree
/// in rollover tests). After writing, the account is re-read and the new
/// `next_index` is asserted.
pub async fn set_state_merkle_tree_next_index<R: RpcConnection>(
    rpc: &mut R,
    merkle_tree_pubkey: &Pubkey,
    next_index: u64,
    lamports: u64,
) {
    let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap();
    {
        // The zero-copy tree bytes start after the 8-byte Anchor
        // discriminator plus the StateMerkleTreeAccount header.
        let merkle_tree_deserialized =
            &mut ConcurrentMerkleTreeZeroCopyMut::<Poseidon, 26>::from_bytes_zero_copy_mut(
                &mut merkle_tree.data[8 + std::mem::size_of::<StateMerkleTreeAccount>()..],
            )
            .unwrap();
        // SAFETY: `next_index` points into the account buffer borrowed above;
        // assumed valid and properly aligned per the zero-copy wrapper's
        // contract — confirm against ConcurrentMerkleTreeZeroCopyMut docs.
        unsafe {
            *merkle_tree_deserialized.next_index = next_index as usize;
        }
    }
    let mut account_share_data = AccountSharedData::from(merkle_tree);
    account_share_data.set_lamports(lamports);
    rpc.set_account(merkle_tree_pubkey, &account_share_data);
    // Read back and verify the write took effect.
    let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap();
    let merkle_tree_deserialized =
        ConcurrentMerkleTreeZeroCopyMut::<Poseidon, 26>::from_bytes_zero_copy_mut(
            &mut merkle_tree.data[8 + std::mem::size_of::<StateMerkleTreeAccount>()..],
        )
        .unwrap();
    assert_eq!(merkle_tree_deserialized.next_index() as u64, next_index);
}
#[allow(clippy::too_many_arguments)]
/// Asserts the post-conditions of a state tree + nullifier queue rollover:
/// rolled-over metadata on both tree accounts, matching Merkle tree
/// contents, rolled-over queue metadata, the exact fee-payer balance delta
/// (rent reimbursed minus signature fees and `additional_rent`), and equal
/// queue capacity / sequence threshold between old and new queues.
///
/// NOTE(review): the intermediate bindings appear swapped — `old_account_info`
/// wraps the NEW tree's data and `new_account_info` wraps the OLD tree's
/// data, so `old_mt`/`new_mt` below are built from the opposite accounts
/// their names suggest. Confirm whether this is intentional or the
/// arguments to `assert_rolledover_merkle_trees` are reversed.
pub async fn assert_rolled_over_pair<R: RpcConnection>(
    payer: &Pubkey,
    rpc: &mut R,
    fee_payer_prior_balance: &u64,
    old_merkle_tree_pubkey: &Pubkey,
    old_nullifier_queue_pubkey: &Pubkey,
    new_merkle_tree_pubkey: &Pubkey,
    new_nullifier_queue_pubkey: &Pubkey,
    current_slot: u64,
    additional_rent: u64,
    num_signatures: u64,
) {
    let mut new_mt_account = rpc
        .get_account(*new_merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap();
    // Synthetic AccountInfos are created with 0 lamports; only the account
    // data matters for the deserialization below.
    let mut new_mt_lamports = 0u64;
    let old_account_info = AccountInfo::new(
        new_merkle_tree_pubkey,
        false,
        false,
        &mut new_mt_lamports,
        &mut new_mt_account.data,
        &ID,
        false,
        0u64,
    );
    let new_mt_account =
        AccountLoader::<StateMerkleTreeAccount>::try_from(&old_account_info).unwrap();
    let new_loaded_mt_account = new_mt_account.load().unwrap();
    let mut old_mt_account = rpc
        .get_account(*old_merkle_tree_pubkey)
        .await
        .unwrap()
        .unwrap();
    let mut old_mt_lamports = 0u64;
    let new_account_info = AccountInfo::new(
        old_merkle_tree_pubkey,
        false,
        false,
        &mut old_mt_lamports,
        &mut old_mt_account.data,
        &account_compression::ID,
        false,
        0u64,
    );
    let old_mt_account =
        AccountLoader::<StateMerkleTreeAccount>::try_from(&new_account_info).unwrap();
    let old_loaded_mt_account = old_mt_account.load().unwrap();
    assert_rolledover_merkle_trees_metadata(
        &old_loaded_mt_account.metadata,
        &new_loaded_mt_account.metadata,
        current_slot,
        new_nullifier_queue_pubkey,
    );
    // Tree bytes follow the 8-byte Anchor discriminator + account header.
    let old_mt_data = old_account_info.try_borrow_data().unwrap();
    let old_mt = ConcurrentMerkleTreeCopy::<Poseidon, 26>::from_bytes_copy(
        &old_mt_data[8 + mem::size_of::<StateMerkleTreeAccount>()..],
    )
    .unwrap();
    let new_mt_data = new_account_info.try_borrow_data().unwrap();
    let new_mt = ConcurrentMerkleTreeCopy::<Poseidon, 26>::from_bytes_copy(
        &new_mt_data[8 + mem::size_of::<StateMerkleTreeAccount>()..],
    )
    .unwrap();
    assert_rolledover_merkle_trees(&old_mt, &new_mt);
    {
        let mut new_queue_account = rpc
            .get_account(*new_nullifier_queue_pubkey)
            .await
            .unwrap()
            .unwrap();
        let mut new_mt_lamports = 0u64;
        let account_info = AccountInfo::new(
            new_nullifier_queue_pubkey,
            false,
            false,
            &mut new_mt_lamports,
            &mut new_queue_account.data,
            &account_compression::ID,
            false,
            0u64,
        );
        let new_queue_account = AccountLoader::<QueueAccount>::try_from(&account_info).unwrap();
        let new_loaded_queue_account = new_queue_account.load().unwrap();
        let mut old_queue_account = rpc
            .get_account(*old_nullifier_queue_pubkey)
            .await
            .unwrap()
            .unwrap();
        let mut old_mt_lamports = 0u64;
        let account_info = AccountInfo::new(
            old_nullifier_queue_pubkey,
            false,
            false,
            &mut old_mt_lamports,
            &mut old_queue_account.data,
            &account_compression::ID,
            false,
            0u64,
        );
        let old_queue_account = AccountLoader::<QueueAccount>::try_from(&account_info).unwrap();
        let old_loaded_queue_account = old_queue_account.load().unwrap();
        // NOTE(review): the lamport values passed here read from the
        // synthetic AccountInfos above, which were created with 0 lamports —
        // confirm the metadata assert expects that.
        assert_rolledover_queues_metadata(
            &old_loaded_queue_account.metadata,
            &new_loaded_queue_account.metadata,
            current_slot,
            new_merkle_tree_pubkey,
            new_nullifier_queue_pubkey,
            old_mt_account.get_lamports(),
            new_mt_account.get_lamports(),
            new_queue_account.get_lamports(),
        );
    }
    let fee_payer_post_balance = rpc.get_account(*payer).await.unwrap().unwrap().lamports;
    // rent is reimbursed, 3 signatures cost 3 x 5000 lamports
    assert_eq!(
        *fee_payer_prior_balance,
        fee_payer_post_balance + 5000 * num_signatures + additional_rent
    );
    // The new queue must inherit the old queue's capacity and threshold.
    let old_address_queue =
        unsafe { get_hash_set::<QueueAccount, R>(rpc, *old_nullifier_queue_pubkey).await };
    let new_address_queue =
        unsafe { get_hash_set::<QueueAccount, R>(rpc, *new_nullifier_queue_pubkey).await };
    assert_eq!(
        old_address_queue.get_capacity(),
        new_address_queue.get_capacity()
    );
    assert_eq!(
        old_address_queue.sequence_threshold,
        new_address_queue.sequence_threshold,
    );
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/system_program.rs
|
use forester_utils::indexer::Indexer;
use light_hasher::Poseidon;
use light_system_program::sdk::event::PublicTransactionEvent;
use light_system_program::{
sdk::{
address::derive_address,
compressed_account::{
CompressedAccount, CompressedAccountWithMerkleContext, MerkleContext,
},
invoke::{create_invoke_instruction, get_sol_pool_pda},
},
NewAddressParams,
};
use solana_sdk::signature::Signature;
use solana_sdk::{
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use crate::assert_compressed_tx::{
assert_compressed_transaction, get_merkle_tree_snapshots, AssertCompressedTransactionInputs,
};
use light_client::rpc::errors::RpcError;
use light_client::rpc::RpcConnection;
use light_client::transaction_params::TransactionParams;
#[allow(clippy::too_many_arguments)]
/// Creates compressed accounts at newly derived addresses and asserts the
/// resulting transaction end-to-end.
///
/// For every seed in `address_seeds` an address is derived in the address
/// Merkle tree at the same index of `address_merkle_tree_pubkeys`, and a
/// zero-lamport compressed account owned by the RPC payer is created at
/// that address. If
/// `create_out_compressed_accounts_for_input_compressed_accounts` is set,
/// every input compressed account is additionally re-created as an output
/// in its original Merkle tree (preserving its address).
///
/// # Panics
/// Panics if `address_merkle_tree_pubkeys` or
/// `address_merkle_tree_queue_pubkeys` disagree in length with
/// `address_seeds`.
pub async fn create_addresses_test<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &mut I,
    address_merkle_tree_pubkeys: &[Pubkey],
    address_merkle_tree_queue_pubkeys: &[Pubkey],
    mut output_merkle_tree_pubkeys: Vec<Pubkey>,
    address_seeds: &[[u8; 32]],
    input_compressed_accounts: &[CompressedAccountWithMerkleContext],
    create_out_compressed_accounts_for_input_compressed_accounts: bool,
    transaction_params: Option<TransactionParams>,
) -> Result<(), RpcError> {
    if address_merkle_tree_pubkeys.len() != address_seeds.len() {
        panic!("address_merkle_tree_pubkeys and address_seeds length mismatch for create_addresses_test");
    }
    // Previously only the tree slice was validated; a short queue slice would
    // surface as an opaque index panic when building the address params.
    if address_merkle_tree_queue_pubkeys.len() != address_seeds.len() {
        panic!("address_merkle_tree_queue_pubkeys and address_seeds length mismatch for create_addresses_test");
    }
    // Hoist the loop-invariant payer pubkey.
    let payer_pubkey = rpc.get_payer().pubkey();
    // Derive one address per (tree, seed) pair.
    let mut derived_addresses = Vec::with_capacity(address_seeds.len());
    for (tree_pubkey, address_seed) in address_merkle_tree_pubkeys.iter().zip(address_seeds) {
        let derived_address = derive_address(tree_pubkey, address_seed).unwrap();
        println!("derived_address: {:?}", derived_address);
        derived_addresses.push(derived_address);
    }
    // Root index 0 is a placeholder; `compressed_transaction_test` patches in
    // the address root indices returned by the prover.
    let address_params = address_seeds
        .iter()
        .enumerate()
        .map(|(i, seed)| NewAddressParams {
            address_queue_pubkey: address_merkle_tree_queue_pubkeys[i],
            address_merkle_tree_pubkey: address_merkle_tree_pubkeys[i],
            seed: *seed,
            address_merkle_tree_root_index: 0,
        })
        .collect::<Vec<_>>();
    // One zero-lamport output account per derived address, owned by the payer.
    let mut output_compressed_accounts = derived_addresses
        .iter()
        .map(|address| CompressedAccount {
            lamports: 0,
            owner: payer_pubkey,
            data: None,
            address: Some(*address),
        })
        .collect::<Vec<_>>();
    if create_out_compressed_accounts_for_input_compressed_accounts {
        // Re-create each input account in its original tree so the inputs
        // are not net-destroyed by the transaction.
        for compressed_account in input_compressed_accounts.iter() {
            output_compressed_accounts.push(CompressedAccount {
                lamports: 0,
                owner: payer_pubkey,
                data: None,
                address: compressed_account.compressed_account.address,
            });
            output_merkle_tree_pubkeys.push(compressed_account.merkle_context.merkle_tree_pubkey);
        }
    }
    let payer = rpc.get_payer().insecure_clone();
    let inputs = CompressedTransactionTestInputs {
        rpc,
        test_indexer,
        fee_payer: &payer,
        authority: &payer,
        input_compressed_accounts,
        output_compressed_accounts: output_compressed_accounts.as_slice(),
        output_merkle_tree_pubkeys: output_merkle_tree_pubkeys.as_slice(),
        transaction_params,
        relay_fee: None,
        compress_or_decompress_lamports: None,
        is_compress: false,
        new_address_params: &address_params,
        sorted_output_accounts: false,
        created_addresses: Some(derived_addresses.as_slice()),
        recipient: None,
    };
    compressed_transaction_test(inputs).await?;
    Ok(())
}
#[allow(clippy::too_many_arguments)]
/// Compresses `compress_amount` lamports into a single compressed account
/// owned by `authority` in `output_merkle_tree_pubkey`, merging in the
/// lamports of any `input_compressed_accounts`.
///
/// If `create_out_compressed_accounts_for_input_compressed_accounts` is
/// set, every input account is additionally re-created as a zero-lamport
/// output in its original Merkle tree (preserving its address).
pub async fn compress_sol_test<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &mut I,
    authority: &Keypair,
    input_compressed_accounts: &[CompressedAccountWithMerkleContext],
    create_out_compressed_accounts_for_input_compressed_accounts: bool,
    compress_amount: u64,
    output_merkle_tree_pubkey: &Pubkey,
    transaction_params: Option<TransactionParams>,
) -> Result<(), RpcError> {
    // Sum over an empty iterator is 0, so the empty case needs no
    // special-casing (the original explicit branch was redundant).
    let input_lamports = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.lamports)
        .sum::<u64>();
    // First output holds all input lamports plus the newly compressed ones.
    let mut output_compressed_accounts = vec![CompressedAccount {
        lamports: input_lamports + compress_amount,
        owner: authority.pubkey(),
        data: None,
        address: None,
    }];
    let mut output_merkle_tree_pubkeys = vec![*output_merkle_tree_pubkey];
    if create_out_compressed_accounts_for_input_compressed_accounts {
        for compressed_account in input_compressed_accounts.iter() {
            output_compressed_accounts.push(CompressedAccount {
                lamports: 0,
                owner: authority.pubkey(),
                data: None,
                address: compressed_account.compressed_account.address,
            });
            output_merkle_tree_pubkeys.push(compressed_account.merkle_context.merkle_tree_pubkey);
        }
    }
    let inputs = CompressedTransactionTestInputs {
        rpc,
        test_indexer,
        fee_payer: authority,
        authority,
        input_compressed_accounts,
        output_compressed_accounts: output_compressed_accounts.as_slice(),
        // Bug fix: previously only `&[*output_merkle_tree_pubkey]` was
        // passed here, silently dropping the per-input tree pubkeys
        // collected above and desynchronizing output accounts from output
        // trees (compare `create_addresses_test`, which passes the full
        // collected list).
        output_merkle_tree_pubkeys: output_merkle_tree_pubkeys.as_slice(),
        transaction_params,
        relay_fee: None,
        compress_or_decompress_lamports: Some(compress_amount),
        is_compress: true,
        new_address_params: &[],
        sorted_output_accounts: false,
        created_addresses: None,
        recipient: None,
    };
    compressed_transaction_test(inputs).await?;
    Ok(())
}
#[allow(clippy::too_many_arguments)]
/// Decompresses `decompress_amount` lamports from the given compressed
/// accounts to `recipient`, leaving the change in a single new compressed
/// account in `output_merkle_tree_pubkey`.
///
/// # Panics
/// The change computation underflows (panics in debug builds) if
/// `decompress_amount` exceeds the total lamports of
/// `input_compressed_accounts`.
pub async fn decompress_sol_test<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &mut I,
    authority: &Keypair,
    input_compressed_accounts: &[CompressedAccountWithMerkleContext],
    recipient: &Pubkey,
    decompress_amount: u64,
    output_merkle_tree_pubkey: &Pubkey,
    transaction_params: Option<TransactionParams>,
) -> Result<(), RpcError> {
    let input_lamports = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.lamports)
        .sum::<u64>();
    let output_compressed_accounts = vec![CompressedAccount {
        lamports: input_lamports - decompress_amount,
        // NOTE(review): the change account is owned by the RPC payer while
        // `compress_sol_test` uses `authority.pubkey()` — confirm this
        // asymmetry is intentional.
        owner: rpc.get_payer().pubkey(),
        data: None,
        address: None,
    }];
    let payer = rpc.get_payer().insecure_clone();
    let inputs = CompressedTransactionTestInputs {
        rpc,
        test_indexer,
        fee_payer: &payer,
        authority,
        input_compressed_accounts,
        output_compressed_accounts: output_compressed_accounts.as_slice(),
        output_merkle_tree_pubkeys: &[*output_merkle_tree_pubkey],
        transaction_params,
        relay_fee: None,
        compress_or_decompress_lamports: Some(decompress_amount),
        is_compress: false,
        new_address_params: &[],
        sorted_output_accounts: false,
        created_addresses: None,
        recipient: Some(*recipient),
    };
    compressed_transaction_test(inputs).await?;
    Ok(())
}
#[allow(clippy::too_many_arguments)]
/// Transfers the combined lamports of `input_compressed_accounts` to
/// `recipients`, creating one output account per recipient. Lamports are
/// split evenly; the division remainder goes to the first output. Input
/// addresses are carried over positionally to the first outputs.
///
/// NOTE(review): `output_merkle_tree_pubkeys` is sorted before being paired
/// with `recipients`, so recipient i may land in a different tree than the
/// caller listed at index i — confirm this is intended.
///
/// # Panics
/// Panics if `recipients` and `output_merkle_tree_pubkeys` differ in length
/// or if there are no input accounts.
pub async fn transfer_compressed_sol_test<R: RpcConnection, I: Indexer<R>>(
    rpc: &mut R,
    test_indexer: &mut I,
    authority: &Keypair,
    input_compressed_accounts: &[CompressedAccountWithMerkleContext],
    recipients: &[Pubkey],
    output_merkle_tree_pubkeys: &[Pubkey],
    transaction_params: Option<TransactionParams>,
) -> Result<Signature, RpcError> {
    if recipients.len() != output_merkle_tree_pubkeys.len() {
        panic!("recipients and output_merkle_tree_pubkeys length mismatch for transfer_compressed_sol_test");
    }
    if input_compressed_accounts.is_empty() {
        panic!("input_compressed_accounts is empty for transfer_compressed_sol_test");
    }
    let input_lamports = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.lamports)
        .sum::<u64>();
    let mut output_compressed_accounts = Vec::new();
    let mut output_merkle_tree_pubkeys = output_merkle_tree_pubkeys.to_vec();
    output_merkle_tree_pubkeys.sort();
    let input_addresses = input_compressed_accounts
        .iter()
        .map(|x| x.compressed_account.address)
        .collect::<Vec<_>>();
    for (i, _) in output_merkle_tree_pubkeys.iter().enumerate() {
        // Carry input addresses over to the first outputs, one each.
        let address = if i < input_addresses.len() {
            input_addresses[i]
        } else {
            None
        };
        // Even split; the first output absorbs the remainder so the total
        // lamports are conserved.
        let mut lamports = input_lamports / output_merkle_tree_pubkeys.len() as u64;
        if i == 0 {
            lamports += input_lamports % output_merkle_tree_pubkeys.len() as u64;
        }
        output_compressed_accounts.push(CompressedAccount {
            lamports,
            owner: recipients[i],
            data: None,
            address,
        });
    }
    let payer = rpc.get_payer().insecure_clone();
    let inputs = CompressedTransactionTestInputs {
        rpc,
        test_indexer,
        fee_payer: &payer,
        authority,
        input_compressed_accounts,
        output_compressed_accounts: output_compressed_accounts.as_slice(),
        output_merkle_tree_pubkeys: output_merkle_tree_pubkeys.as_slice(),
        transaction_params,
        relay_fee: None,
        compress_or_decompress_lamports: None,
        is_compress: false,
        new_address_params: &[],
        sorted_output_accounts: false,
        created_addresses: None,
        recipient: None,
    };
    compressed_transaction_test(inputs).await
}
/// Bundled inputs for [`compressed_transaction_test`].
pub struct CompressedTransactionTestInputs<'a, R: RpcConnection, I: Indexer<R>> {
    rpc: &'a mut R,
    test_indexer: &'a mut I,
    // Pays the transaction fee; first signer.
    fee_payer: &'a Keypair,
    // Owner of the input compressed accounts; second signer.
    authority: &'a Keypair,
    input_compressed_accounts: &'a [CompressedAccountWithMerkleContext],
    output_compressed_accounts: &'a [CompressedAccount],
    // State trees the output accounts are written to.
    output_merkle_tree_pubkeys: &'a [Pubkey],
    transaction_params: Option<TransactionParams>,
    relay_fee: Option<u64>,
    // Lamports moved into/out of the sol pool; direction set by `is_compress`.
    compress_or_decompress_lamports: Option<u64>,
    is_compress: bool,
    new_address_params: &'a [NewAddressParams],
    sorted_output_accounts: bool,
    // Addresses expected to be created; forwarded to proof generation and
    // the post-transaction asserts.
    created_addresses: Option<&'a [[u8; 32]]>,
    // Recipient of decompressed lamports, if any.
    recipient: Option<Pubkey>,
}
#[allow(clippy::too_many_arguments)]
/// Shared driver for the system-program tests: generates the validity proof
/// (when inputs or new addresses require one), builds and sends the invoke
/// instruction, ingests the emitted event into the test indexer, and runs
/// the full set of post-transaction assertions.
///
/// Returns the transaction signature.
pub async fn compressed_transaction_test<R: RpcConnection, I: Indexer<R>>(
    inputs: CompressedTransactionTestInputs<'_, R, I>,
) -> Result<Signature, RpcError> {
    // Hash each input account; these hashes identify the leaves whose
    // inclusion must be proven (and which get nullified).
    let mut compressed_account_hashes = Vec::new();
    let compressed_account_input_hashes = if !inputs.input_compressed_accounts.is_empty() {
        for compressed_account in inputs.input_compressed_accounts.iter() {
            compressed_account_hashes.push(
                compressed_account
                    .compressed_account
                    .hash::<Poseidon>(
                        &compressed_account.merkle_context.merkle_tree_pubkey,
                        &compressed_account.merkle_context.leaf_index,
                    )
                    .unwrap(),
            );
        }
        Some(compressed_account_hashes.as_slice())
    } else {
        None
    };
    let state_input_merkle_trees = inputs
        .input_compressed_accounts
        .iter()
        .map(|x| x.merkle_context.merkle_tree_pubkey)
        .collect::<Vec<Pubkey>>();
    let state_input_merkle_trees = if state_input_merkle_trees.is_empty() {
        None
    } else {
        Some(state_input_merkle_trees.as_slice())
    };
    let mut root_indices = Vec::new();
    let mut proof = None;
    let mut input_merkle_tree_snapshots = Vec::new();
    let mut address_params = Vec::new();
    // A proof is only required when inputs must be proven included or new
    // addresses proven non-included.
    if !inputs.input_compressed_accounts.is_empty() || !inputs.new_address_params.is_empty() {
        let address_merkle_tree_pubkeys = if inputs.new_address_params.is_empty() {
            None
        } else {
            Some(
                inputs
                    .new_address_params
                    .iter()
                    .map(|x| x.address_merkle_tree_pubkey)
                    .collect::<Vec<_>>(),
            )
        };
        let proof_rpc_res = inputs
            .test_indexer
            .create_proof_for_compressed_accounts(
                compressed_account_input_hashes,
                state_input_merkle_trees,
                inputs.created_addresses,
                address_merkle_tree_pubkeys,
                inputs.rpc,
            )
            .await;
        root_indices = proof_rpc_res.root_indices;
        proof = Some(proof_rpc_res.proof);
        // Snapshot the input trees before the transaction so the asserts
        // can compare pre/post state.
        let input_merkle_tree_accounts = inputs
            .test_indexer
            .get_state_merkle_tree_accounts(state_input_merkle_trees.unwrap_or(&[]));
        input_merkle_tree_snapshots =
            get_merkle_tree_snapshots::<R>(inputs.rpc, input_merkle_tree_accounts.as_slice()).await;
        if !inputs.new_address_params.is_empty() {
            // Patch the placeholder root indices with the indices the
            // prover actually used.
            for (i, input_address_params) in inputs.new_address_params.iter().enumerate() {
                address_params.push(input_address_params.clone());
                address_params[i].address_merkle_tree_root_index =
                    proof_rpc_res.address_root_indices[i];
            }
        }
    }
    let output_merkle_tree_accounts = inputs
        .test_indexer
        .get_state_merkle_tree_accounts(inputs.output_merkle_tree_pubkeys);
    let output_merkle_tree_snapshots =
        get_merkle_tree_snapshots::<R>(inputs.rpc, output_merkle_tree_accounts.as_slice()).await;
    let instruction = create_invoke_instruction(
        &inputs.fee_payer.pubkey(),
        &inputs.authority.pubkey().clone(),
        inputs
            .input_compressed_accounts
            .iter()
            .map(|x| x.compressed_account.clone())
            .collect::<Vec<CompressedAccount>>()
            .as_slice(),
        inputs.output_compressed_accounts,
        inputs
            .input_compressed_accounts
            .iter()
            .map(|x| x.merkle_context)
            .collect::<Vec<MerkleContext>>()
            .as_slice(),
        inputs.output_merkle_tree_pubkeys,
        &root_indices,
        &address_params,
        proof,
        inputs.compress_or_decompress_lamports,
        inputs.is_compress,
        inputs.recipient,
        true,
    );
    // Capture pre-transaction balances touched by compression/decompression
    // so the asserts below can verify the lamport flows.
    let mut recipient_balance_pre = 0;
    let mut compressed_sol_pda_balance_pre = 0;
    if inputs.compress_or_decompress_lamports.is_some() {
        compressed_sol_pda_balance_pre =
            match inputs.rpc.get_account(get_sol_pool_pda()).await.unwrap() {
                Some(account) => account.lamports,
                None => 0,
            };
    }
    if inputs.recipient.is_some() {
        // TODO: assert sender balance after fee refactor
        recipient_balance_pre = match inputs
            .rpc
            .get_account(inputs.recipient.unwrap())
            .await
            .unwrap()
        {
            Some(account) => account.lamports,
            None => 0,
        };
    }
    // `event` is a tuple whose `.0` is the PublicTransactionEvent and `.1`
    // the signature returned at the end of this function.
    let event = inputs
        .rpc
        .create_and_send_transaction_with_event::<PublicTransactionEvent>(
            &[instruction],
            &inputs.fee_payer.pubkey(),
            &[inputs.fee_payer, inputs.authority],
            inputs.transaction_params,
        )
        .await?
        .unwrap();
    // Feed the event back into the indexer to update its local state.
    let (created_output_compressed_accounts, _) = inputs
        .test_indexer
        .add_event_and_compressed_accounts(&event.0);
    let input = AssertCompressedTransactionInputs {
        rpc: inputs.rpc,
        test_indexer: inputs.test_indexer,
        output_compressed_accounts: inputs.output_compressed_accounts,
        created_output_compressed_accounts: created_output_compressed_accounts.as_slice(),
        event: &event.0,
        input_merkle_tree_snapshots: input_merkle_tree_snapshots.as_slice(),
        output_merkle_tree_snapshots: output_merkle_tree_snapshots.as_slice(),
        recipient_balance_pre,
        compress_or_decompress_lamports: inputs.compress_or_decompress_lamports,
        is_compress: inputs.is_compress,
        compressed_sol_pda_balance_pre,
        compression_recipient: inputs.recipient,
        created_addresses: inputs.created_addresses.unwrap_or(&[]),
        sorted_output_accounts: inputs.sorted_output_accounts,
        relay_fee: inputs.relay_fee,
        input_compressed_account_hashes: &compressed_account_hashes,
        address_queue_pubkeys: &inputs
            .new_address_params
            .iter()
            .map(|x| x.address_queue_pubkey)
            .collect::<Vec<Pubkey>>(),
    };
    assert_compressed_transaction(input).await;
    Ok(event.1)
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/indexer/mod.rs
|
// In-memory indexer implementation used by the integration tests.
pub mod test_indexer;
// Re-exported so callers can `use ...::indexer::TestIndexer` directly.
pub use test_indexer::TestIndexer;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src
|
solana_public_repos/Lightprotocol/light-protocol/test-utils/src/indexer/test_indexer.rs
|
use log::{debug, info, warn};
use num_bigint::BigUint;
use solana_sdk::bs58;
use std::marker::PhantomData;
use std::sync::{Arc, Mutex};
use crate::create_address_merkle_tree_and_queue_account_with_assert;
use crate::e2e_test_env::KeypairActionConfig;
use crate::spl::create_initialize_mint_instructions;
use account_compression::{
AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig,
};
use forester_utils::indexer::{
AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof,
NewAddressProofWithContext, ProofRpcResult, StateMerkleTreeAccounts, StateMerkleTreeBundle,
TokenDataWithContext,
};
use forester_utils::{get_concurrent_merkle_tree, get_indexed_merkle_tree};
use light_client::rpc::RpcConnection;
use light_client::transaction_params::FeeConfig;
use light_compressed_token::constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR;
use light_compressed_token::mint_sdk::create_create_token_pool_instruction;
use light_compressed_token::{get_token_pool_pda, TokenData};
use light_program_test::test_env::{create_state_merkle_tree_and_queue_account, EnvAccounts};
use light_prover_client::gnark::helpers::{ProverConfig, ProverMode};
use light_utils::bigint::bigint_to_be_bytes_array;
use {
account_compression::{
utils::constants::{STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT},
AddressMerkleTreeAccount, StateMerkleTreeAccount,
},
anchor_lang::AnchorDeserialize,
light_hasher::Poseidon,
light_indexed_merkle_tree::{array::IndexedArray, reference::IndexedMerkleTree},
light_merkle_tree_reference::MerkleTree,
light_prover_client::{
gnark::{
combined_json_formatter::CombinedJsonStruct,
constants::{PROVE_PATH, SERVER_ADDRESS},
helpers::spawn_prover,
inclusion_json_formatter::BatchInclusionJsonStruct,
non_inclusion_json_formatter::BatchNonInclusionJsonStruct,
proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct},
},
inclusion::merkle_inclusion_proof_inputs::{
InclusionMerkleProofInputs, InclusionProofInputs,
},
non_inclusion::merkle_non_inclusion_proof_inputs::{
get_non_inclusion_proof_inputs, NonInclusionProofInputs,
},
},
light_system_program::{
invoke::processor::CompressedProof,
sdk::{
compressed_account::{CompressedAccountWithMerkleContext, MerkleContext},
event::PublicTransactionEvent,
},
},
num_bigint::BigInt,
num_traits::ops::bytes::FromBytes,
reqwest::Client,
solana_sdk::{
instruction::Instruction, program_pack::Pack, pubkey::Pubkey, signature::Keypair,
signer::Signer,
},
spl_token::instruction::initialize_mint,
std::time::Duration,
};
// TODO: find a different way to init Indexed array on the heap so that it doesn't break the stack
#[derive(Debug)]
pub struct TestIndexer<R: RpcConnection> {
    // Locally tracked state Merkle trees (accounts plus reference tree).
    pub state_merkle_trees: Vec<StateMerkleTreeBundle>,
    // Locally tracked address Merkle trees (reference tree + indexed array).
    pub address_merkle_trees: Vec<AddressMerkleTreeBundle>,
    // Keypair used by the indexer's helper operations.
    pub payer: Keypair,
    // Group PDA — presumably the registered program group; confirm against
    // the environment setup code.
    pub group_pda: Pubkey,
    // Live (unspent) compressed accounts; newest are inserted at index 0.
    pub compressed_accounts: Vec<CompressedAccountWithMerkleContext>,
    // Compressed accounts that have been spent/nullified.
    pub nullified_compressed_accounts: Vec<CompressedAccountWithMerkleContext>,
    // Live compressed token accounts; newest are inserted at index 0.
    pub token_compressed_accounts: Vec<TokenDataWithContext>,
    // Compressed token accounts that have been spent/nullified.
    pub token_nullified_compressed_accounts: Vec<TokenDataWithContext>,
    // Every transaction event ingested so far, in ingestion order.
    pub events: Vec<PublicTransactionEvent>,
    // Prover configuration; presumably `None` disables proof generation —
    // confirm in `create_proof_for_compressed_accounts`.
    pub prover_config: Option<ProverConfig>,
    // Ties the generic RPC type to the struct without storing an instance.
    phantom: PhantomData<R>,
}
impl<R: RpcConnection + Send + Sync + 'static> Indexer<R> for TestIndexer<R> {
async fn get_multiple_compressed_account_proofs(
&self,
hashes: Vec<String>,
) -> Result<Vec<MerkleProof>, IndexerError> {
info!("Getting proofs for {:?}", hashes);
let mut proofs: Vec<MerkleProof> = Vec::new();
hashes.iter().for_each(|hash| {
let hash_array: [u8; 32] = bs58::decode(hash)
.into_vec()
.unwrap()
.as_slice()
.try_into()
.unwrap();
self.state_merkle_trees.iter().for_each(|tree| {
if let Some(leaf_index) = tree.merkle_tree.get_leaf_index(&hash_array) {
let proof = tree
.merkle_tree
.get_proof_of_leaf(leaf_index, false)
.unwrap();
proofs.push(MerkleProof {
hash: hash.clone(),
leaf_index: leaf_index as u64,
merkle_tree: tree.accounts.merkle_tree.to_string(),
proof: proof.to_vec(),
root_seq: tree.merkle_tree.sequence_number as u64,
});
}
})
});
Ok(proofs)
}
async fn get_rpc_compressed_accounts_by_owner(
&self,
owner: &Pubkey,
) -> Result<Vec<String>, IndexerError> {
let result = self.get_compressed_accounts_by_owner(owner);
let mut hashes: Vec<String> = Vec::new();
for account in result.iter() {
let hash = account.hash().unwrap();
let bs58_hash = bs58::encode(hash).into_string();
hashes.push(bs58_hash);
}
Ok(hashes)
}
    /// Builds non-inclusion (new-address) proofs for `addresses` against the
    /// local copy of the address tree identified by `merkle_tree_pubkey`.
    ///
    /// For each address this locates the low element, derives the new
    /// element bundle, and collects the low element's Merkle proof plus the
    /// context needed to apply the update later.
    async fn get_multiple_new_address_proofs(
        &self,
        merkle_tree_pubkey: [u8; 32],
        addresses: Vec<[u8; 32]>,
    ) -> Result<Vec<NewAddressProofWithContext>, IndexerError> {
        let mut proofs: Vec<NewAddressProofWithContext> = Vec::new();
        for address in addresses.iter() {
            info!("Getting new address proof for {:?}", address);
            let pubkey = Pubkey::from(merkle_tree_pubkey);
            let address_tree_bundle = self
                .address_merkle_trees
                .iter()
                .find(|x| x.accounts.merkle_tree == pubkey)
                .unwrap();
            let address_biguint = BigUint::from_bytes_be(address.as_slice());
            let (old_low_address, _old_low_address_next_value) = address_tree_bundle
                .indexed_array
                .find_low_element_for_nonexistent(&address_biguint)
                .unwrap();
            let address_bundle = address_tree_bundle
                .indexed_array
                .new_element_with_low_element_index(old_low_address.index, &address_biguint)
                .unwrap();
            // NOTE(review): this repeats the identical lookup performed
            // above; unless `new_element_with_low_element_index` mutates the
            // indexed array, the second call is redundant — confirm and
            // simplify.
            let (old_low_address, old_low_address_next_value) = address_tree_bundle
                .indexed_array
                .find_low_element_for_nonexistent(&address_biguint)
                .unwrap();
            // Get the Merkle proof for updating low element.
            let low_address_proof = address_tree_bundle
                .merkle_tree
                .get_proof_of_leaf(old_low_address.index, false)
                .unwrap();
            let low_address_index: u64 = old_low_address.index as u64;
            let low_address_value: [u8; 32] =
                bigint_to_be_bytes_array(&old_low_address.value).unwrap();
            let low_address_next_index: u64 = old_low_address.next_index as u64;
            let low_address_next_value: [u8; 32] =
                bigint_to_be_bytes_array(&old_low_address_next_value).unwrap();
            let low_address_proof: [[u8; 32]; 16] = low_address_proof.to_array().unwrap();
            let proof = NewAddressProofWithContext {
                merkle_tree: merkle_tree_pubkey,
                low_address_index,
                low_address_value,
                low_address_next_index,
                low_address_next_value,
                low_address_proof,
                root: address_tree_bundle.merkle_tree.root(),
                root_seq: address_tree_bundle.merkle_tree.merkle_tree.sequence_number as u64,
                new_low_element: Some(address_bundle.new_low_element),
                new_element: Some(address_bundle.new_element),
                new_element_next_value: Some(address_bundle.new_element_next_value),
            };
            proofs.push(proof);
        }
        Ok(proofs)
    }
fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) {
let decoded_hash: [u8; 32] = bs58::decode(account_hash)
.into_vec()
.unwrap()
.as_slice()
.try_into()
.unwrap();
if let Some(state_tree_bundle) = self
.state_merkle_trees
.iter_mut()
.find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
{
if let Some(leaf_index) = state_tree_bundle.merkle_tree.get_leaf_index(&decoded_hash) {
state_tree_bundle
.merkle_tree
.update(&[0u8; 32], leaf_index)
.unwrap();
}
}
}
fn address_tree_updated(
&mut self,
merkle_tree_pubkey: Pubkey,
context: &NewAddressProofWithContext,
) {
info!("Updating address tree...");
let mut address_tree_bundle: &mut AddressMerkleTreeBundle = self
.address_merkle_trees
.iter_mut()
.find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
.unwrap();
let new_low_element = context.new_low_element.clone().unwrap();
let new_element = context.new_element.clone().unwrap();
let new_element_next_value = context.new_element_next_value.clone().unwrap();
address_tree_bundle
.merkle_tree
.update(&new_low_element, &new_element, &new_element_next_value)
.unwrap();
address_tree_bundle
.indexed_array
.append_with_low_element_index(new_low_element.index, &new_element.value)
.unwrap();
info!("Address tree updated");
}
fn get_state_merkle_tree_accounts(&self, pubkeys: &[Pubkey]) -> Vec<StateMerkleTreeAccounts> {
pubkeys
.iter()
.map(|x| {
self.state_merkle_trees
.iter()
.find(|y| y.accounts.merkle_tree == *x)
.unwrap()
.accounts
})
.collect::<Vec<_>>()
}
    /// Ingests a transaction event: nullifies the spent input accounts in
    /// local state and registers every output account (token or plain),
    /// appending its hash to the local copy of its target state tree.
    ///
    /// Returns the newly created plain and token compressed accounts.
    fn add_event_and_compressed_accounts(
        &mut self,
        event: &PublicTransactionEvent,
    ) -> (
        Vec<CompressedAccountWithMerkleContext>,
        Vec<TokenDataWithContext>,
    ) {
        // Move every spent input from the live lists to the nullified lists.
        for hash in event.input_compressed_account_hashes.iter() {
            // Look for the hash among plain compressed accounts first.
            let index = self.compressed_accounts.iter().position(|x| {
                x.compressed_account
                    .hash::<Poseidon>(
                        &x.merkle_context.merkle_tree_pubkey,
                        &x.merkle_context.leaf_index,
                    )
                    .unwrap()
                    == *hash
            });
            if let Some(index) = index {
                self.nullified_compressed_accounts
                    .push(self.compressed_accounts[index].clone());
                self.compressed_accounts.remove(index);
                continue;
            };
            // NOTE(review): `index` is necessarily `None` here because the
            // `Some` branch above `continue`s — this check is redundant.
            if index.is_none() {
                // Otherwise the hash must belong to a token account; panic
                // if it is unknown.
                let index = self
                    .token_compressed_accounts
                    .iter()
                    .position(|x| {
                        x.compressed_account
                            .compressed_account
                            .hash::<Poseidon>(
                                &x.compressed_account.merkle_context.merkle_tree_pubkey,
                                &x.compressed_account.merkle_context.leaf_index,
                            )
                            .unwrap()
                            == *hash
                    })
                    .expect("input compressed account not found");
                self.token_nullified_compressed_accounts
                    .push(self.token_compressed_accounts[index].clone());
                self.token_compressed_accounts.remove(index);
            }
        }
        let mut compressed_accounts = Vec::new();
        let mut token_compressed_accounts = Vec::new();
        for (i, compressed_account) in event.output_compressed_accounts.iter().enumerate() {
            // Resolve the nullifier queue paired with this output's tree.
            let nullifier_queue_pubkey = self
                .state_merkle_trees
                .iter()
                .find(|x| {
                    x.accounts.merkle_tree
                        == event.pubkey_array
                            [event.output_compressed_accounts[i].merkle_tree_index as usize]
                })
                .unwrap()
                .accounts
                .nullifier_queue;
            // if data is some, try to deserialize token data, if it fails, add to compressed_accounts
            // if data is none add to compressed_accounts
            // new accounts are inserted in front so that the newest accounts are found first
            match compressed_account.compressed_account.data.as_ref() {
                Some(data) => {
                    if compressed_account.compressed_account.owner == light_compressed_token::ID
                        && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR
                    {
                        if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) {
                            let token_account = TokenDataWithContext {
                                token_data,
                                compressed_account: CompressedAccountWithMerkleContext {
                                    compressed_account: compressed_account
                                        .compressed_account
                                        .clone(),
                                    merkle_context: MerkleContext {
                                        leaf_index: event.output_leaf_indices[i],
                                        merkle_tree_pubkey: event.pubkey_array[event
                                            .output_compressed_accounts[i]
                                            .merkle_tree_index
                                            as usize],
                                        nullifier_queue_pubkey,
                                        queue_index: None,
                                    },
                                },
                            };
                            token_compressed_accounts.push(token_account.clone());
                            self.token_compressed_accounts.insert(0, token_account);
                        }
                    } else {
                        let compressed_account = CompressedAccountWithMerkleContext {
                            compressed_account: compressed_account.compressed_account.clone(),
                            merkle_context: MerkleContext {
                                leaf_index: event.output_leaf_indices[i],
                                merkle_tree_pubkey: event.pubkey_array[event
                                    .output_compressed_accounts[i]
                                    .merkle_tree_index
                                    as usize],
                                nullifier_queue_pubkey,
                                queue_index: None,
                            },
                        };
                        compressed_accounts.push(compressed_account.clone());
                        self.compressed_accounts.insert(0, compressed_account);
                    }
                }
                None => {
                    let compressed_account = CompressedAccountWithMerkleContext {
                        compressed_account: compressed_account.compressed_account.clone(),
                        merkle_context: MerkleContext {
                            leaf_index: event.output_leaf_indices[i],
                            merkle_tree_pubkey: event.pubkey_array
                                [event.output_compressed_accounts[i].merkle_tree_index as usize],
                            nullifier_queue_pubkey,
                            queue_index: None,
                        },
                    };
                    compressed_accounts.push(compressed_account.clone());
                    self.compressed_accounts.insert(0, compressed_account);
                }
            };
            // Mirror the on-chain append in the local reference tree.
            let merkle_tree = &mut self
                .state_merkle_trees
                .iter_mut()
                .find(|x| {
                    x.accounts.merkle_tree
                        == event.pubkey_array
                            [event.output_compressed_accounts[i].merkle_tree_index as usize]
                })
                .unwrap()
                .merkle_tree;
            merkle_tree
                .append(
                    &compressed_account
                        .compressed_account
                        .hash::<Poseidon>(
                            &event.pubkey_array
                                [event.output_compressed_accounts[i].merkle_tree_index as usize],
                            &event.output_leaf_indices[i],
                        )
                        .unwrap(),
                )
                .expect("insert failed");
        }
        self.events.push(event.clone());
        (compressed_accounts, token_compressed_accounts)
    }
    /// Immutable access to all state Merkle tree bundles tracked by the indexer.
    fn get_state_merkle_trees(&self) -> &Vec<StateMerkleTreeBundle> {
        &self.state_merkle_trees
    }
    /// Mutable access to all state Merkle tree bundles.
    fn get_state_merkle_trees_mut(&mut self) -> &mut Vec<StateMerkleTreeBundle> {
        &mut self.state_merkle_trees
    }
    /// Immutable access to all address Merkle tree bundles.
    fn get_address_merkle_trees(&self) -> &Vec<AddressMerkleTreeBundle> {
        &self.address_merkle_trees
    }
    /// Mutable access to all address Merkle tree bundles.
    fn get_address_merkle_trees_mut(&mut self) -> &mut Vec<AddressMerkleTreeBundle> {
        &mut self.address_merkle_trees
    }
    /// All compressed token accounts currently tracked (i.e. not yet nullified).
    fn get_token_compressed_accounts(&self) -> &Vec<TokenDataWithContext> {
        &self.token_compressed_accounts
    }
    /// Keypair used by this indexer to pay for transactions.
    fn get_payer(&self) -> &Keypair {
        &self.payer
    }
    /// The group PDA this indexer was configured with.
    fn get_group_pda(&self) -> &Pubkey {
        &self.group_pda
    }
    /// Builds a validity proof for the given account hashes (inclusion) and/or
    /// new addresses (non-inclusion) by posting a JSON payload to the local
    /// gnark prover server, retrying up to 3 times.
    ///
    /// At least one of `compressed_accounts` / `new_addresses` must be `Some`.
    /// `state_merkle_tree_pubkeys` must be provided alongside
    /// `compressed_accounts`, and `address_merkle_tree_pubkeys` alongside
    /// `new_addresses` (they are `unwrap`ed in the matching branches).
    ///
    /// # Panics
    /// - if `compressed_accounts` has a length other than 1, 2, 3, 4 or 8
    /// - if `new_addresses` has a length other than 1 or 2
    /// - if both inputs are `None`
    /// - if the prover server does not return a proof after all retries
    async fn create_proof_for_compressed_accounts(
        &mut self,
        compressed_accounts: Option<&[[u8; 32]]>,
        state_merkle_tree_pubkeys: Option<&[Pubkey]>,
        new_addresses: Option<&[[u8; 32]]>,
        address_merkle_tree_pubkeys: Option<Vec<Pubkey>>,
        rpc: &mut R,
    ) -> ProofRpcResult {
        // The circuits only support these batch sizes.
        if compressed_accounts.is_some()
            && ![1usize, 2usize, 3usize, 4usize, 8usize]
                .contains(&compressed_accounts.unwrap().len())
        {
            panic!(
                "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}",
                compressed_accounts.unwrap().len()
            )
        }
        if new_addresses.is_some() && ![1usize, 2usize].contains(&new_addresses.unwrap().len()) {
            panic!("new_addresses must be of length 1, 2")
        }
        let client = Client::new();
        // Select the circuit flavor: inclusion only, non-inclusion only, or a
        // combined proof when both kinds of inputs are present.
        let (root_indices, address_root_indices, json_payload) =
            match (compressed_accounts, new_addresses) {
                (Some(accounts), None) => {
                    let (payload, indices) = self
                        .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc)
                        .await;
                    (indices, Vec::new(), payload.to_string())
                }
                (None, Some(addresses)) => {
                    let (payload, indices) = self
                        .process_non_inclusion_proofs(
                            address_merkle_tree_pubkeys.unwrap().as_slice(),
                            addresses,
                            rpc,
                        )
                        .await;
                    (Vec::<u16>::new(), indices, payload.to_string())
                }
                (Some(accounts), Some(addresses)) => {
                    let (inclusion_payload, inclusion_indices) = self
                        .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc)
                        .await;
                    let (non_inclusion_payload, non_inclusion_indices) = self
                        .process_non_inclusion_proofs(
                            address_merkle_tree_pubkeys.unwrap().as_slice(),
                            addresses,
                            rpc,
                        )
                        .await;
                    let combined_payload = CombinedJsonStruct {
                        inclusion: inclusion_payload.inputs,
                        non_inclusion: non_inclusion_payload.inputs,
                    }
                    .to_string();
                    (inclusion_indices, non_inclusion_indices, combined_payload)
                }
                _ => {
                    panic!("At least one of compressed_accounts or new_addresses must be provided")
                }
            };
        let mut retries = 3;
        while retries > 0 {
            let response_result = client
                .post(&format!("{}{}", SERVER_ADDRESS, PROVE_PATH))
                .header("Content-Type", "text/plain; charset=utf-8")
                .body(json_payload.clone())
                .send()
                .await
                .expect("Failed to execute request.");
            if response_result.status().is_success() {
                let body = response_result.text().await.unwrap();
                let proof_json = deserialize_gnark_proof_json(&body).unwrap();
                let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json);
                // Compress the groth16 points into the on-chain wire format.
                let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c);
                return ProofRpcResult {
                    root_indices,
                    address_root_indices,
                    proof: CompressedProof {
                        a: proof_a,
                        b: proof_b,
                        c: proof_c,
                    },
                };
            } else {
                warn!("Error: {}", response_result.text().await.unwrap());
                tokio::time::sleep(Duration::from_secs(1)).await;
                // Best effort: (re)start the prover before the next attempt.
                if let Some(ref prover_config) = self.prover_config {
                    spawn_prover(true, prover_config.clone()).await;
                }
                retries -= 1;
            }
        }
        panic!("Failed to get proof from server");
    }
fn add_address_merkle_tree_accounts(
&mut self,
merkle_tree_keypair: &Keypair,
queue_keypair: &Keypair,
_owning_program_id: Option<Pubkey>,
) -> AddressMerkleTreeAccounts {
info!("Adding address merkle tree accounts...");
let address_merkle_tree_accounts = AddressMerkleTreeAccounts {
merkle_tree: merkle_tree_keypair.pubkey(),
queue: queue_keypair.pubkey(),
};
self.address_merkle_trees
.push(Self::add_address_merkle_tree_bundle(
address_merkle_tree_accounts,
));
info!(
"Address merkle tree accounts added. Total: {}",
self.address_merkle_trees.len()
);
address_merkle_tree_accounts
}
/// returns compressed_accounts with the owner pubkey
/// does not return token accounts.
fn get_compressed_accounts_by_owner(
&self,
owner: &Pubkey,
) -> Vec<CompressedAccountWithMerkleContext> {
self.compressed_accounts
.iter()
.filter(|x| x.compressed_account.owner == *owner)
.cloned()
.collect()
}
fn get_compressed_token_accounts_by_owner(&self, owner: &Pubkey) -> Vec<TokenDataWithContext> {
self.token_compressed_accounts
.iter()
.filter(|x| x.token_data.owner == *owner)
.cloned()
.collect()
}
    /// Appends a pre-built state Merkle tree bundle to the indexer's registry.
    fn add_state_bundle(&mut self, state_bundle: StateMerkleTreeBundle) {
        self.get_state_merkle_trees_mut().push(state_bundle);
    }
}
impl<R: RpcConnection> TestIndexer<R> {
fn count_matching_hashes(&self, query_hashes: &[String]) -> usize {
self.nullified_compressed_accounts
.iter()
.map(|account| self.compute_hash(account))
.filter(|bs58_hash| query_hashes.contains(bs58_hash))
.count()
}
    /// Computes the Poseidon hash of a compressed account, bound to its Merkle
    /// tree pubkey and leaf index, and returns it as a base58 string.
    fn compute_hash(&self, account: &CompressedAccountWithMerkleContext) -> String {
        let hash = account
            .compressed_account
            .hash::<Poseidon>(
                &account.merkle_context.merkle_tree_pubkey,
                &account.merkle_context.leaf_index,
            )
            .unwrap();
        bs58::encode(hash).into_string()
    }
    /// Convenience constructor: builds a `TestIndexer` with exactly one state
    /// tree and one address tree, taking all account pubkeys from the given
    /// test environment.
    pub async fn init_from_env(
        payer: &Keypair,
        env: &EnvAccounts,
        prover_config: Option<ProverConfig>,
    ) -> Self {
        Self::new(
            vec![StateMerkleTreeAccounts {
                merkle_tree: env.merkle_tree_pubkey,
                nullifier_queue: env.nullifier_queue_pubkey,
                cpi_context: env.cpi_context_account_pubkey,
            }],
            vec![AddressMerkleTreeAccounts {
                merkle_tree: env.address_merkle_tree_pubkey,
                queue: env.address_merkle_tree_queue_pubkey,
            }],
            payer.insecure_clone(),
            env.group_pda,
            prover_config,
        )
        .await
    }
    /// Creates a `TestIndexer` that shadows the given state and address trees
    /// with local in-memory Merkle trees.
    ///
    /// If `prover_config` is `Some`, the gnark prover server is spawned before
    /// the indexer is returned.
    pub async fn new(
        state_merkle_tree_accounts: Vec<StateMerkleTreeAccounts>,
        address_merkle_tree_accounts: Vec<AddressMerkleTreeAccounts>,
        payer: Keypair,
        group_pda: Pubkey,
        prover_config: Option<ProverConfig>,
    ) -> Self {
        if let Some(ref prover_config) = prover_config {
            spawn_prover(true, prover_config.clone()).await;
        }
        // One local shadow tree per on-chain state tree.
        let mut state_merkle_trees = Vec::new();
        for state_merkle_tree_account in state_merkle_tree_accounts.iter() {
            let merkle_tree = Box::new(MerkleTree::<Poseidon>::new(
                STATE_MERKLE_TREE_HEIGHT as usize,
                STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
            ));
            state_merkle_trees.push(StateMerkleTreeBundle {
                accounts: *state_merkle_tree_account,
                merkle_tree,
                rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64,
            });
        }
        let mut address_merkle_trees = Vec::new();
        for address_merkle_tree_account in address_merkle_tree_accounts {
            address_merkle_trees.push(Self::add_address_merkle_tree_bundle(
                address_merkle_tree_account,
            ));
        }
        Self {
            state_merkle_trees,
            address_merkle_trees,
            payer,
            compressed_accounts: vec![],
            nullified_compressed_accounts: vec![],
            events: vec![],
            token_compressed_accounts: vec![],
            token_nullified_compressed_accounts: vec![],
            prover_config,
            phantom: Default::default(),
            group_pda,
        }
    }
    /// Builds the in-memory shadow state (indexed Merkle tree + indexed array)
    /// for the given on-chain address tree accounts.
    pub fn add_address_merkle_tree_bundle(
        address_merkle_tree_accounts: AddressMerkleTreeAccounts,
        // TODO: add config here
    ) -> AddressMerkleTreeBundle {
        let mut merkle_tree = Box::new(
            IndexedMerkleTree::<Poseidon, usize>::new(
                STATE_MERKLE_TREE_HEIGHT as usize,
                STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
            )
            .unwrap(),
        );
        // Seed both structures with the initial (zeroed) indexed element.
        merkle_tree.init().unwrap();
        let mut indexed_array = Box::<IndexedArray<Poseidon, usize>>::default();
        indexed_array.init().unwrap();
        AddressMerkleTreeBundle {
            merkle_tree,
            indexed_array,
            accounts: address_merkle_tree_accounts,
            rollover_fee: FeeConfig::default().address_queue_rollover as i64,
        }
    }
    /// Creates an address Merkle tree + queue on-chain (with default configs,
    /// asserting the result) and registers the pair with the indexer.
    pub async fn add_address_merkle_tree(
        &mut self,
        rpc: &mut R,
        merkle_tree_keypair: &Keypair,
        queue_keypair: &Keypair,
        owning_program_id: Option<Pubkey>,
    ) -> AddressMerkleTreeAccounts {
        create_address_merkle_tree_and_queue_account_with_assert(
            &self.payer,
            true,
            rpc,
            merkle_tree_keypair,
            queue_keypair,
            owning_program_id,
            None,
            &AddressMerkleTreeConfig::default(),
            &AddressQueueConfig::default(),
            0,
        )
        .await
        .unwrap();
        self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id)
    }
    /// Creates a state Merkle tree, nullifier queue, and CPI context account
    /// on-chain (default configs), then registers a matching local shadow
    /// bundle with the indexer.
    pub async fn add_state_merkle_tree(
        &mut self,
        rpc: &mut R,
        merkle_tree_keypair: &Keypair,
        nullifier_queue_keypair: &Keypair,
        cpi_context_keypair: &Keypair,
        owning_program_id: Option<Pubkey>,
        forester: Option<Pubkey>,
    ) {
        create_state_merkle_tree_and_queue_account(
            &self.payer,
            true,
            rpc,
            merkle_tree_keypair,
            nullifier_queue_keypair,
            Some(cpi_context_keypair),
            owning_program_id,
            forester,
            // Index of the new tree = current number of tracked trees.
            self.state_merkle_trees.len() as u64,
            &StateMerkleTreeConfig::default(),
            &NullifierQueueConfig::default(),
        )
        .await
        .unwrap();
        let state_merkle_tree_account = StateMerkleTreeAccounts {
            merkle_tree: merkle_tree_keypair.pubkey(),
            nullifier_queue: nullifier_queue_keypair.pubkey(),
            cpi_context: cpi_context_keypair.pubkey(),
        };
        let merkle_tree = Box::new(MerkleTree::<Poseidon>::new(
            STATE_MERKLE_TREE_HEIGHT as usize,
            STATE_MERKLE_TREE_CANOPY_DEPTH as usize,
        ));
        self.state_merkle_trees.push(StateMerkleTreeBundle {
            merkle_tree,
            accounts: state_merkle_tree_account,
            rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64,
        });
    }
    /// Builds the batched-inclusion JSON payload for the prover: one local
    /// Merkle proof per account hash, paired with the on-chain root index of
    /// each involved state tree.
    ///
    /// # Panics
    /// Panics if an account or tree is unknown, or if the local shadow tree's
    /// root differs from the fetched on-chain root.
    async fn process_inclusion_proofs(
        &self,
        merkle_tree_pubkeys: &[Pubkey],
        accounts: &[[u8; 32]],
        rpc: &mut R,
    ) -> (BatchInclusionJsonStruct, Vec<u16>) {
        let mut inclusion_proofs = Vec::new();
        let mut root_indices = Vec::new();
        for (i, account) in accounts.iter().enumerate() {
            // Local shadow tree for the i-th account's state tree.
            let merkle_tree = &self
                .state_merkle_trees
                .iter()
                .find(|x| x.accounts.merkle_tree == merkle_tree_pubkeys[i])
                .unwrap()
                .merkle_tree;
            let leaf_index = merkle_tree.get_leaf_index(account).unwrap();
            let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap();
            inclusion_proofs.push(InclusionMerkleProofInputs {
                root: BigInt::from_be_bytes(merkle_tree.root().as_slice()),
                leaf: BigInt::from_be_bytes(account),
                path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()),
                path_elements: proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(),
            });
            // Fetch the on-chain tree to read its current root index.
            // SAFETY-NOTE(review): `get_concurrent_merkle_tree` is an unsafe
            // zero-copy deserialization of the fetched account — assumes the
            // account at this pubkey really holds a StateMerkleTreeAccount.
            let fetched_merkle_tree = unsafe {
                get_concurrent_merkle_tree::<StateMerkleTreeAccount, R, Poseidon, 26>(
                    rpc,
                    merkle_tree_pubkeys[i],
                )
                .await
            };
            // Debug logging of the on-chain root history.
            for i in 0..fetched_merkle_tree.roots.len() {
                info!("roots {:?} {:?}", i, fetched_merkle_tree.roots[i]);
            }
            info!(
                "sequence number {:?}",
                fetched_merkle_tree.sequence_number()
            );
            info!("root index {:?}", fetched_merkle_tree.root_index());
            info!("local sequence number {:?}", merkle_tree.sequence_number);
            // The local shadow tree must agree with the chain.
            assert_eq!(
                merkle_tree.root(),
                fetched_merkle_tree.root(),
                "Merkle tree root mismatch"
            );
            root_indices.push(fetched_merkle_tree.root_index() as u16);
        }
        let inclusion_proof_inputs = InclusionProofInputs(inclusion_proofs.as_slice());
        let batch_inclusion_proof_inputs =
            BatchInclusionJsonStruct::from_inclusion_proof_inputs(&inclusion_proof_inputs);
        (batch_inclusion_proof_inputs, root_indices)
    }
    /// Builds the batched non-inclusion JSON payload proving that each address
    /// is absent from its address tree, paired with the on-chain root index of
    /// each involved address tree.
    async fn process_non_inclusion_proofs(
        &self,
        address_merkle_tree_pubkeys: &[Pubkey],
        addresses: &[[u8; 32]],
        rpc: &mut R,
    ) -> (BatchNonInclusionJsonStruct, Vec<u16>) {
        let mut non_inclusion_proofs = Vec::new();
        let mut address_root_indices = Vec::new();
        for (i, address) in addresses.iter().enumerate() {
            // Local shadow bundle for the i-th address tree.
            let address_tree = &self
                .address_merkle_trees
                .iter()
                .find(|x| x.accounts.merkle_tree == address_merkle_tree_pubkeys[i])
                .unwrap();
            let proof_inputs = get_non_inclusion_proof_inputs(
                address,
                &address_tree.merkle_tree,
                &address_tree.indexed_array,
            );
            non_inclusion_proofs.push(proof_inputs);
            // SAFETY-NOTE(review): unsafe zero-copy read of the fetched
            // account — assumes it holds an AddressMerkleTreeAccount.
            let fetched_address_merkle_tree = unsafe {
                get_indexed_merkle_tree::<AddressMerkleTreeAccount, R, Poseidon, usize, 26, 16>(
                    rpc,
                    address_merkle_tree_pubkeys[i],
                )
                .await
            };
            address_root_indices.push(fetched_address_merkle_tree.root_index() as u16);
        }
        let non_inclusion_proof_inputs = NonInclusionProofInputs(non_inclusion_proofs.as_slice());
        let batch_non_inclusion_proof_inputs =
            BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs(
                &non_inclusion_proof_inputs,
            );
        (batch_non_inclusion_proof_inputs, address_root_indices)
    }
/// deserializes an event
/// adds the output_compressed_accounts to the compressed_accounts
/// removes the input_compressed_accounts from the compressed_accounts
/// adds the input_compressed_accounts to the nullified_compressed_accounts
pub fn add_lamport_compressed_accounts(&mut self, event_bytes: Vec<u8>) {
let event_bytes = event_bytes.clone();
let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap();
self.add_event_and_compressed_accounts(&event);
}
    /// Indexes an already-deserialized event: adds the output compressed
    /// accounts, removes the spent inputs (recording them as nullified), and
    /// deserializes token data from outputs owned by the compressed-token
    /// program, tracking those as compressed token accounts.
    pub fn add_compressed_accounts_with_token_data(&mut self, event: &PublicTransactionEvent) {
        self.add_event_and_compressed_accounts(event);
    }
/// returns the compressed sol balance of the owner pubkey
pub fn get_compressed_balance(&self, owner: &Pubkey) -> u64 {
self.compressed_accounts
.iter()
.filter(|x| x.compressed_account.owner == *owner)
.map(|x| x.compressed_account.lamports)
.sum()
}
/// returns the compressed token balance of the owner pubkey for a token by mint
pub fn get_compressed_token_balance(&self, owner: &Pubkey, mint: &Pubkey) -> u64 {
self.token_compressed_accounts
.iter()
.filter(|x| {
x.compressed_account.compressed_account.owner == *owner
&& x.token_data.mint == *mint
})
.map(|x| x.token_data.amount)
.sum()
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/Cargo.toml
|
# Trait-based hashing abstraction (Poseidon, SHA-256, Keccak) usable both
# on-chain (via syscalls) and off-chain (via host crates).
[package]
name = "light-hasher"
version = "1.1.0"
description = "Trait for generic usage of hash functions on Solana"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[features]
solana = []
[dependencies]
light-poseidon = "0.2.0"
solana-program = { workspace = true }
thiserror = "1.0"
# Host-only hash implementations; on Solana the syscalls are used instead.
[target.'cfg(not(target_os = "solana"))'.dependencies]
ark-bn254 = "0.4.0"
sha2 = "0.10"
sha3 = "0.10"
[dev-dependencies]
rand = "0.8"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/sha256.rs
|
use crate::{
errors::HasherError,
zero_bytes::{sha256::ZERO_BYTES, ZeroBytes},
zero_indexed_leaf::sha256::ZERO_INDEXED_LEAF,
Hash, Hasher,
};
/// SHA-256 implementation of [`Hasher`].
#[derive(Clone, Copy)] // To allow using with zero copy Solana accounts.
pub struct Sha256;
impl Hasher for Sha256 {
    /// Hashes a single byte slice; delegates to [`Self::hashv`].
    fn hash(val: &[u8]) -> Result<Hash, HasherError> {
        Self::hashv(&[val])
    }
    /// Hashes the concatenation of all slices with SHA-256.
    /// Off-chain this uses the `sha2` crate; on Solana the `sol_sha256`
    /// syscall.
    fn hashv(vals: &[&[u8]]) -> Result<Hash, HasherError> {
        #[cfg(not(target_os = "solana"))]
        {
            use sha2::{Digest, Sha256};
            let mut hasher = Sha256::default();
            for val in vals {
                hasher.update(val);
            }
            Ok(hasher.finalize().into())
        }
        // Call via a system call to perform the calculation
        #[cfg(target_os = "solana")]
        {
            use crate::HASH_BYTES;
            let mut hash_result = [0; HASH_BYTES];
            // SAFETY-NOTE(review): follows the `sol_sha256` syscall ABI
            // (pointer to a slice-of-slices + element count); the output
            // buffer is exactly HASH_BYTES long.
            unsafe {
                crate::syscalls::sol_sha256(
                    vals as *const _ as *const u8,
                    vals.len() as u64,
                    &mut hash_result as *mut _ as *mut u8,
                );
            }
            Ok(hash_result)
        }
    }
    /// Precomputed zero-subtree hashes for SHA-256 Merkle trees.
    fn zero_bytes() -> ZeroBytes {
        ZERO_BYTES
    }
    /// SHA-256 hash of the zeroed indexed-Merkle-tree leaf.
    fn zero_indexed_leaf() -> [u8; 32] {
        ZERO_INDEXED_LEAF
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/lib.rs
|
pub mod bytes;
pub mod errors;
pub mod keccak;
pub mod poseidon;
pub mod sha256;
pub mod syscalls;
pub mod zero_bytes;
pub mod zero_indexed_leaf;
pub use keccak::Keccak;
pub use poseidon::Poseidon;
pub use sha256::Sha256;
pub use crate::errors::HasherError;
use crate::zero_bytes::ZeroBytes;
/// Size in bytes of every hash produced by the [`Hasher`] implementations.
pub const HASH_BYTES: usize = 32;
/// A 32-byte hash output.
pub type Hash = [u8; HASH_BYTES];
/// Common interface over the supported hash functions (Poseidon, SHA-256,
/// Keccak), including their precomputed zero values for Merkle-tree use.
pub trait Hasher {
    /// Hashes a single byte slice.
    fn hash(val: &[u8]) -> Result<Hash, HasherError>;
    /// Hashes a sequence of byte slices as one message.
    fn hashv(vals: &[&[u8]]) -> Result<Hash, HasherError>;
    /// Precomputed hashes of all-zero subtrees, one entry per tree level.
    fn zero_bytes() -> ZeroBytes;
    /// Hash of the zeroed leaf used in indexed Merkle trees.
    fn zero_indexed_leaf() -> [u8; 32];
}
/// Types that can hash themselves with a caller-chosen [`Hasher`].
pub trait DataHasher {
    fn hash<H: crate::Hasher>(&self) -> Result<[u8; 32], HasherError>;
}
/// 8-byte account discriminator (Anchor-style) for on-chain account types.
pub trait Discriminator {
    const DISCRIMINATOR: [u8; 8];
    fn discriminator() -> [u8; 8] {
        Self::DISCRIMINATOR
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/keccak.rs
|
use crate::{
errors::HasherError,
zero_bytes::{keccak::ZERO_BYTES, ZeroBytes},
zero_indexed_leaf::keccak::ZERO_INDEXED_LEAF,
Hash, Hasher,
};
/// Keccak-256 implementation of [`Hasher`].
#[derive(Clone, Copy)] // To allow using with zero copy Solana accounts.
pub struct Keccak;
impl Hasher for Keccak {
    /// Hashes a single byte slice; delegates to [`Self::hashv`].
    fn hash(val: &[u8]) -> Result<Hash, HasherError> {
        Self::hashv(&[val])
    }
    /// Hashes the concatenation of all slices with Keccak-256.
    /// Off-chain this uses the `sha3` crate; on Solana the `sol_keccak256`
    /// syscall.
    fn hashv(vals: &[&[u8]]) -> Result<Hash, HasherError> {
        #[cfg(not(target_os = "solana"))]
        {
            use sha3::{Digest, Keccak256};
            let mut hasher = Keccak256::default();
            for val in vals {
                hasher.update(val);
            }
            Ok(hasher.finalize().into())
        }
        // Call via a system call to perform the calculation
        #[cfg(target_os = "solana")]
        {
            use crate::HASH_BYTES;
            let mut hash_result = [0; HASH_BYTES];
            // SAFETY-NOTE(review): follows the `sol_keccak256` syscall ABI
            // (pointer to a slice-of-slices + element count); the output
            // buffer is exactly HASH_BYTES long.
            unsafe {
                crate::syscalls::sol_keccak256(
                    vals as *const _ as *const u8,
                    vals.len() as u64,
                    &mut hash_result as *mut _ as *mut u8,
                );
            }
            Ok(hash_result)
        }
    }
    /// Precomputed zero-subtree hashes for Keccak Merkle trees.
    fn zero_bytes() -> ZeroBytes {
        ZERO_BYTES
    }
    /// Keccak hash of the zeroed indexed-Merkle-tree leaf.
    fn zero_indexed_leaf() -> [u8; 32] {
        ZERO_INDEXED_LEAF
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/errors.rs
|
use light_poseidon::PoseidonError;
use solana_program::poseidon::PoseidonSyscallError;
use thiserror::Error;
/// Errors produced by the hashing implementations in this crate.
#[derive(Debug, Error)]
pub enum HasherError {
    #[error("Integer overflow, value too large")]
    IntegerOverflow,
    #[error("Poseidon hasher error: {0}")]
    Poseidon(#[from] PoseidonError),
    #[error("Poseidon syscall error: {0}")]
    PoseidonSyscall(#[from] PoseidonSyscallError),
    #[error("Unknown Solana syscall error: {0}")]
    UnknownSolanaSyscall(u64),
}
// NOTE(vadorovsky): Unfortunately, we need to do it by hand. `num_derive::ToPrimitive`
// doesn't support data-carrying enums.
impl From<HasherError> for u32 {
    /// Maps each variant to a stable numeric error code in the 7001-7004
    /// range (syscall variants forward their own code when it fits in u32).
    fn from(e: HasherError) -> u32 {
        match e {
            HasherError::IntegerOverflow => 7001,
            HasherError::Poseidon(_) => 7002,
            HasherError::PoseidonSyscall(e) => (u64::from(e)).try_into().unwrap_or(7003),
            HasherError::UnknownSolanaSyscall(e) => e.try_into().unwrap_or(7004),
        }
    }
}
impl From<HasherError> for solana_program::program_error::ProgramError {
    /// Wraps the numeric error code as a custom Solana program error.
    fn from(e: HasherError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/bytes.rs
|
/// A trait providing [`as_byte_vec()`](AsByteVec::as_byte_vec) method for types which
/// are used inside compressed accounts.
///
/// Each element of the returned `Vec<Vec<u8>>` is one hash-input segment.
pub trait AsByteVec {
    fn as_byte_vec(&self) -> Vec<Vec<u8>>;
}
// Implements `AsByteVec` for an integer type as a single little-endian
// byte segment.
macro_rules! impl_as_byte_vec_for_integer_type {
    ($int_ty:ty) => {
        impl AsByteVec for $int_ty {
            fn as_byte_vec(&self) -> Vec<Vec<u8>> {
                vec![self.to_le_bytes().to_vec()]
            }
        }
    };
}
// Special implementation for `bool` since bool doesn't implement `ToLeBytes`.
impl AsByteVec for bool {
    fn as_byte_vec(&self) -> Vec<Vec<u8>> {
        vec![vec![*self as u8]]
    }
}
// Little-endian byte encodings for all primitive integer widths.
impl_as_byte_vec_for_integer_type!(i8);
impl_as_byte_vec_for_integer_type!(u8);
impl_as_byte_vec_for_integer_type!(i16);
impl_as_byte_vec_for_integer_type!(u16);
impl_as_byte_vec_for_integer_type!(i32);
impl_as_byte_vec_for_integer_type!(u32);
impl_as_byte_vec_for_integer_type!(i64);
impl_as_byte_vec_for_integer_type!(u64);
impl_as_byte_vec_for_integer_type!(isize);
impl_as_byte_vec_for_integer_type!(usize);
impl_as_byte_vec_for_integer_type!(i128);
impl_as_byte_vec_for_integer_type!(u128);
impl<T> AsByteVec for Option<T>
where
    T: AsByteVec,
{
    /// `None` serializes to a single `[0]` marker segment; `Some(x)`
    /// serializes to a leading `[1]` marker segment followed by `x`'s own
    /// segments. This keeps `None` and `Some(0)` distinct so they can never
    /// produce the same hash input.
    fn as_byte_vec(&self) -> Vec<Vec<u8>> {
        match self {
            None => vec![vec![0]],
            Some(inner) => {
                let mut segments = vec![vec![1]];
                segments.extend(inner.as_byte_vec());
                segments
            }
        }
    }
}
impl<const N: usize> AsByteVec for [u8; N] {
    /// Byte arrays serialize as a single segment with their contents.
    fn as_byte_vec(&self) -> Vec<Vec<u8>> {
        vec![self.to_vec()]
    }
}
impl AsByteVec for String {
    /// Strings serialize as one segment of their UTF-8 bytes.
    fn as_byte_vec(&self) -> Vec<Vec<u8>> {
        vec![self.as_bytes().to_vec()]
    }
}
impl AsByteVec for solana_program::pubkey::Pubkey {
    /// Pubkeys serialize as one 32-byte segment.
    fn as_byte_vec(&self) -> Vec<Vec<u8>> {
        vec![self.to_bytes().to_vec()]
    }
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_as_byte_vec_integers() {
let i8_min: &dyn AsByteVec = &i8::MIN;
let i8_min_bytes = i8_min.as_byte_vec();
assert_eq!(i8_min_bytes, &[&[128]]);
assert_eq!(i8_min_bytes, &[i8::MIN.to_le_bytes()]);
let i8_max: &dyn AsByteVec = &i8::MAX;
let i8_max_bytes = i8_max.as_byte_vec();
assert_eq!(i8_max_bytes, &[&[127]]);
assert_eq!(i8_max_bytes, &[i8::MAX.to_le_bytes()]);
let u8_min: &dyn AsByteVec = &u8::MIN;
let u8_min_bytes = u8_min.as_byte_vec();
assert_eq!(u8_min_bytes, &[&[0]]);
assert_eq!(u8_min_bytes, &[u8::MIN.to_le_bytes()]);
let u8_max: &dyn AsByteVec = &u8::MAX;
let u8_max_bytes = u8_max.as_byte_vec();
assert_eq!(u8_max_bytes, &[&[255]]);
assert_eq!(u8_max_bytes, &[u8::MAX.to_le_bytes()]);
let i16_min: &dyn AsByteVec = &i16::MIN;
let i16_min_bytes = i16_min.as_byte_vec();
assert_eq!(i16_min_bytes, &[&[0, 128]]);
assert_eq!(i16_min_bytes, &[&i16::MIN.to_le_bytes()]);
let i16_max: &dyn AsByteVec = &i16::MAX;
let i16_max_bytes = i16_max.as_byte_vec();
assert_eq!(i16_max_bytes, &[&[255, 127]]);
assert_eq!(i16_max_bytes, &[i16::MAX.to_le_bytes()]);
let u16_min: &dyn AsByteVec = &u16::MIN;
let u16_min_bytes = u16_min.as_byte_vec();
assert_eq!(u16_min_bytes, &[&[0, 0]]);
assert_eq!(u16_min_bytes, &[u16::MIN.to_le_bytes()]);
let u16_max: &dyn AsByteVec = &u16::MAX;
let u16_max_bytes = u16_max.as_byte_vec();
assert_eq!(u16_max_bytes, &[&[255, 255]]);
assert_eq!(u16_max_bytes, &[u16::MAX.to_le_bytes()]);
let i32_min: &dyn AsByteVec = &i32::MIN;
let i32_min_bytes = i32_min.as_byte_vec();
assert_eq!(i32_min_bytes, &[&[0, 0, 0, 128]]);
assert_eq!(i32_min_bytes, &[i32::MIN.to_le_bytes()]);
let i32_max: &dyn AsByteVec = &i32::MAX;
let i32_max_bytes = i32_max.as_byte_vec();
assert_eq!(i32_max_bytes, &[&[255, 255, 255, 127]]);
assert_eq!(i32_max_bytes, &[i32::MAX.to_le_bytes()]);
let u32_min: &dyn AsByteVec = &u32::MIN;
let u32_min_bytes = u32_min.as_byte_vec();
assert_eq!(u32_min_bytes, &[&[0, 0, 0, 0]]);
assert_eq!(u32_min_bytes, &[u32::MIN.to_le_bytes()]);
let u32_max: &dyn AsByteVec = &u32::MAX;
let u32_max_bytes = u32_max.as_byte_vec();
assert_eq!(u32_max_bytes, &[&[255, 255, 255, 255]]);
assert_eq!(u32_max_bytes, &[u32::MAX.to_le_bytes()]);
let i64_min: &dyn AsByteVec = &i64::MIN;
let i64_min_bytes = i64_min.as_byte_vec();
assert_eq!(i64_min_bytes, &[&[0, 0, 0, 0, 0, 0, 0, 128]]);
assert_eq!(i64_min_bytes, &[i64::MIN.to_le_bytes()]);
let i64_max: &dyn AsByteVec = &i64::MAX;
let i64_max_bytes = i64_max.as_byte_vec();
assert_eq!(i64_max_bytes, &[&[255, 255, 255, 255, 255, 255, 255, 127]]);
assert_eq!(i64_max_bytes, &[i64::MAX.to_le_bytes()]);
let u64_min: &dyn AsByteVec = &u64::MIN;
let u64_min_bytes = u64_min.as_byte_vec();
assert_eq!(u64_min_bytes, &[[0, 0, 0, 0, 0, 0, 0, 0]]);
assert_eq!(i64_min_bytes, &[i64::MIN.to_le_bytes()]);
let u64_max: &dyn AsByteVec = &u64::MAX;
let u64_max_bytes = u64_max.as_byte_vec();
assert_eq!(u64_max_bytes, &[&[255, 255, 255, 255, 255, 255, 255, 255]]);
assert_eq!(u64_max_bytes, &[u64::MAX.to_le_bytes()]);
let i128_min: &dyn AsByteVec = &i128::MIN;
let i128_min_bytes = i128_min.as_byte_vec();
assert_eq!(
i128_min_bytes,
&[&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]]
);
assert_eq!(i128_min_bytes, &[i128::MIN.to_le_bytes()]);
let i128_max: &dyn AsByteVec = &i128::MAX;
let i128_max_bytes = i128_max.as_byte_vec();
assert_eq!(
i128_max_bytes,
&[&[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127]]
);
assert_eq!(i128_max_bytes, &[i128::MAX.to_le_bytes()]);
let u128_min: &dyn AsByteVec = &u128::MIN;
let u128_min_bytes = u128_min.as_byte_vec();
assert_eq!(
u128_min_bytes,
&[&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
);
assert_eq!(u128_min_bytes, &[u128::MIN.to_le_bytes()]);
let u128_max: &dyn AsByteVec = &u128::MAX;
let u128_max_bytes = u128_max.as_byte_vec();
assert_eq!(
u128_max_bytes,
&[&[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]]
);
assert_eq!(u128_max_bytes, &[u128::MAX.to_le_bytes()]);
}
#[test]
fn test_as_byte_vec_primitives() {
let bool_false: &dyn AsByteVec = &false;
assert_eq!(bool_false.as_byte_vec(), &[&[0]]);
let bool_true: &dyn AsByteVec = &true;
assert_eq!(bool_true.as_byte_vec(), &[&[1]]);
}
#[test]
fn test_as_byte_vec_option() {
// Very important property - `None` and `Some(0)` always have to be
// different and should produce different hashes!
let u8_none: Option<u8> = None;
let u8_none: &dyn AsByteVec = &u8_none;
assert_eq!(u8_none.as_byte_vec(), &[&[0]]);
let u8_some_zero: Option<u8> = Some(0);
let u8_some_zero: &dyn AsByteVec = &u8_some_zero;
assert_eq!(u8_some_zero.as_byte_vec(), &[&[1], &[0]]);
let u16_none: Option<u16> = None;
let u16_none: &dyn AsByteVec = &u16_none;
assert_eq!(u16_none.as_byte_vec(), &[&[0]]);
let u16_some_zero: Option<u16> = Some(0);
let u16_some_zero: &dyn AsByteVec = &u16_some_zero;
assert_eq!(u16_some_zero.as_byte_vec(), &[&[1][..], &[0, 0][..]]);
let u32_none: Option<u32> = None;
let u32_none: &dyn AsByteVec = &u32_none;
assert_eq!(u32_none.as_byte_vec(), &[&[0]]);
let u32_some_zero: Option<u32> = Some(0);
let u32_some_zero: &dyn AsByteVec = &u32_some_zero;
assert_eq!(u32_some_zero.as_byte_vec(), &[&[1][..], &[0, 0, 0, 0][..]]);
let u64_none: Option<u64> = None;
let u64_none: &dyn AsByteVec = &u64_none;
assert_eq!(u64_none.as_byte_vec(), &[&[0]]);
let u64_some_zero: Option<u64> = Some(0);
let u64_some_zero: &dyn AsByteVec = &u64_some_zero;
assert_eq!(
u64_some_zero.as_byte_vec(),
&[&[1][..], &[0, 0, 0, 0, 0, 0, 0, 0][..]]
);
let u128_none: Option<u128> = None;
let u128_none: &dyn AsByteVec = &u128_none;
assert_eq!(u128_none.as_byte_vec(), &[&[0]]);
let u128_some_zero: Option<u128> = Some(0);
let u128_some_zero: &dyn AsByteVec = &u128_some_zero;
assert_eq!(
u128_some_zero.as_byte_vec(),
&[
&[1][..],
&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0][..]
]
);
}
#[test]
fn test_as_byte_vec_array() {
let arr: [u8; 0] = [];
let arr: &dyn AsByteVec = &arr;
assert_eq!(arr.as_byte_vec(), &[&[]]);
let arr: [u8; 1] = [255];
let arr: &dyn AsByteVec = &arr;
assert_eq!(arr.as_byte_vec(), &[&[255]]);
let arr: [u8; 4] = [255, 255, 255, 255];
let arr: &dyn AsByteVec = &arr;
assert_eq!(arr.as_byte_vec(), &[&[255, 255, 255, 255]]);
}
#[test]
fn test_as_byte_vec_string() {
let s: &dyn AsByteVec = &"".to_string();
assert_eq!(s.as_byte_vec(), &[b""]);
let s: &dyn AsByteVec = &"foobar".to_string();
assert_eq!(s.as_byte_vec(), &[b"foobar"]);
}
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/poseidon.rs
|
use crate::{
errors::HasherError,
zero_bytes::{poseidon::ZERO_BYTES, ZeroBytes},
zero_indexed_leaf::poseidon::ZERO_INDEXED_LEAF,
Hash, Hasher,
};
/// Poseidon (BN254, circom parameters) implementation of [`Hasher`].
#[derive(Debug, Clone, Copy)]
pub struct Poseidon;
impl Hasher for Poseidon {
    /// Hashes a single byte slice; delegates to [`Self::hashv`].
    fn hash(val: &[u8]) -> Result<Hash, HasherError> {
        Self::hashv(&[val])
    }
    /// Poseidon hash with big-endian byte I/O, one input slice per field
    /// element; errors from the underlying hasher/syscall are propagated.
    fn hashv(vals: &[&[u8]]) -> Result<Hash, HasherError> {
        // Perform the calculation inline, calling this from within a program is
        // not supported.
        #[cfg(not(target_os = "solana"))]
        {
            use ark_bn254::Fr;
            use light_poseidon::{Poseidon, PoseidonBytesHasher};
            let mut hasher = Poseidon::<Fr>::new_circom(vals.len())?;
            let res = hasher.hash_bytes_be(vals)?;
            Ok(res)
        }
        // Call via a system call to perform the calculation.
        #[cfg(target_os = "solana")]
        {
            use solana_program::poseidon::PoseidonSyscallError;
            use crate::HASH_BYTES;
            let mut hash_result = [0; HASH_BYTES];
            // SAFETY-NOTE(review): follows the `sol_poseidon` syscall ABI
            // (parameter set, endianness, pointer to slice-of-slices +
            // count); output buffer is HASH_BYTES long. A non-zero return
            // code is mapped to a Poseidon syscall error below.
            let result = unsafe {
                crate::syscalls::sol_poseidon(
                    0, // bn254
                    0, // big-endian
                    vals as *const _ as *const u8,
                    vals.len() as u64,
                    &mut hash_result as *mut _ as *mut u8,
                )
            };
            match result {
                0 => Ok(hash_result),
                e => Err(HasherError::from(PoseidonSyscallError::from(e))),
            }
        }
    }
    /// Precomputed zero-subtree hashes for Poseidon Merkle trees.
    fn zero_bytes() -> ZeroBytes {
        ZERO_BYTES
    }
    /// Poseidon hash of the zeroed indexed-Merkle-tree leaf.
    fn zero_indexed_leaf() -> [u8; 32] {
        ZERO_INDEXED_LEAF
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_indexed_leaf/sha256.rs
|
// This file is generated by xtask. Do not edit it manually.
pub const ZERO_INDEXED_LEAF: [u8; 32] = [
131u8, 74u8, 112u8, 155u8, 162u8, 83u8, 78u8, 190u8, 62u8, 225u8, 57u8, 127u8, 212u8, 247u8,
189u8, 40u8, 139u8, 42u8, 204u8, 29u8, 32u8, 160u8, 141u8, 108u8, 134u8, 45u8, 205u8, 153u8,
182u8, 240u8, 68u8, 0u8,
];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_indexed_leaf/keccak.rs
|
// This file is generated by xtask. Do not edit it manually.
pub const ZERO_INDEXED_LEAF: [u8; 32] = [
60u8, 172u8, 49u8, 121u8, 8u8, 198u8, 153u8, 254u8, 135u8, 58u8, 127u8, 110u8, 228u8, 232u8,
205u8, 99u8, 251u8, 233u8, 145u8, 139u8, 35u8, 21u8, 201u8, 123u8, 233u8, 21u8, 133u8, 89u8,
1u8, 104u8, 227u8, 1u8,
];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_indexed_leaf/mod.rs
|
pub mod keccak;
pub mod poseidon;
pub mod sha256;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_indexed_leaf/poseidon.rs
|
// This file is generated by xtask. Do not edit it manually.
pub const ZERO_INDEXED_LEAF: [u8; 32] = [
11u8, 193u8, 136u8, 210u8, 125u8, 204u8, 234u8, 220u8, 29u8, 207u8, 182u8, 175u8, 10u8, 122u8,
240u8, 143u8, 226u8, 134u8, 78u8, 236u8, 236u8, 150u8, 197u8, 174u8, 124u8, 238u8, 109u8,
179u8, 27u8, 165u8, 153u8, 170u8,
];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_bytes/sha256.rs
|
// This file is generated by xtask. Do not edit it manually.
use super::ZeroBytes;
// SHA-256 zero-subtree hashes, one 32-byte entry per tree level
// (MAX_HEIGHT + 1 = 33 entries). Entry 0 is the all-zero leaf; each higher
// entry is presumably H(prev || prev) — TODO confirm against the xtask
// generator. Regenerate via xtask instead of editing.
pub const ZERO_BYTES: ZeroBytes = [
    [
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
    ],
    [
        245u8, 165u8, 253u8, 66u8, 209u8, 106u8, 32u8, 48u8, 39u8, 152u8, 239u8, 110u8, 211u8, 9u8,
        151u8, 155u8, 67u8, 0u8, 61u8, 35u8, 32u8, 217u8, 240u8, 232u8, 234u8, 152u8, 49u8, 169u8,
        39u8, 89u8, 251u8, 75u8,
    ],
    [
        219u8, 86u8, 17u8, 78u8, 0u8, 253u8, 212u8, 193u8, 248u8, 92u8, 137u8, 43u8, 243u8, 90u8,
        201u8, 168u8, 146u8, 137u8, 170u8, 236u8, 177u8, 235u8, 208u8, 169u8, 108u8, 222u8, 96u8,
        106u8, 116u8, 139u8, 93u8, 113u8,
    ],
    [
        199u8, 128u8, 9u8, 253u8, 240u8, 127u8, 197u8, 106u8, 17u8, 241u8, 34u8, 55u8, 6u8, 88u8,
        163u8, 83u8, 170u8, 165u8, 66u8, 237u8, 99u8, 228u8, 76u8, 75u8, 193u8, 95u8, 244u8, 205u8,
        16u8, 90u8, 179u8, 60u8,
    ],
    [
        83u8, 109u8, 152u8, 131u8, 127u8, 45u8, 209u8, 101u8, 165u8, 93u8, 94u8, 234u8, 233u8,
        20u8, 133u8, 149u8, 68u8, 114u8, 213u8, 111u8, 36u8, 109u8, 242u8, 86u8, 191u8, 60u8,
        174u8, 25u8, 53u8, 42u8, 18u8, 60u8,
    ],
    [
        158u8, 253u8, 224u8, 82u8, 170u8, 21u8, 66u8, 159u8, 174u8, 5u8, 186u8, 212u8, 208u8,
        177u8, 215u8, 198u8, 77u8, 166u8, 77u8, 3u8, 215u8, 161u8, 133u8, 74u8, 88u8, 140u8, 44u8,
        184u8, 67u8, 12u8, 13u8, 48u8,
    ],
    [
        216u8, 141u8, 223u8, 238u8, 212u8, 0u8, 168u8, 117u8, 85u8, 150u8, 178u8, 25u8, 66u8,
        193u8, 73u8, 126u8, 17u8, 76u8, 48u8, 46u8, 97u8, 24u8, 41u8, 15u8, 145u8, 230u8, 119u8,
        41u8, 118u8, 4u8, 31u8, 161u8,
    ],
    [
        135u8, 235u8, 13u8, 219u8, 165u8, 126u8, 53u8, 246u8, 210u8, 134u8, 103u8, 56u8, 2u8,
        164u8, 175u8, 89u8, 117u8, 226u8, 37u8, 6u8, 199u8, 207u8, 76u8, 100u8, 187u8, 107u8,
        229u8, 238u8, 17u8, 82u8, 127u8, 44u8,
    ],
    [
        38u8, 132u8, 100u8, 118u8, 253u8, 95u8, 197u8, 74u8, 93u8, 67u8, 56u8, 81u8, 103u8, 201u8,
        81u8, 68u8, 242u8, 100u8, 63u8, 83u8, 60u8, 200u8, 91u8, 185u8, 209u8, 107u8, 120u8, 47u8,
        141u8, 125u8, 177u8, 147u8,
    ],
    [
        80u8, 109u8, 134u8, 88u8, 45u8, 37u8, 36u8, 5u8, 184u8, 64u8, 1u8, 135u8, 146u8, 202u8,
        210u8, 191u8, 18u8, 89u8, 241u8, 239u8, 90u8, 165u8, 248u8, 135u8, 225u8, 60u8, 178u8,
        240u8, 9u8, 79u8, 81u8, 225u8,
    ],
    [
        255u8, 255u8, 10u8, 215u8, 230u8, 89u8, 119u8, 47u8, 149u8, 52u8, 193u8, 149u8, 200u8,
        21u8, 239u8, 196u8, 1u8, 78u8, 241u8, 225u8, 218u8, 237u8, 68u8, 4u8, 192u8, 99u8, 133u8,
        209u8, 17u8, 146u8, 233u8, 43u8,
    ],
    [
        108u8, 240u8, 65u8, 39u8, 219u8, 5u8, 68u8, 28u8, 216u8, 51u8, 16u8, 122u8, 82u8, 190u8,
        133u8, 40u8, 104u8, 137u8, 14u8, 67u8, 23u8, 230u8, 160u8, 42u8, 180u8, 118u8, 131u8,
        170u8, 117u8, 150u8, 66u8, 32u8,
    ],
    [
        183u8, 208u8, 95u8, 135u8, 95u8, 20u8, 0u8, 39u8, 239u8, 81u8, 24u8, 162u8, 36u8, 123u8,
        187u8, 132u8, 206u8, 143u8, 47u8, 15u8, 17u8, 35u8, 98u8, 48u8, 133u8, 218u8, 247u8, 150u8,
        12u8, 50u8, 159u8, 95u8,
    ],
    [
        223u8, 106u8, 245u8, 245u8, 187u8, 219u8, 107u8, 233u8, 239u8, 138u8, 166u8, 24u8, 228u8,
        191u8, 128u8, 115u8, 150u8, 8u8, 103u8, 23u8, 30u8, 41u8, 103u8, 111u8, 139u8, 40u8, 77u8,
        234u8, 106u8, 8u8, 168u8, 94u8,
    ],
    [
        181u8, 141u8, 144u8, 15u8, 94u8, 24u8, 46u8, 60u8, 80u8, 239u8, 116u8, 150u8, 158u8, 161u8,
        108u8, 119u8, 38u8, 197u8, 73u8, 117u8, 124u8, 194u8, 53u8, 35u8, 195u8, 105u8, 88u8,
        125u8, 167u8, 41u8, 55u8, 132u8,
    ],
    [
        212u8, 154u8, 117u8, 2u8, 255u8, 207u8, 176u8, 52u8, 11u8, 29u8, 120u8, 133u8, 104u8,
        133u8, 0u8, 202u8, 48u8, 129u8, 97u8, 167u8, 249u8, 107u8, 98u8, 223u8, 157u8, 8u8, 59u8,
        113u8, 252u8, 200u8, 242u8, 187u8,
    ],
    [
        143u8, 230u8, 177u8, 104u8, 146u8, 86u8, 192u8, 211u8, 133u8, 244u8, 47u8, 91u8, 190u8,
        32u8, 39u8, 162u8, 44u8, 25u8, 150u8, 225u8, 16u8, 186u8, 151u8, 193u8, 113u8, 211u8,
        229u8, 148u8, 141u8, 233u8, 43u8, 235u8,
    ],
    [
        141u8, 13u8, 99u8, 195u8, 158u8, 186u8, 222u8, 133u8, 9u8, 224u8, 174u8, 60u8, 156u8, 56u8,
        118u8, 251u8, 95u8, 161u8, 18u8, 190u8, 24u8, 249u8, 5u8, 236u8, 172u8, 254u8, 203u8,
        146u8, 5u8, 118u8, 3u8, 171u8,
    ],
    [
        149u8, 238u8, 200u8, 178u8, 229u8, 65u8, 202u8, 212u8, 233u8, 29u8, 227u8, 131u8, 133u8,
        242u8, 224u8, 70u8, 97u8, 159u8, 84u8, 73u8, 108u8, 35u8, 130u8, 203u8, 108u8, 172u8,
        213u8, 185u8, 140u8, 38u8, 245u8, 164u8,
    ],
    [
        248u8, 147u8, 233u8, 8u8, 145u8, 119u8, 117u8, 182u8, 43u8, 255u8, 35u8, 41u8, 77u8, 187u8,
        227u8, 161u8, 205u8, 142u8, 108u8, 193u8, 195u8, 91u8, 72u8, 1u8, 136u8, 123u8, 100u8,
        106u8, 111u8, 129u8, 241u8, 127u8,
    ],
    [
        205u8, 219u8, 167u8, 181u8, 146u8, 227u8, 19u8, 51u8, 147u8, 193u8, 97u8, 148u8, 250u8,
        199u8, 67u8, 26u8, 191u8, 47u8, 84u8, 133u8, 237u8, 113u8, 29u8, 178u8, 130u8, 24u8, 60u8,
        129u8, 158u8, 8u8, 235u8, 170u8,
    ],
    [
        138u8, 141u8, 127u8, 227u8, 175u8, 140u8, 170u8, 8u8, 90u8, 118u8, 57u8, 168u8, 50u8, 0u8,
        20u8, 87u8, 223u8, 185u8, 18u8, 138u8, 128u8, 97u8, 20u8, 42u8, 208u8, 51u8, 86u8, 41u8,
        255u8, 35u8, 255u8, 156u8,
    ],
    [
        254u8, 179u8, 195u8, 55u8, 215u8, 165u8, 26u8, 111u8, 191u8, 0u8, 185u8, 227u8, 76u8, 82u8,
        225u8, 201u8, 25u8, 92u8, 150u8, 155u8, 212u8, 231u8, 160u8, 191u8, 213u8, 29u8, 92u8,
        91u8, 237u8, 156u8, 17u8, 103u8,
    ],
    [
        231u8, 31u8, 10u8, 168u8, 60u8, 195u8, 46u8, 223u8, 190u8, 250u8, 159u8, 77u8, 62u8, 1u8,
        116u8, 202u8, 133u8, 24u8, 46u8, 236u8, 159u8, 58u8, 9u8, 246u8, 166u8, 192u8, 223u8, 99u8,
        119u8, 165u8, 16u8, 215u8,
    ],
    [
        49u8, 32u8, 111u8, 168u8, 10u8, 80u8, 187u8, 106u8, 190u8, 41u8, 8u8, 80u8, 88u8, 241u8,
        98u8, 18u8, 33u8, 42u8, 96u8, 238u8, 200u8, 240u8, 73u8, 254u8, 203u8, 146u8, 216u8, 200u8,
        224u8, 168u8, 75u8, 192u8,
    ],
    [
        33u8, 53u8, 43u8, 254u8, 203u8, 237u8, 221u8, 233u8, 147u8, 131u8, 159u8, 97u8, 76u8, 61u8,
        172u8, 10u8, 62u8, 227u8, 117u8, 67u8, 249u8, 180u8, 18u8, 177u8, 97u8, 153u8, 220u8, 21u8,
        142u8, 35u8, 181u8, 68u8,
    ],
    [
        97u8, 158u8, 49u8, 39u8, 36u8, 187u8, 109u8, 124u8, 49u8, 83u8, 237u8, 157u8, 231u8, 145u8,
        215u8, 100u8, 163u8, 102u8, 179u8, 137u8, 175u8, 19u8, 197u8, 139u8, 248u8, 168u8, 217u8,
        4u8, 129u8, 164u8, 103u8, 101u8,
    ],
    [
        124u8, 221u8, 41u8, 134u8, 38u8, 130u8, 80u8, 98u8, 141u8, 12u8, 16u8, 227u8, 133u8, 197u8,
        140u8, 97u8, 145u8, 230u8, 251u8, 224u8, 81u8, 145u8, 188u8, 192u8, 79u8, 19u8, 63u8, 44u8,
        234u8, 114u8, 193u8, 196u8,
    ],
    [
        132u8, 137u8, 48u8, 189u8, 123u8, 168u8, 202u8, 197u8, 70u8, 97u8, 7u8, 33u8, 19u8, 251u8,
        39u8, 136u8, 105u8, 224u8, 123u8, 184u8, 88u8, 127u8, 145u8, 57u8, 41u8, 51u8, 55u8, 77u8,
        1u8, 123u8, 203u8, 225u8,
    ],
    [
        136u8, 105u8, 255u8, 44u8, 34u8, 178u8, 140u8, 193u8, 5u8, 16u8, 217u8, 133u8, 50u8, 146u8,
        128u8, 51u8, 40u8, 190u8, 79u8, 176u8, 232u8, 4u8, 149u8, 232u8, 187u8, 141u8, 39u8, 31u8,
        91u8, 136u8, 150u8, 54u8,
    ],
    [
        181u8, 254u8, 40u8, 231u8, 159u8, 27u8, 133u8, 15u8, 134u8, 88u8, 36u8, 108u8, 233u8,
        182u8, 161u8, 231u8, 180u8, 159u8, 192u8, 109u8, 183u8, 20u8, 62u8, 143u8, 224u8, 180u8,
        242u8, 176u8, 197u8, 82u8, 58u8, 92u8,
    ],
    [
        152u8, 94u8, 146u8, 159u8, 112u8, 175u8, 40u8, 208u8, 189u8, 209u8, 169u8, 10u8, 128u8,
        143u8, 151u8, 127u8, 89u8, 124u8, 124u8, 119u8, 140u8, 72u8, 158u8, 152u8, 211u8, 189u8,
        137u8, 16u8, 211u8, 26u8, 192u8, 247u8,
    ],
    [
        198u8, 246u8, 126u8, 2u8, 230u8, 228u8, 225u8, 189u8, 239u8, 185u8, 148u8, 198u8, 9u8,
        137u8, 83u8, 243u8, 70u8, 54u8, 186u8, 43u8, 108u8, 162u8, 10u8, 71u8, 33u8, 210u8, 178u8,
        106u8, 136u8, 103u8, 34u8, 255u8,
    ],
];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_bytes/keccak.rs
|
// This file is generated by xtask. Do not edit it manually.
use super::ZeroBytes;
// Keccak zero-subtree hashes, one 32-byte entry per tree level
// (MAX_HEIGHT + 1 = 33 entries). Entry 0 is the all-zero leaf; each higher
// entry is presumably H(prev || prev) — TODO confirm against the xtask
// generator. Regenerate via xtask instead of editing.
pub const ZERO_BYTES: ZeroBytes = [
    [
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
    ],
    [
        173u8, 50u8, 40u8, 182u8, 118u8, 247u8, 211u8, 205u8, 66u8, 132u8, 165u8, 68u8, 63u8, 23u8,
        241u8, 150u8, 43u8, 54u8, 228u8, 145u8, 179u8, 10u8, 64u8, 178u8, 64u8, 88u8, 73u8, 229u8,
        151u8, 186u8, 95u8, 181u8,
    ],
    [
        180u8, 193u8, 25u8, 81u8, 149u8, 124u8, 111u8, 143u8, 100u8, 44u8, 74u8, 246u8, 28u8,
        214u8, 178u8, 70u8, 64u8, 254u8, 198u8, 220u8, 127u8, 198u8, 7u8, 238u8, 130u8, 6u8, 169u8,
        158u8, 146u8, 65u8, 13u8, 48u8,
    ],
    [
        33u8, 221u8, 185u8, 163u8, 86u8, 129u8, 92u8, 63u8, 172u8, 16u8, 38u8, 182u8, 222u8, 197u8,
        223u8, 49u8, 36u8, 175u8, 186u8, 219u8, 72u8, 92u8, 155u8, 165u8, 163u8, 227u8, 57u8,
        138u8, 4u8, 183u8, 186u8, 133u8,
    ],
    [
        229u8, 135u8, 105u8, 179u8, 42u8, 27u8, 234u8, 241u8, 234u8, 39u8, 55u8, 90u8, 68u8, 9u8,
        90u8, 13u8, 31u8, 182u8, 100u8, 206u8, 45u8, 211u8, 88u8, 231u8, 252u8, 191u8, 183u8,
        140u8, 38u8, 161u8, 147u8, 68u8,
    ],
    [
        14u8, 176u8, 30u8, 191u8, 201u8, 237u8, 39u8, 80u8, 12u8, 212u8, 223u8, 201u8, 121u8, 39u8,
        45u8, 31u8, 9u8, 19u8, 204u8, 159u8, 102u8, 84u8, 13u8, 126u8, 128u8, 5u8, 129u8, 17u8,
        9u8, 225u8, 207u8, 45u8,
    ],
    [
        136u8, 124u8, 34u8, 189u8, 135u8, 80u8, 211u8, 64u8, 22u8, 172u8, 60u8, 102u8, 181u8,
        255u8, 16u8, 45u8, 172u8, 221u8, 115u8, 246u8, 176u8, 20u8, 231u8, 16u8, 181u8, 30u8,
        128u8, 34u8, 175u8, 154u8, 25u8, 104u8,
    ],
    [
        255u8, 215u8, 1u8, 87u8, 228u8, 128u8, 99u8, 252u8, 51u8, 201u8, 122u8, 5u8, 15u8, 127u8,
        100u8, 2u8, 51u8, 191u8, 100u8, 108u8, 201u8, 141u8, 149u8, 36u8, 198u8, 185u8, 43u8,
        207u8, 58u8, 181u8, 111u8, 131u8,
    ],
    [
        152u8, 103u8, 204u8, 95u8, 127u8, 25u8, 107u8, 147u8, 186u8, 225u8, 226u8, 126u8, 99u8,
        32u8, 116u8, 36u8, 69u8, 210u8, 144u8, 242u8, 38u8, 56u8, 39u8, 73u8, 139u8, 84u8, 254u8,
        197u8, 57u8, 247u8, 86u8, 175u8,
    ],
    [
        206u8, 250u8, 212u8, 229u8, 8u8, 192u8, 152u8, 185u8, 167u8, 225u8, 216u8, 254u8, 177u8,
        153u8, 85u8, 251u8, 2u8, 186u8, 150u8, 117u8, 88u8, 80u8, 120u8, 113u8, 9u8, 105u8, 211u8,
        68u8, 15u8, 80u8, 84u8, 224u8,
    ],
    [
        249u8, 220u8, 62u8, 127u8, 224u8, 22u8, 224u8, 80u8, 239u8, 242u8, 96u8, 51u8, 79u8, 24u8,
        165u8, 212u8, 254u8, 57u8, 29u8, 130u8, 9u8, 35u8, 25u8, 245u8, 150u8, 79u8, 46u8, 46u8,
        183u8, 193u8, 195u8, 165u8,
    ],
    [
        248u8, 177u8, 58u8, 73u8, 226u8, 130u8, 246u8, 9u8, 195u8, 23u8, 168u8, 51u8, 251u8, 141u8,
        151u8, 109u8, 17u8, 81u8, 124u8, 87u8, 29u8, 18u8, 33u8, 162u8, 101u8, 210u8, 90u8, 247u8,
        120u8, 236u8, 248u8, 146u8,
    ],
    [
        52u8, 144u8, 198u8, 206u8, 235u8, 69u8, 10u8, 236u8, 220u8, 130u8, 226u8, 130u8, 147u8,
        3u8, 29u8, 16u8, 199u8, 215u8, 59u8, 248u8, 94u8, 87u8, 191u8, 4u8, 26u8, 151u8, 54u8,
        10u8, 162u8, 197u8, 217u8, 156u8,
    ],
    [
        193u8, 223u8, 130u8, 217u8, 196u8, 184u8, 116u8, 19u8, 234u8, 226u8, 239u8, 4u8, 143u8,
        148u8, 180u8, 211u8, 85u8, 76u8, 234u8, 115u8, 217u8, 43u8, 15u8, 122u8, 249u8, 110u8, 2u8,
        113u8, 198u8, 145u8, 226u8, 187u8,
    ],
    [
        92u8, 103u8, 173u8, 215u8, 198u8, 202u8, 243u8, 2u8, 37u8, 106u8, 222u8, 223u8, 122u8,
        177u8, 20u8, 218u8, 10u8, 207u8, 232u8, 112u8, 212u8, 73u8, 163u8, 164u8, 137u8, 247u8,
        129u8, 214u8, 89u8, 232u8, 190u8, 204u8,
    ],
    [
        218u8, 123u8, 206u8, 159u8, 78u8, 134u8, 24u8, 182u8, 189u8, 47u8, 65u8, 50u8, 206u8,
        121u8, 140u8, 220u8, 122u8, 96u8, 231u8, 225u8, 70u8, 10u8, 114u8, 153u8, 227u8, 198u8,
        52u8, 42u8, 87u8, 150u8, 38u8, 210u8,
    ],
    [
        39u8, 51u8, 229u8, 15u8, 82u8, 110u8, 194u8, 250u8, 25u8, 162u8, 43u8, 49u8, 232u8, 237u8,
        80u8, 242u8, 60u8, 209u8, 253u8, 249u8, 76u8, 145u8, 84u8, 237u8, 58u8, 118u8, 9u8, 162u8,
        241u8, 255u8, 152u8, 31u8,
    ],
    [
        225u8, 211u8, 181u8, 200u8, 7u8, 178u8, 129u8, 228u8, 104u8, 60u8, 198u8, 214u8, 49u8,
        92u8, 249u8, 91u8, 154u8, 222u8, 134u8, 65u8, 222u8, 252u8, 179u8, 35u8, 114u8, 241u8,
        193u8, 38u8, 227u8, 152u8, 239u8, 122u8,
    ],
    [
        90u8, 45u8, 206u8, 10u8, 138u8, 127u8, 104u8, 187u8, 116u8, 86u8, 15u8, 143u8, 113u8,
        131u8, 124u8, 44u8, 46u8, 187u8, 203u8, 247u8, 255u8, 251u8, 66u8, 174u8, 24u8, 150u8,
        241u8, 63u8, 124u8, 116u8, 121u8, 160u8,
    ],
    [
        180u8, 106u8, 40u8, 182u8, 245u8, 85u8, 64u8, 248u8, 148u8, 68u8, 246u8, 61u8, 224u8, 55u8,
        142u8, 61u8, 18u8, 27u8, 224u8, 158u8, 6u8, 204u8, 157u8, 237u8, 28u8, 32u8, 230u8, 88u8,
        118u8, 211u8, 106u8, 160u8,
    ],
    [
        198u8, 94u8, 150u8, 69u8, 100u8, 71u8, 134u8, 182u8, 32u8, 226u8, 221u8, 42u8, 214u8, 72u8,
        221u8, 252u8, 191u8, 74u8, 126u8, 91u8, 26u8, 58u8, 78u8, 207u8, 231u8, 246u8, 70u8, 103u8,
        163u8, 240u8, 183u8, 226u8,
    ],
    [
        244u8, 65u8, 133u8, 136u8, 237u8, 53u8, 162u8, 69u8, 140u8, 255u8, 235u8, 57u8, 185u8,
        61u8, 38u8, 241u8, 141u8, 42u8, 177u8, 59u8, 220u8, 230u8, 174u8, 229u8, 142u8, 123u8,
        153u8, 53u8, 158u8, 194u8, 223u8, 217u8,
    ],
    [
        90u8, 156u8, 22u8, 220u8, 0u8, 214u8, 239u8, 24u8, 183u8, 147u8, 58u8, 111u8, 141u8, 198u8,
        92u8, 203u8, 85u8, 102u8, 113u8, 56u8, 119u8, 111u8, 125u8, 234u8, 16u8, 16u8, 112u8,
        220u8, 135u8, 150u8, 227u8, 119u8,
    ],
    [
        77u8, 248u8, 79u8, 64u8, 174u8, 12u8, 130u8, 41u8, 208u8, 214u8, 6u8, 158u8, 92u8, 143u8,
        57u8, 167u8, 194u8, 153u8, 103u8, 122u8, 9u8, 211u8, 103u8, 252u8, 123u8, 5u8, 227u8,
        188u8, 56u8, 14u8, 230u8, 82u8,
    ],
    [
        205u8, 199u8, 37u8, 149u8, 247u8, 76u8, 123u8, 16u8, 67u8, 208u8, 225u8, 255u8, 186u8,
        183u8, 52u8, 100u8, 140u8, 131u8, 141u8, 251u8, 5u8, 39u8, 217u8, 113u8, 182u8, 2u8, 188u8,
        33u8, 108u8, 150u8, 25u8, 239u8,
    ],
    [
        10u8, 191u8, 90u8, 201u8, 116u8, 161u8, 237u8, 87u8, 244u8, 5u8, 10u8, 165u8, 16u8, 221u8,
        156u8, 116u8, 245u8, 8u8, 39u8, 123u8, 57u8, 215u8, 151u8, 59u8, 178u8, 223u8, 204u8,
        197u8, 238u8, 176u8, 97u8, 141u8,
    ],
    [
        184u8, 205u8, 116u8, 4u8, 111u8, 243u8, 55u8, 240u8, 167u8, 191u8, 44u8, 142u8, 3u8, 225u8,
        15u8, 100u8, 44u8, 24u8, 134u8, 121u8, 141u8, 113u8, 128u8, 106u8, 177u8, 232u8, 136u8,
        217u8, 229u8, 238u8, 135u8, 208u8,
    ],
    [
        131u8, 140u8, 86u8, 85u8, 203u8, 33u8, 198u8, 203u8, 131u8, 49u8, 59u8, 90u8, 99u8, 17u8,
        117u8, 223u8, 244u8, 150u8, 55u8, 114u8, 204u8, 233u8, 16u8, 129u8, 136u8, 179u8, 74u8,
        200u8, 124u8, 129u8, 196u8, 30u8,
    ],
    [
        102u8, 46u8, 228u8, 221u8, 45u8, 215u8, 178u8, 188u8, 112u8, 121u8, 97u8, 177u8, 230u8,
        70u8, 196u8, 4u8, 118u8, 105u8, 220u8, 182u8, 88u8, 79u8, 13u8, 141u8, 119u8, 13u8, 175u8,
        93u8, 126u8, 125u8, 235u8, 46u8,
    ],
    [
        56u8, 138u8, 178u8, 14u8, 37u8, 115u8, 209u8, 113u8, 168u8, 129u8, 8u8, 231u8, 157u8,
        130u8, 14u8, 152u8, 242u8, 108u8, 11u8, 132u8, 170u8, 139u8, 47u8, 74u8, 164u8, 150u8,
        141u8, 187u8, 129u8, 142u8, 163u8, 34u8,
    ],
    [
        147u8, 35u8, 124u8, 80u8, 186u8, 117u8, 238u8, 72u8, 95u8, 76u8, 34u8, 173u8, 242u8, 247u8,
        65u8, 64u8, 11u8, 223u8, 141u8, 106u8, 156u8, 199u8, 223u8, 126u8, 202u8, 229u8, 118u8,
        34u8, 22u8, 101u8, 215u8, 53u8,
    ],
    [
        132u8, 72u8, 129u8, 139u8, 180u8, 174u8, 69u8, 98u8, 132u8, 158u8, 148u8, 158u8, 23u8,
        172u8, 22u8, 224u8, 190u8, 22u8, 104u8, 142u8, 21u8, 107u8, 92u8, 241u8, 94u8, 9u8, 140u8,
        98u8, 124u8, 0u8, 86u8, 169u8,
    ],
    [
        39u8, 174u8, 91u8, 160u8, 141u8, 114u8, 145u8, 201u8, 108u8, 140u8, 189u8, 220u8, 193u8,
        72u8, 191u8, 72u8, 166u8, 214u8, 140u8, 121u8, 116u8, 185u8, 67u8, 86u8, 245u8, 55u8, 84u8,
        239u8, 97u8, 113u8, 215u8, 87u8,
    ],
];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_bytes/mod.rs
|
// Precomputed zero-subtree hash tables, one submodule per supported hash
// function. Each submodule exposes a generated `ZERO_BYTES: ZeroBytes`.
pub mod keccak;
pub mod poseidon;
pub mod sha256;
// Maximum supported Merkle tree height.
pub const MAX_HEIGHT: usize = 32;
// One 32-byte hash per tree level, including the leaf level (hence `+ 1`).
pub type ZeroBytes = [[u8; 32]; MAX_HEIGHT + 1];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/zero_bytes/poseidon.rs
|
// This file is generated by xtask. Do not edit it manually.
use super::ZeroBytes;
// Poseidon zero-subtree hashes, one 32-byte entry per tree level
// (MAX_HEIGHT + 1 = 33 entries). Entry 0 is the all-zero leaf; each higher
// entry is presumably H(prev, prev) — TODO confirm against the xtask
// generator. Regenerate via xtask instead of editing.
pub const ZERO_BYTES: ZeroBytes = [
    [
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
        0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
    ],
    [
        32u8, 152u8, 245u8, 251u8, 158u8, 35u8, 158u8, 171u8, 60u8, 234u8, 195u8, 242u8, 123u8,
        129u8, 228u8, 129u8, 220u8, 49u8, 36u8, 213u8, 95u8, 254u8, 213u8, 35u8, 168u8, 57u8,
        238u8, 132u8, 70u8, 182u8, 72u8, 100u8,
    ],
    [
        16u8, 105u8, 103u8, 61u8, 205u8, 177u8, 34u8, 99u8, 223u8, 48u8, 26u8, 111u8, 245u8, 132u8,
        167u8, 236u8, 38u8, 26u8, 68u8, 203u8, 157u8, 198u8, 141u8, 240u8, 103u8, 164u8, 119u8,
        68u8, 96u8, 177u8, 241u8, 225u8,
    ],
    [
        24u8, 244u8, 51u8, 49u8, 83u8, 126u8, 226u8, 175u8, 46u8, 61u8, 117u8, 141u8, 80u8, 247u8,
        33u8, 6u8, 70u8, 124u8, 110u8, 234u8, 80u8, 55u8, 29u8, 213u8, 40u8, 213u8, 126u8, 178u8,
        184u8, 86u8, 210u8, 56u8,
    ],
    [
        7u8, 249u8, 216u8, 55u8, 203u8, 23u8, 176u8, 211u8, 99u8, 32u8, 255u8, 233u8, 59u8, 165u8,
        35u8, 69u8, 241u8, 183u8, 40u8, 87u8, 26u8, 86u8, 130u8, 101u8, 202u8, 172u8, 151u8, 85u8,
        157u8, 188u8, 149u8, 42u8,
    ],
    [
        43u8, 148u8, 207u8, 94u8, 135u8, 70u8, 179u8, 245u8, 201u8, 99u8, 31u8, 76u8, 93u8, 243u8,
        41u8, 7u8, 166u8, 153u8, 197u8, 140u8, 148u8, 178u8, 173u8, 77u8, 123u8, 92u8, 236u8, 22u8,
        57u8, 24u8, 63u8, 85u8,
    ],
    [
        45u8, 238u8, 147u8, 197u8, 166u8, 102u8, 69u8, 150u8, 70u8, 234u8, 125u8, 34u8, 204u8,
        169u8, 225u8, 188u8, 254u8, 215u8, 30u8, 105u8, 81u8, 185u8, 83u8, 97u8, 29u8, 17u8, 221u8,
        163u8, 46u8, 160u8, 157u8, 120u8,
    ],
    [
        7u8, 130u8, 149u8, 229u8, 162u8, 43u8, 132u8, 233u8, 130u8, 207u8, 96u8, 30u8, 182u8, 57u8,
        89u8, 123u8, 139u8, 5u8, 21u8, 168u8, 140u8, 181u8, 172u8, 127u8, 168u8, 164u8, 170u8,
        190u8, 60u8, 135u8, 52u8, 157u8,
    ],
    [
        47u8, 165u8, 229u8, 241u8, 143u8, 96u8, 39u8, 166u8, 80u8, 27u8, 236u8, 134u8, 69u8, 100u8,
        71u8, 42u8, 97u8, 107u8, 46u8, 39u8, 74u8, 65u8, 33u8, 26u8, 68u8, 76u8, 190u8, 58u8,
        153u8, 243u8, 204u8, 97u8,
    ],
    [
        14u8, 136u8, 67u8, 118u8, 208u8, 216u8, 253u8, 33u8, 236u8, 183u8, 128u8, 56u8, 158u8,
        148u8, 31u8, 102u8, 228u8, 94u8, 122u8, 204u8, 227u8, 226u8, 40u8, 171u8, 62u8, 33u8, 86u8,
        166u8, 20u8, 252u8, 215u8, 71u8,
    ],
    [
        27u8, 114u8, 1u8, 218u8, 114u8, 73u8, 79u8, 30u8, 40u8, 113u8, 122u8, 209u8, 165u8, 46u8,
        180u8, 105u8, 249u8, 88u8, 146u8, 249u8, 87u8, 113u8, 53u8, 51u8, 222u8, 97u8, 117u8,
        229u8, 218u8, 25u8, 10u8, 242u8,
    ],
    [
        31u8, 141u8, 136u8, 34u8, 114u8, 94u8, 54u8, 56u8, 82u8, 0u8, 192u8, 178u8, 1u8, 36u8,
        152u8, 25u8, 166u8, 230u8, 225u8, 228u8, 101u8, 8u8, 8u8, 181u8, 190u8, 188u8, 107u8,
        250u8, 206u8, 125u8, 118u8, 54u8,
    ],
    [
        44u8, 93u8, 130u8, 246u8, 108u8, 145u8, 75u8, 175u8, 185u8, 112u8, 21u8, 137u8, 186u8,
        140u8, 252u8, 251u8, 97u8, 98u8, 176u8, 161u8, 42u8, 207u8, 136u8, 168u8, 208u8, 135u8,
        154u8, 4u8, 113u8, 181u8, 248u8, 90u8,
    ],
    [
        20u8, 197u8, 65u8, 72u8, 160u8, 148u8, 11u8, 184u8, 32u8, 149u8, 127u8, 90u8, 223u8, 63u8,
        161u8, 19u8, 78u8, 245u8, 196u8, 170u8, 161u8, 19u8, 244u8, 100u8, 100u8, 88u8, 242u8,
        112u8, 224u8, 191u8, 191u8, 208u8,
    ],
    [
        25u8, 13u8, 51u8, 177u8, 47u8, 152u8, 111u8, 150u8, 30u8, 16u8, 192u8, 238u8, 68u8, 216u8,
        185u8, 175u8, 17u8, 190u8, 37u8, 88u8, 140u8, 173u8, 137u8, 212u8, 22u8, 17u8, 142u8, 75u8,
        244u8, 235u8, 232u8, 12u8,
    ],
    [
        34u8, 249u8, 138u8, 169u8, 206u8, 112u8, 65u8, 82u8, 172u8, 23u8, 53u8, 73u8, 20u8, 173u8,
        115u8, 237u8, 17u8, 103u8, 174u8, 101u8, 150u8, 175u8, 81u8, 10u8, 165u8, 179u8, 100u8,
        147u8, 37u8, 224u8, 108u8, 146u8,
    ],
    [
        42u8, 124u8, 124u8, 155u8, 108u8, 229u8, 136u8, 11u8, 159u8, 111u8, 34u8, 141u8, 114u8,
        191u8, 106u8, 87u8, 90u8, 82u8, 111u8, 41u8, 198u8, 110u8, 204u8, 238u8, 248u8, 183u8,
        83u8, 211u8, 139u8, 186u8, 115u8, 35u8,
    ],
    [
        46u8, 129u8, 134u8, 229u8, 88u8, 105u8, 142u8, 193u8, 198u8, 122u8, 249u8, 193u8, 77u8,
        70u8, 63u8, 252u8, 71u8, 0u8, 67u8, 201u8, 194u8, 152u8, 139u8, 149u8, 77u8, 117u8, 221u8,
        100u8, 63u8, 54u8, 185u8, 146u8,
    ],
    [
        15u8, 87u8, 197u8, 87u8, 30u8, 154u8, 78u8, 171u8, 73u8, 226u8, 200u8, 207u8, 5u8, 13u8,
        174u8, 148u8, 138u8, 239u8, 110u8, 173u8, 100u8, 115u8, 146u8, 39u8, 53u8, 70u8, 36u8,
        157u8, 28u8, 31u8, 241u8, 15u8,
    ],
    [
        24u8, 48u8, 238u8, 103u8, 181u8, 251u8, 85u8, 74u8, 213u8, 246u8, 61u8, 67u8, 136u8, 128u8,
        14u8, 28u8, 254u8, 120u8, 227u8, 16u8, 105u8, 125u8, 70u8, 228u8, 60u8, 156u8, 227u8, 97u8,
        52u8, 247u8, 44u8, 202u8,
    ],
    [
        33u8, 52u8, 231u8, 106u8, 197u8, 210u8, 26u8, 171u8, 24u8, 108u8, 43u8, 225u8, 221u8,
        143u8, 132u8, 238u8, 136u8, 10u8, 30u8, 70u8, 234u8, 247u8, 18u8, 249u8, 211u8, 113u8,
        182u8, 223u8, 34u8, 25u8, 31u8, 62u8,
    ],
    [
        25u8, 223u8, 144u8, 236u8, 132u8, 78u8, 188u8, 79u8, 254u8, 235u8, 216u8, 102u8, 243u8,
        56u8, 89u8, 176u8, 192u8, 81u8, 216u8, 201u8, 88u8, 238u8, 58u8, 168u8, 143u8, 143u8,
        141u8, 243u8, 219u8, 145u8, 165u8, 177u8,
    ],
    [
        24u8, 204u8, 162u8, 166u8, 107u8, 92u8, 7u8, 135u8, 152u8, 30u8, 105u8, 174u8, 253u8,
        132u8, 133u8, 45u8, 116u8, 175u8, 14u8, 147u8, 239u8, 73u8, 18u8, 180u8, 100u8, 140u8, 5u8,
        247u8, 34u8, 239u8, 229u8, 43u8,
    ],
    [
        35u8, 136u8, 144u8, 148u8, 21u8, 35u8, 13u8, 27u8, 77u8, 19u8, 4u8, 210u8, 213u8, 79u8,
        71u8, 58u8, 98u8, 131u8, 56u8, 242u8, 239u8, 173u8, 131u8, 250u8, 223u8, 5u8, 100u8, 69u8,
        73u8, 210u8, 83u8, 141u8,
    ],
    [
        39u8, 23u8, 31u8, 180u8, 169u8, 123u8, 108u8, 192u8, 233u8, 232u8, 245u8, 67u8, 181u8,
        41u8, 77u8, 232u8, 102u8, 162u8, 175u8, 44u8, 156u8, 141u8, 11u8, 29u8, 150u8, 230u8,
        115u8, 228u8, 82u8, 158u8, 213u8, 64u8,
    ],
    [
        47u8, 246u8, 101u8, 5u8, 64u8, 246u8, 41u8, 253u8, 87u8, 17u8, 160u8, 188u8, 116u8, 252u8,
        13u8, 40u8, 220u8, 178u8, 48u8, 185u8, 57u8, 37u8, 131u8, 229u8, 248u8, 213u8, 150u8,
        150u8, 221u8, 230u8, 174u8, 33u8,
    ],
    [
        18u8, 12u8, 88u8, 241u8, 67u8, 212u8, 145u8, 233u8, 89u8, 2u8, 247u8, 245u8, 39u8, 119u8,
        120u8, 162u8, 224u8, 173u8, 81u8, 104u8, 246u8, 173u8, 215u8, 86u8, 105u8, 147u8, 38u8,
        48u8, 206u8, 97u8, 21u8, 24u8,
    ],
    [
        31u8, 33u8, 254u8, 183u8, 13u8, 63u8, 33u8, 176u8, 123u8, 248u8, 83u8, 213u8, 229u8, 219u8,
        3u8, 7u8, 30u8, 196u8, 149u8, 160u8, 165u8, 101u8, 162u8, 29u8, 162u8, 214u8, 101u8, 210u8,
        121u8, 72u8, 55u8, 149u8,
    ],
    [
        36u8, 190u8, 144u8, 95u8, 167u8, 19u8, 53u8, 225u8, 76u8, 99u8, 140u8, 192u8, 246u8, 106u8,
        134u8, 35u8, 168u8, 38u8, 231u8, 104u8, 6u8, 138u8, 158u8, 150u8, 139u8, 177u8, 161u8,
        221u8, 225u8, 138u8, 114u8, 210u8,
    ],
    [
        15u8, 134u8, 102u8, 182u8, 46u8, 209u8, 116u8, 145u8, 197u8, 12u8, 234u8, 222u8, 173u8,
        87u8, 212u8, 205u8, 89u8, 126u8, 243u8, 130u8, 29u8, 101u8, 195u8, 40u8, 116u8, 76u8,
        116u8, 229u8, 83u8, 218u8, 194u8, 109u8,
    ],
    [
        9u8, 24u8, 212u8, 107u8, 245u8, 45u8, 152u8, 176u8, 52u8, 65u8, 63u8, 74u8, 26u8, 28u8,
        65u8, 89u8, 78u8, 122u8, 122u8, 63u8, 106u8, 224u8, 140u8, 180u8, 61u8, 26u8, 42u8, 35u8,
        14u8, 25u8, 89u8, 239u8,
    ],
    [
        27u8, 190u8, 176u8, 27u8, 76u8, 71u8, 158u8, 205u8, 231u8, 105u8, 23u8, 100u8, 94u8, 64u8,
        77u8, 250u8, 46u8, 38u8, 249u8, 13u8, 10u8, 252u8, 90u8, 101u8, 18u8, 133u8, 19u8, 173u8,
        55u8, 92u8, 95u8, 242u8,
    ],
    [
        47u8, 104u8, 161u8, 197u8, 142u8, 37u8, 126u8, 66u8, 161u8, 122u8, 108u8, 97u8, 223u8,
        245u8, 85u8, 30u8, 213u8, 96u8, 185u8, 146u8, 42u8, 177u8, 25u8, 213u8, 172u8, 142u8, 24u8,
        76u8, 151u8, 52u8, 234u8, 217u8,
    ],
];
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/syscalls/definitions.rs
|
//! This module is a partial copy from
//! [solana-program](https://github.com/solana-labs/solana/blob/master/sdk/program/src/syscalls/definitions.rs),
//! which is licensed under Apache License 2.0.
// Declares a Solana syscall binding. With the `static-syscalls` target
// feature, the syscall is resolved by a compile-time hash of its name and
// invoked through a transmuted function pointer (mirrors upstream
// solana-program).
#[cfg(target_feature = "static-syscalls")]
macro_rules! define_syscall {
    (fn $name:ident($($arg:ident: $typ:ty),*) -> $ret:ty) => {
        #[inline]
        pub unsafe fn $name($($arg: $typ),*) -> $ret {
            // this enum is used to force the hash to be computed in a const context
            #[repr(usize)]
            enum Syscall {
                // NOTE(review): `sys_hash` is not defined anywhere in this
                // file — it must be in scope at the expansion site (as in
                // upstream solana-program); confirm before enabling the
                // `static-syscalls` feature.
                Code = sys_hash(stringify!($name)),
            }

            let syscall: extern "C" fn($($arg: $typ),*) -> $ret = core::mem::transmute(Syscall::Code);
            syscall($($arg),*)
        }
    };
    // Arm without a return type: delegate to the arm above with `-> ()`.
    (fn $name:ident($($arg:ident: $typ:ty),*)) => {
        define_syscall!(fn $name($($arg: $typ),*) -> ());
    }
}
// Without `static-syscalls`, each syscall is an ordinary `extern "C"`
// symbol resolved by the BPF/SBF loader at program load time.
#[cfg(not(target_feature = "static-syscalls"))]
macro_rules! define_syscall {
    (fn $name:ident($($arg:ident: $typ:ty),*) -> $ret:ty) => {
        extern "C" {
            pub fn $name($($arg: $typ),*) -> $ret;
        }
    };
    // Arm without a return type: delegate to the arm above with `-> ()`.
    (fn $name:ident($($arg:ident: $typ:ty),*)) => {
        define_syscall!(fn $name($($arg: $typ),*) -> ());
    }
}
// Hash syscalls. All three write a 32-byte digest to `hash_result` and
// return a status code. NOTE(review): per upstream solana-program, `vals`
// presumably points to an array of byte-slice descriptors and `val_len` is
// the slice count — confirm against the solana-program definitions before
// changing call sites. `sol_poseidon` additionally takes runtime
// `parameters` and `endianness` selectors.
define_syscall!(fn sol_sha256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64);
define_syscall!(fn sol_keccak256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64);
define_syscall!(fn sol_poseidon(parameters: u64, endianness: u64, vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64);
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hasher/src/syscalls/mod.rs
|
//! This module is a partial copy from
//! [solana-program](https://github.com/solana-labs/solana/blob/master/sdk/program/src/syscalls/definitions.rs),
//! which is licensed under Apache License 2.0.
//!
//! The purpose of the module is to provide definition of Poseidon syscall
//! without upgrading solana-program and Anchor just yet.
// The syscall bindings only exist when compiling for the Solana on-chain
// target; off-chain builds get no symbols from this module.
#[cfg(target_os = "solana")]
mod definitions;
#[cfg(target_os = "solana")]
pub use definitions::*;
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set/Cargo.toml
|
[package]
name = "light-hash-set"
version = "1.2.0"
description = "Hash set which can be stored on a Solana account"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[features]
solana = ["solana-program"]
[dependencies]
light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" }
light-utils = { path = "../../utils", version = "1.1.0" }
memoffset = "0.9"
num-bigint = "0.4"
num-traits = "0.2"
solana-program = { workspace = true, optional = true }
thiserror = "1.0"
[target.'cfg(target_os = "solana")'.dependencies]
light-heap = { path = "../../heap", version = "1.1.0" }
[dev-dependencies]
ark-bn254 = "0.4"
ark-ff = "0.4"
rand = "0.8"
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set/src/zero_copy.rs
|
use std::{
marker::PhantomData,
mem,
ops::{Deref, DerefMut},
ptr::NonNull,
};
use crate::{HashSet, HashSetCell, HashSetError};
/// A `HashSet` wrapper which can be instantiated from Solana account bytes
/// without copying them.
#[derive(Debug)]
pub struct HashSetZeroCopy<'a> {
    // Wrapped in `ManuallyDrop` so the `Drop` impl in this module can skip
    // freeing bucket memory that is owned by the caller's byte buffer.
    pub hash_set: mem::ManuallyDrop<HashSet>,
    // Ties this view's lifetime to the borrowed account byte slice.
    _marker: PhantomData<&'a ()>,
}
impl<'a> HashSetZeroCopy<'a> {
    // TODO(vadorovsky): Add a non-mut method: `from_bytes_zero_copy`.
    /// Casts a byte slice into `HashSet`.
    ///
    /// # Purpose
    ///
    /// This method is meant to be used mostly in Solana programs, where memory
    /// constraints are tight and we want to make sure no data is copied.
    ///
    /// # Safety
    ///
    /// This is highly unsafe. Ensuring the alignment and that the slice
    /// provides actual data of the hash set is the caller's responsibility.
    ///
    /// Calling it in async context (or anywhere where the underlying data can
    /// be moved in the memory) is certainly going to cause undefined behavior.
    pub unsafe fn from_bytes_zero_copy_mut(bytes: &'a mut [u8]) -> Result<Self, HashSetError> {
        // The buffer must at least hold the fixed-size header fields.
        if bytes.len() < HashSet::non_dyn_fields_size() {
            return Err(HashSetError::BufferSize(
                HashSet::non_dyn_fields_size(),
                bytes.len(),
            ));
        }
        // Header layout (little-endian, written by `from_bytes_zero_copy_init`):
        // capacity at [0..8], sequence threshold at [8..16].
        let capacity_values = usize::from_le_bytes(bytes[0..8].try_into().unwrap());
        let sequence_threshold = usize::from_le_bytes(bytes[8..16].try_into().unwrap());
        // Buckets start one extra `usize` past the fixed fields.
        let offset = HashSet::non_dyn_fields_size() + mem::size_of::<usize>();
        let values_size = mem::size_of::<Option<HashSetCell>>() * capacity_values;
        // NOTE(review): `expected_size` does not include the extra
        // `size_of::<usize>()` that `offset` adds above, so the final 8
        // bytes of the buckets region appear uncovered by this check —
        // confirm against `HashSet::non_dyn_fields_size()` and
        // `HashSet::size_in_account()`.
        let expected_size = HashSet::non_dyn_fields_size() + values_size;
        if bytes.len() < expected_size {
            return Err(HashSetError::BufferSize(expected_size, bytes.len()));
        }
        // Reinterpret the tail of the buffer as the bucket array. The
        // pointer is non-null because `bytes` is a valid slice.
        let buckets =
            NonNull::new(bytes.as_mut_ptr().add(offset) as *mut Option<HashSetCell>).unwrap();
        Ok(Self {
            hash_set: mem::ManuallyDrop::new(HashSet {
                capacity: capacity_values,
                sequence_threshold,
                buckets,
            }),
            _marker: PhantomData,
        })
    }

    /// Casts a byte slice into `HashSet` and then initializes it.
    ///
    /// * `bytes` is casted into a reference of `HashSet` and used as
    ///   storage for the struct.
    /// * `capacity_values` indicates the size of the values array. It should be
    ///   equal to the number of expected elements, without load factor.
    /// * `sequence_threshold` indicates a difference of sequence numbers which
    ///   make elements of the hash set expired. Expiration means that they can
    ///   be replaced during insertion of new elements with sequence numbers
    ///   higher by at least a threshold.
    ///
    /// # Purpose
    ///
    /// This method is meant to be used mostly in Solana programs to initialize
    /// a new account which is supposed to store the hash set.
    ///
    /// # Safety
    ///
    /// This is highly unsafe. Ensuring the alignment and that the slice has
    /// a correct size, which is able to fit the hash set, is the caller's
    /// responsibility.
    ///
    /// Calling it in async context (or anywhere where the underlying data can
    /// be moved in memory) is certainly going to cause undefined behavior.
    pub unsafe fn from_bytes_zero_copy_init(
        bytes: &'a mut [u8],
        capacity_values: usize,
        sequence_threshold: usize,
    ) -> Result<Self, HashSetError> {
        if bytes.len() < HashSet::non_dyn_fields_size() {
            return Err(HashSetError::BufferSize(
                HashSet::non_dyn_fields_size(),
                bytes.len(),
            ));
        }
        // Write the header fields the `_mut` constructor reads back.
        bytes[0..8].copy_from_slice(&capacity_values.to_le_bytes());
        bytes[8..16].copy_from_slice(&sequence_threshold.to_le_bytes());
        // Zero the third header word — presumably a next-index/sequence
        // counter; confirm against `HashSet`'s field layout (not visible
        // here).
        bytes[16..24].copy_from_slice(&0_usize.to_le_bytes());
        let hash_set = Self::from_bytes_zero_copy_mut(bytes)?;
        // Mark every bucket empty; the caller's buffer may contain garbage.
        for i in 0..capacity_values {
            std::ptr::write(hash_set.hash_set.buckets.as_ptr().add(i), None);
        }
        Ok(hash_set)
    }
}
impl<'a> Drop for HashSetZeroCopy<'a> {
    fn drop(&mut self) {
        // SAFETY: Don't do anything here! Why?
        //
        // * Primitive fields of `HashSet` implement `Copy`, therefore `drop()`
        //   has no effect on them - Rust drops them when they go out of scope.
        // * Don't drop the dynamic bucket storage behind `buckets`. In
        //   `HashSetZeroCopy`, it is backed by the buffer provided by the
        //   caller, which is going to be eventually deallocated by the
        //   caller. Performing another `drop()` here (the inner `HashSet` is
        //   deliberately wrapped in `ManuallyDrop`) would result in a double
        //   `free()`, which would result in aborting the program (either
        //   with `SIGABRT` or `SIGSEGV`).
    }
}
impl<'a> Deref for HashSetZeroCopy<'a> {
    type Target = HashSet;

    /// Borrows the wrapped [`HashSet`], seeing through the `ManuallyDrop`.
    fn deref(&self) -> &Self::Target {
        &*self.hash_set
    }
}
impl<'a> DerefMut for HashSetZeroCopy<'a> {
    /// Mutably borrows the wrapped [`HashSet`], seeing through the
    /// `ManuallyDrop`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.hash_set
    }
}
#[cfg(test)]
mod test {
    use ark_bn254::Fr;
    use ark_ff::UniformRand;
    use num_bigint::BigUint;
    use rand::{thread_rng, Rng};

    use super::*;

    /// End-to-end round trip: init a hash set over raw bytes, insert and
    /// mark elements, re-open the same buffer zero-copy, then make an owned
    /// copy — all three views must agree.
    #[test]
    fn test_load_from_bytes() {
        const VALUES: usize = 4800;
        const SEQUENCE_THRESHOLD: usize = 2400;

        // Fill the buffer with random bytes to prove that `_init` fully
        // overwrites whatever garbage was there before.
        let mut bytes = vec![0u8; HashSet::size_in_account(VALUES)];
        thread_rng().fill(bytes.as_mut_slice());

        // Create random nullifiers.
        let mut rng = thread_rng();
        let nullifiers: [BigUint; 2400] =
            std::array::from_fn(|_| BigUint::from(Fr::rand(&mut rng)));

        // Initialize a hash set on top of a byte slice.
        {
            let mut hs = unsafe {
                HashSetZeroCopy::from_bytes_zero_copy_init(
                    bytes.as_mut_slice(),
                    VALUES,
                    SEQUENCE_THRESHOLD,
                )
                .unwrap()
            };

            // Ensure that the underlying data were properly initialized.
            assert_eq!(hs.hash_set.get_capacity(), VALUES);
            assert_eq!(hs.hash_set.sequence_threshold, SEQUENCE_THRESHOLD);
            for i in 0..VALUES {
                assert!(unsafe { &*hs.hash_set.buckets.as_ptr().add(i) }.is_none());
            }

            for (seq, nullifier) in nullifiers.iter().enumerate() {
                let index = hs.insert(nullifier, seq).unwrap();
                hs.mark_with_sequence_number(index, seq).unwrap();
            }
        }

        // Read the hash set from the same buffer again.
        {
            let mut hs =
                unsafe { HashSetZeroCopy::from_bytes_zero_copy_mut(bytes.as_mut_slice()).unwrap() };
            for (seq, nullifier) in nullifiers.iter().enumerate() {
                assert!(hs.contains(nullifier, Some(seq)).unwrap());
            }
            // Re-inserting the same elements past the sequence threshold
            // must succeed — the originals are expired by then.
            for (seq, nullifier) in nullifiers.iter().enumerate() {
                hs.insert(nullifier, 2400 + seq).unwrap();
            }
            drop(hs);
        }

        // Make an owned copy of the hash set from the same buffer.
        {
            let hs = unsafe { HashSet::from_bytes_copy(bytes.as_mut_slice()).unwrap() };
            for (seq, nullifier) in nullifiers.iter().enumerate() {
                assert!(hs.contains(nullifier, Some(2400 + seq)).unwrap());
            }
        }
    }

    /// A buffer far too small for the requested capacity must be rejected
    /// with `BufferSize` instead of reading out of bounds.
    #[test]
    fn test_buffer_size_error() {
        const VALUES: usize = 4800;
        const SEQUENCE_THRESHOLD: usize = 2400;

        let mut invalid_bytes = vec![0_u8; 256];
        let res = unsafe {
            HashSetZeroCopy::from_bytes_zero_copy_init(
                invalid_bytes.as_mut_slice(),
                VALUES,
                SEQUENCE_THRESHOLD,
            )
        };
        assert!(matches!(res, Err(HashSetError::BufferSize(_, _))));
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/hash-set/src/lib.rs
|
use light_utils::{bigint::bigint_to_be_bytes_array, UtilsError};
use num_bigint::{BigUint, ToBigUint};
use num_traits::{FromBytes, ToPrimitive};
use std::{
alloc::{self, handle_alloc_error, Layout},
cmp::Ordering,
marker::Send,
mem,
ptr::NonNull,
};
use thiserror::Error;
pub mod zero_copy;
pub const ITERATIONS: usize = 20;
/// Errors returned by [`HashSet`] and its zero-copy wrapper.
#[derive(Debug, Error, PartialEq)]
pub enum HashSetError {
    /// No free (or expired) bucket was found within the probing limit.
    #[error("The hash set is full, cannot add any new elements")]
    Full,
    /// The value is already present and still valid (not expired).
    #[error("The provided element is already in the hash set")]
    ElementAlreadyExists,
    /// Lookup/marking referenced a bucket that holds no element.
    #[error("The provided element doesn't exist in the hash set")]
    ElementDoesNotExist,
    /// An index could not be converted from/to `usize`.
    #[error("Could not convert the index from/to usize")]
    UsizeConv,
    /// Arithmetic overflow during size or index computation.
    #[error("Integer overflow")]
    IntegerOverflow,
    /// Buffer length mismatch: expected `.0` bytes, got `.1`.
    #[error("Invalid buffer size, expected {0}, got {1}")]
    BufferSize(usize, usize),
    /// Error bubbled up from `light_utils` big-integer conversion.
    #[error("Utils: big integer conversion error")]
    Utils(#[from] UtilsError),
}
#[cfg(feature = "solana")]
impl From<HashSetError> for u32 {
    /// Maps each error to a stable numeric code (9001..=9006) for on-chain
    /// reporting; `Utils` errors delegate to their own code mapping.
    fn from(e: HashSetError) -> u32 {
        match e {
            HashSetError::Full => 9001,
            HashSetError::ElementAlreadyExists => 9002,
            HashSetError::ElementDoesNotExist => 9003,
            HashSetError::UsizeConv => 9004,
            HashSetError::IntegerOverflow => 9005,
            HashSetError::BufferSize(_, _) => 9006,
            HashSetError::Utils(e) => e.into(),
        }
    }
}
#[cfg(feature = "solana")]
impl From<HashSetError> for solana_program::program_error::ProgramError {
    /// Wraps the numeric error code (see `From<HashSetError> for u32`) into a
    /// Solana `ProgramError::Custom`.
    fn from(e: HashSetError) -> Self {
        solana_program::program_error::ProgramError::Custom(e.into())
    }
}
/// A single bucket of the hash set: a 32-byte big-endian value plus an
/// optional expiry sequence number set by `mark_with_sequence_number`.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct HashSetCell {
    // Big-endian byte representation of the stored value.
    pub value: [u8; 32],
    // Expiry sequence number; `None` until the cell is marked.
    pub sequence_number: Option<usize>,
}
unsafe impl Send for HashSet {}
impl HashSetCell {
    /// Returns the value as a byte array (big-endian).
    pub fn value_bytes(&self) -> [u8; 32] {
        self.value
    }
    /// Returns the value as a big number.
    pub fn value_biguint(&self) -> BigUint {
        BigUint::from_bytes_be(self.value.as_slice())
    }
    /// Returns the associated sequence number.
    pub fn sequence_number(&self) -> Option<usize> {
        self.sequence_number
    }
    /// Checks whether the value is marked with a sequence number.
    pub fn is_marked(&self) -> bool {
        self.sequence_number.is_some()
    }
    /// Checks whether the value is valid according to the provided
    /// `current_sequence_number` (which usually should be a sequence number
    /// associated with the Merkle tree).
    ///
    /// The value is valid if:
    ///
    /// * It was not annotated with a sequence number, or
    /// * Its (expiry) sequence number is **greater** than the provided
    ///   `current_sequence_number`.
    ///
    /// The value is invalid (expired) if its sequence number is lower than
    /// or equal to the provided `current_sequence_number`.
    pub fn is_valid(&self, current_sequence_number: usize) -> bool {
        match self.sequence_number {
            Some(sequence_number) => match sequence_number.cmp(&current_sequence_number) {
                Ordering::Less | Ordering::Equal => false,
                Ordering::Greater => true,
            },
            None => true,
        }
    }
}
/// An open-addressing hash set with quadratic probing, tailored for
/// sequence-number-based expiry (elements become overwritable once the
/// current sequence number reaches their expiry).
#[derive(Debug)]
pub struct HashSet {
    /// Capacity of the buckets.
    capacity: usize,
    /// Difference of sequence numbers, after which the given element can be
    /// replaced by an another one (with a sequence number higher than the
    /// threshold).
    pub sequence_threshold: usize,
    /// An array of buckets. It has a size equal to the expected number of
    /// elements. Owned allocation, freed in `Drop`.
    buckets: NonNull<Option<HashSetCell>>,
}
unsafe impl Send for HashSetCell {}
impl HashSet {
    /// Size of the struct **without** dynamically sized fields.
    pub fn non_dyn_fields_size() -> usize {
        // capacity
        mem::size_of::<usize>()
        // sequence_threshold
        + mem::size_of::<usize>()
    }
    /// Size which needs to be allocated on Solana account to fit the hash set.
    pub fn size_in_account(capacity_values: usize) -> usize {
        let dyn_fields_size = Self::non_dyn_fields_size();
        let buckets_size_unaligned = mem::size_of::<Option<HashSetCell>>() * capacity_values;
        // Make sure that alignment of `values` matches the alignment of `usize`.
        // NOTE(review): when `buckets_size_unaligned` is already a multiple of
        // the alignment this still adds a full `align_of::<usize>()` word of
        // padding; `from_bytes_copy` depends on exactly this size, so keep the
        // two in sync.
        let buckets_size = buckets_size_unaligned + mem::align_of::<usize>()
            - (buckets_size_unaligned % mem::align_of::<usize>());
        dyn_fields_size + buckets_size
    }
    // Create a new hash set with the given capacity
    pub fn new(capacity_values: usize, sequence_threshold: usize) -> Result<Self, HashSetError> {
        // SAFETY: It's just a regular allocation.
        let layout = Layout::array::<Option<HashSetCell>>(capacity_values).unwrap();
        let values_ptr = unsafe { alloc::alloc(layout) as *mut Option<HashSetCell> };
        if values_ptr.is_null() {
            handle_alloc_error(layout);
        }
        let values = NonNull::new(values_ptr).unwrap();
        // Initialize every bucket to `None` so reads before the first insert
        // are well-defined.
        for i in 0..capacity_values {
            unsafe {
                std::ptr::write(values_ptr.add(i), None);
            }
        }
        Ok(HashSet {
            sequence_threshold,
            capacity: capacity_values,
            buckets: values,
        })
    }
    /// Creates a copy of `HashSet` from the given byte slice.
    ///
    /// # Purpose
    ///
    /// This method is meant to be used mostly in the SDK code, to convert
    /// fetched Solana accounts to actual hash sets. Creating a copy is the
    /// safest way of conversion in async Rust.
    ///
    /// # Safety
    ///
    /// This is highly unsafe. Ensuring the alignment and that the slice
    /// provides actual data of the hash set is the caller's
    /// responsibility.
    pub unsafe fn from_bytes_copy(bytes: &mut [u8]) -> Result<Self, HashSetError> {
        if bytes.len() < Self::non_dyn_fields_size() {
            return Err(HashSetError::BufferSize(
                Self::non_dyn_fields_size(),
                bytes.len(),
            ));
        }
        // Header layout: bytes 0..8 = capacity, bytes 8..16 = sequence
        // threshold (both little-endian).
        let capacity = usize::from_le_bytes(bytes[0..8].try_into().unwrap());
        let sequence_threshold = usize::from_le_bytes(bytes[8..16].try_into().unwrap());
        let expected_size = Self::size_in_account(capacity);
        if bytes.len() != expected_size {
            return Err(HashSetError::BufferSize(expected_size, bytes.len()));
        }
        let buckets_layout = Layout::array::<Option<HashSetCell>>(capacity).unwrap();
        // PANICS: `Layout::array` only fails when the total size overflows
        // `isize`, which the `expected_size` check above already bounds.
        let buckets_dst_ptr = unsafe { alloc::alloc(buckets_layout) as *mut Option<HashSetCell> };
        if buckets_dst_ptr.is_null() {
            handle_alloc_error(buckets_layout);
        }
        let buckets = NonNull::new(buckets_dst_ptr).unwrap();
        for i in 0..capacity {
            std::ptr::write(buckets_dst_ptr.add(i), None);
        }
        // Buckets start after the two header words plus one extra word
        // (the padding accounted for in `size_in_account`).
        // NOTE(review): assumes the serialized layout produced by the
        // zero-copy writer matches this offset — confirm against
        // `zero_copy.rs`.
        let offset = Self::non_dyn_fields_size() + mem::size_of::<usize>();
        let buckets_src_ptr = bytes.as_ptr().add(offset) as *const Option<HashSetCell>;
        std::ptr::copy(buckets_src_ptr, buckets_dst_ptr, capacity);
        Ok(Self {
            capacity,
            sequence_threshold,
            buckets,
        })
    }
    /// Computes the bucket index for `value` at probing `iteration` via
    /// quadratic probing: `(value + i^2) mod capacity`, where `i` is the
    /// iteration shifted by `capacity / 10`.
    fn probe_index(&self, value: &BigUint, iteration: usize) -> usize {
        // Increase stepsize over the capacity of the hash set.
        let iteration = iteration + self.capacity / 10;
        let probe_index = (value
            + iteration.to_biguint().unwrap() * iteration.to_biguint().unwrap())
            % self.capacity.to_biguint().unwrap();
        probe_index.to_usize().unwrap()
    }
    /// Returns a reference to a bucket under the given `index`. Does not check
    /// the validity. Returns `None` for out-of-bounds indices.
    pub fn get_bucket(&self, index: usize) -> Option<&Option<HashSetCell>> {
        if index >= self.capacity {
            return None;
        }
        let bucket = unsafe { &*self.buckets.as_ptr().add(index) };
        Some(bucket)
    }
    /// Returns a mutable reference to a bucket under the given `index`. Does
    /// not check the validity. Returns `None` for out-of-bounds indices.
    pub fn get_bucket_mut(&mut self, index: usize) -> Option<&mut Option<HashSetCell>> {
        if index >= self.capacity {
            return None;
        }
        let bucket = unsafe { &mut *self.buckets.as_ptr().add(index) };
        Some(bucket)
    }
    /// Returns a reference to an unmarked bucket under the given index. If the
    /// bucket is marked (or empty, or out of bounds), returns `None`.
    pub fn get_unmarked_bucket(&self, index: usize) -> Option<&Option<HashSetCell>> {
        let bucket = self.get_bucket(index);
        let is_unmarked = match bucket {
            Some(Some(bucket)) => !bucket.is_marked(),
            Some(None) => false,
            None => false,
        };
        if is_unmarked {
            bucket
        } else {
            None
        }
    }
    /// Returns the number of buckets.
    pub fn get_capacity(&self) -> usize {
        self.capacity
    }
    /// Tries to place `value` into the already-occupied bucket at
    /// `value_index`. Returns `Ok(true)` if the occupant was expired and got
    /// overwritten, `Ok(false)` if the occupant is still valid and holds a
    /// different value, and `ElementAlreadyExists` if it holds the same value.
    fn insert_into_occupied_cell(
        &mut self,
        value_index: usize,
        value: &BigUint,
        current_sequence_number: usize,
    ) -> Result<bool, HashSetError> {
        // PANICS: We trust the bounds of `value_index` here.
        let bucket = self.get_bucket_mut(value_index).unwrap();
        match bucket {
            // The cell in the value array is already taken.
            Some(bucket) => {
                // We can overwrite that cell only if the element is expired -
                // i.e. its stored expiry sequence number (assigned as
                // `mark sequence + sequence_threshold` by
                // `mark_with_sequence_number`) has been reached by
                // `current_sequence_number`.
                if let Some(element_sequence_number) = bucket.sequence_number {
                    if current_sequence_number >= element_sequence_number {
                        *bucket = HashSetCell {
                            value: bigint_to_be_bytes_array(value)?,
                            sequence_number: None,
                        };
                        return Ok(true);
                    }
                }
                // Otherwise, we need to prevent having multiple valid
                // elements with the same value.
                if &BigUint::from_be_bytes(bucket.value.as_slice()) == value {
                    return Err(HashSetError::ElementAlreadyExists);
                }
            }
            // Panics: If there is a hash set cell pointing to a `None` value,
            // it means we really screwed up in the implementation...
            // That should never happen.
            None => unreachable!(),
        }
        Ok(false)
    }
    /// Inserts a value into the hash set, with up to `ITERATIONS` quadratic
    /// probing attempts to find an empty cell or a cell which can be
    /// overwritten.
    ///
    /// `current_sequence_number` is used to check whether existing values can
    /// be overwritten (see `HashSetCell::is_valid`).
    ///
    /// Returns the bucket index the value was written to.
    pub fn insert(
        &mut self,
        value: &BigUint,
        current_sequence_number: usize,
    ) -> Result<usize, HashSetError> {
        let index_bucket = self.find_element_iter(value, current_sequence_number, 0, ITERATIONS)?;
        let (index, is_new) = match index_bucket {
            Some(index) => index,
            None => {
                return Err(HashSetError::Full);
            }
        };
        match is_new {
            // The visited hash set cell points to a value in the array.
            false => {
                if self.insert_into_occupied_cell(index, value, current_sequence_number)? {
                    return Ok(index);
                }
            }
            true => {
                // PANICS: We trust the bounds of `index`.
                let bucket = self.get_bucket_mut(index).unwrap();
                *bucket = Some(HashSetCell {
                    value: bigint_to_be_bytes_array(value)?,
                    sequence_number: None,
                });
                return Ok(index);
            }
        }
        Err(HashSetError::Full)
    }
    /// Finds an index of the provided `value` inside `buckets`.
    ///
    /// Uses the optional `current_sequence_number` arguments for checking the
    /// validity of the element; when `None`, the first bucket holding the
    /// value is returned regardless of expiry.
    pub fn find_element_index(
        &self,
        value: &BigUint,
        current_sequence_number: Option<usize>,
    ) -> Result<Option<usize>, HashSetError> {
        for i in 0..ITERATIONS {
            let probe_index = self.probe_index(value, i);
            // PANICS: `probe_index()` ensures the bounds.
            let bucket = self.get_bucket(probe_index).unwrap();
            match bucket {
                Some(bucket) => {
                    if &bucket.value_biguint() == value {
                        match current_sequence_number {
                            // If the caller provided `current_sequence_number`,
                            // check the validity of the bucket.
                            Some(current_sequence_number) => {
                                if bucket.is_valid(current_sequence_number) {
                                    return Ok(Some(probe_index));
                                }
                                continue;
                            }
                            None => return Ok(Some(probe_index)),
                        }
                    }
                    continue;
                }
                // If we found an empty bucket, it means that there is no
                // chance of our element existing in the hash set.
                None => {
                    return Ok(None);
                }
            }
        }
        Ok(None)
    }
    /// Like `find_element_index`, but returns the cell itself together with
    /// its index.
    pub fn find_element(
        &self,
        value: &BigUint,
        current_sequence_number: Option<usize>,
    ) -> Result<Option<(&HashSetCell, usize)>, HashSetError> {
        let index = self.find_element_index(value, current_sequence_number)?;
        match index {
            Some(index) => {
                let bucket = self.get_bucket(index).unwrap();
                match bucket {
                    Some(bucket) => Ok(Some((bucket, index))),
                    None => Ok(None),
                }
            }
            None => Ok(None),
        }
    }
    /// Mutable variant of `find_element`.
    pub fn find_element_mut(
        &mut self,
        value: &BigUint,
        current_sequence_number: Option<usize>,
    ) -> Result<Option<(&mut HashSetCell, usize)>, HashSetError> {
        let index = self.find_element_index(value, current_sequence_number)?;
        match index {
            Some(index) => {
                let bucket = self.get_bucket_mut(index).unwrap();
                match bucket {
                    Some(bucket) => Ok(Some((bucket, index))),
                    None => Ok(None),
                }
            }
            None => Ok(None),
        }
    }
    /// find_element_iter iterates over a fixed range of elements
    /// in the hash set.
    /// We always have to iterate over the whole range
    /// to make sure that the value is not in the hash-set.
    /// Returns the position of the first free value as `(index, is_empty)`,
    /// where `is_empty` is `true` for a never-used bucket and `false` for an
    /// expired (overwritable) one. Errors with `ElementAlreadyExists` if a
    /// still-valid copy of `value` is found.
    pub fn find_element_iter(
        &mut self,
        value: &BigUint,
        current_sequence_number: usize,
        start_iter: usize,
        num_iterations: usize,
    ) -> Result<Option<(usize, bool)>, HashSetError> {
        let mut first_free_element: Option<(usize, bool)> = None;
        for i in start_iter..start_iter + num_iterations {
            let probe_index = self.probe_index(value, i);
            let bucket = self.get_bucket(probe_index).unwrap();
            match bucket {
                Some(bucket) => {
                    let is_valid = bucket.is_valid(current_sequence_number);
                    if first_free_element.is_none() && !is_valid {
                        first_free_element = Some((probe_index, false));
                    }
                    if is_valid && &bucket.value_biguint() == value {
                        return Err(HashSetError::ElementAlreadyExists);
                    } else {
                        continue;
                    }
                }
                None => {
                    // A previous bucket could have been freed already even
                    // though the whole hash set has not been used yet.
                    if first_free_element.is_none() {
                        first_free_element = Some((probe_index, true));
                    }
                    // Since we encountered an empty bucket we know for sure
                    // that the element is not in a bucket with higher probe
                    // index.
                    break;
                }
            }
        }
        Ok(first_free_element)
    }
    /// Returns a first available element (linear scan in bucket order).
    pub fn first(
        &self,
        current_sequence_number: usize,
    ) -> Result<Option<&HashSetCell>, HashSetError> {
        for i in 0..self.capacity {
            // PANICS: The loop ensures the bounds.
            let bucket = self.get_bucket(i).unwrap();
            if let Some(bucket) = bucket {
                if bucket.is_valid(current_sequence_number) {
                    return Ok(Some(bucket));
                }
            }
        }
        Ok(None)
    }
    /// Returns a first available element that does not have a sequence number.
    ///
    /// NOTE(review): the returned index is narrowed with `as u16` and would
    /// silently truncate for capacities above `u16::MAX` — confirm capacity
    /// bounds at call sites.
    pub fn first_no_seq(&self) -> Result<Option<(HashSetCell, u16)>, HashSetError> {
        for i in 0..self.capacity {
            // PANICS: The loop ensures the bounds.
            let bucket = self.get_bucket(i).unwrap();
            if let Some(bucket) = bucket {
                if bucket.sequence_number.is_none() {
                    return Ok(Some((*bucket, i as u16)));
                }
            }
        }
        Ok(None)
    }
    /// Checks if the hash set contains a value.
    pub fn contains(
        &self,
        value: &BigUint,
        sequence_number: Option<usize>,
    ) -> Result<bool, HashSetError> {
        let element = self.find_element(value, sequence_number)?;
        Ok(element.is_some())
    }
    /// Marks the given element with a given sequence number: the element's
    /// stored expiry becomes `sequence_number + self.sequence_threshold`.
    pub fn mark_with_sequence_number(
        &mut self,
        index: usize,
        sequence_number: usize,
    ) -> Result<(), HashSetError> {
        let sequence_threshold = self.sequence_threshold;
        let element = self
            .get_bucket_mut(index)
            .ok_or(HashSetError::ElementDoesNotExist)?;
        match element {
            Some(element) => {
                element.sequence_number = Some(sequence_number + sequence_threshold);
                Ok(())
            }
            None => Err(HashSetError::ElementDoesNotExist),
        }
    }
    /// Returns an iterator over occupied buckets as `(index, cell)` pairs.
    pub fn iter(&self) -> HashSetIterator {
        HashSetIterator {
            hash_set: self,
            current: 0,
        }
    }
}
impl Drop for HashSet {
    fn drop(&mut self) {
        // SAFETY: `buckets` was allocated (in `new` or `from_bytes_copy`) with
        // exactly this layout — `capacity` elements of `Option<HashSetCell>` —
        // so deallocating with the same layout is sound.
        unsafe {
            let layout = Layout::array::<Option<HashSetCell>>(self.capacity).unwrap();
            alloc::dealloc(self.buckets.as_ptr() as *mut u8, layout);
        }
    }
}
impl PartialEq for HashSet {
    /// Two hash sets are equal when their capacity, sequence threshold and
    /// the ordered sequence of occupied buckets all match.
    fn eq(&self, other: &Self) -> bool {
        if self.capacity != other.capacity {
            return false;
        }
        if self.sequence_threshold != other.sequence_threshold {
            return false;
        }
        self.iter().eq(other.iter())
    }
}
/// Iterator over the occupied buckets of a [`HashSet`], yielding
/// `(index, &HashSetCell)` pairs in bucket order.
pub struct HashSetIterator<'a> {
    hash_set: &'a HashSet,
    // Next bucket index to inspect.
    current: usize,
}
impl<'a> Iterator for HashSetIterator<'a> {
    type Item = (usize, &'a HashSetCell);

    /// Advances to the next occupied bucket, skipping empty ones, and yields
    /// it together with its index.
    fn next(&mut self) -> Option<Self::Item> {
        let capacity = self.hash_set.get_capacity();
        while self.current < capacity {
            let index = self.current;
            self.current += 1;
            match self.hash_set.get_bucket(index) {
                Some(Some(cell)) => return Some((index, cell)),
                _ => {}
            }
        }
        None
    }
}
#[cfg(test)]
mod test {
    use ark_bn254::Fr;
    use ark_ff::UniformRand;
    use rand::{thread_rng, Rng};

    use crate::zero_copy::HashSetZeroCopy;

    use super::*;

    /// `is_valid` semantics: unmarked cells are always valid; marked cells
    /// are valid strictly below their expiry sequence number.
    #[test]
    fn test_is_valid() {
        let mut rng = thread_rng();
        let cell = HashSetCell {
            value: [0u8; 32],
            sequence_number: None,
        };
        // It should be always valid, no matter the sequence number.
        assert_eq!(cell.is_valid(0), true);
        for _ in 0..100 {
            let seq: usize = rng.gen();
            assert_eq!(cell.is_valid(seq), true);
        }
        let cell = HashSetCell {
            value: [0u8; 32],
            sequence_number: Some(2400),
        };
        // Sequence numbers up to 2400 should succeed.
        for i in 0..2400 {
            assert_eq!(cell.is_valid(i), true);
        }
        for i in 2400..10000 {
            assert_eq!(cell.is_valid(i), false);
        }
    }
    /// Manual test cases. A simple check whether basic properties of the hash
    /// set work.
    #[test]
    fn test_hash_set_manual() {
        let mut hs = HashSet::new(256, 4).unwrap();
        // Insert an element and immediately mark it with a sequence number.
        // An equivalent to a single insertion in Light Protocol
        let element_1_1 = 1.to_biguint().unwrap();
        let index_1_1 = hs.insert(&element_1_1, 0).unwrap();
        hs.mark_with_sequence_number(index_1_1, 1).unwrap();
        // Check if element exists in the set.
        assert_eq!(hs.contains(&element_1_1, Some(1)).unwrap(), true);
        // Try inserting the same element, even though we didn't reach the
        // threshold.
        assert!(matches!(
            hs.insert(&element_1_1, 1),
            Err(HashSetError::ElementAlreadyExists)
        ));
        // Insert multiple elements and mark them with one sequence number.
        // An equivalent to a batched insertion in Light Protocol.
        let element_2_3 = 3.to_biguint().unwrap();
        let element_2_6 = 6.to_biguint().unwrap();
        let element_2_8 = 8.to_biguint().unwrap();
        let element_2_9 = 9.to_biguint().unwrap();
        let index_2_3 = hs.insert(&element_2_3, 1).unwrap();
        let index_2_6 = hs.insert(&element_2_6, 1).unwrap();
        let index_2_8 = hs.insert(&element_2_8, 1).unwrap();
        let index_2_9 = hs.insert(&element_2_9, 1).unwrap();
        assert_eq!(hs.contains(&element_2_3, Some(2)).unwrap(), true);
        assert_eq!(hs.contains(&element_2_6, Some(2)).unwrap(), true);
        assert_eq!(hs.contains(&element_2_8, Some(2)).unwrap(), true);
        assert_eq!(hs.contains(&element_2_9, Some(2)).unwrap(), true);
        hs.mark_with_sequence_number(index_2_3, 2).unwrap();
        hs.mark_with_sequence_number(index_2_6, 2).unwrap();
        hs.mark_with_sequence_number(index_2_8, 2).unwrap();
        hs.mark_with_sequence_number(index_2_9, 2).unwrap();
        assert!(matches!(
            hs.insert(&element_2_3, 2),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_6, 2),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_8, 2),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_9, 2),
            Err(HashSetError::ElementAlreadyExists)
        ));
        let element_3_11 = 11.to_biguint().unwrap();
        let element_3_13 = 13.to_biguint().unwrap();
        let element_3_21 = 21.to_biguint().unwrap();
        let element_3_29 = 29.to_biguint().unwrap();
        let index_3_11 = hs.insert(&element_3_11, 2).unwrap();
        let index_3_13 = hs.insert(&element_3_13, 2).unwrap();
        let index_3_21 = hs.insert(&element_3_21, 2).unwrap();
        let index_3_29 = hs.insert(&element_3_29, 2).unwrap();
        assert_eq!(hs.contains(&element_3_11, Some(3)).unwrap(), true);
        assert_eq!(hs.contains(&element_3_13, Some(3)).unwrap(), true);
        assert_eq!(hs.contains(&element_3_21, Some(3)).unwrap(), true);
        assert_eq!(hs.contains(&element_3_29, Some(3)).unwrap(), true);
        hs.mark_with_sequence_number(index_3_11, 3).unwrap();
        hs.mark_with_sequence_number(index_3_13, 3).unwrap();
        hs.mark_with_sequence_number(index_3_21, 3).unwrap();
        hs.mark_with_sequence_number(index_3_29, 3).unwrap();
        assert!(matches!(
            hs.insert(&element_3_11, 3),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_3_13, 3),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_3_21, 3),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_3_29, 3),
            Err(HashSetError::ElementAlreadyExists)
        ));
        let element_4_93 = 93.to_biguint().unwrap();
        // NOTE(review): the name says 65 but the value is 64 — harmless for
        // the assertions below, but worth renaming.
        let element_4_65 = 64.to_biguint().unwrap();
        let element_4_72 = 72.to_biguint().unwrap();
        let element_4_15 = 15.to_biguint().unwrap();
        let index_4_93 = hs.insert(&element_4_93, 3).unwrap();
        let index_4_65 = hs.insert(&element_4_65, 3).unwrap();
        let index_4_72 = hs.insert(&element_4_72, 3).unwrap();
        let index_4_15 = hs.insert(&element_4_15, 3).unwrap();
        assert_eq!(hs.contains(&element_4_93, Some(4)).unwrap(), true);
        assert_eq!(hs.contains(&element_4_65, Some(4)).unwrap(), true);
        assert_eq!(hs.contains(&element_4_72, Some(4)).unwrap(), true);
        assert_eq!(hs.contains(&element_4_15, Some(4)).unwrap(), true);
        hs.mark_with_sequence_number(index_4_93, 4).unwrap();
        hs.mark_with_sequence_number(index_4_65, 4).unwrap();
        hs.mark_with_sequence_number(index_4_72, 4).unwrap();
        hs.mark_with_sequence_number(index_4_15, 4).unwrap();
        // Try inserting the same elements we inserted before.
        //
        // Ones with the sequence number difference lower or equal to the
        // sequence threshold (4) will fail.
        //
        // Ones with the higher dif will succeed.
        assert!(matches!(
            hs.insert(&element_1_1, 4),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_3, 5),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_6, 5),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_8, 5),
            Err(HashSetError::ElementAlreadyExists)
        ));
        assert!(matches!(
            hs.insert(&element_2_9, 5),
            Err(HashSetError::ElementAlreadyExists)
        ));
        hs.insert(&element_1_1, 5).unwrap();
        hs.insert(&element_2_3, 6).unwrap();
        hs.insert(&element_2_6, 6).unwrap();
        hs.insert(&element_2_8, 6).unwrap();
        hs.insert(&element_2_9, 6).unwrap();
    }
    /// Test cases with random prime field elements.
    #[test]
    fn test_hash_set_random() {
        let mut hs = HashSet::new(6857, 2400).unwrap();
        // The hash set should be empty.
        assert_eq!(hs.first(0).unwrap(), None);
        let mut rng = thread_rng();
        let mut seq = 0;
        let nullifiers: [BigUint; 24000] =
            std::array::from_fn(|_| BigUint::from(Fr::rand(&mut rng)));
        for nf_chunk in nullifiers.chunks(2400) {
            for nullifier in nf_chunk.iter() {
                assert_eq!(hs.contains(&nullifier, Some(seq)).unwrap(), false);
                let index = hs.insert(&nullifier, seq as usize).unwrap();
                assert_eq!(hs.contains(&nullifier, Some(seq)).unwrap(), true);
                let nullifier_bytes = bigint_to_be_bytes_array(&nullifier).unwrap();
                // Freshly inserted: unmarked, valid at the current sequence.
                let element = hs
                    .find_element(&nullifier, Some(seq))
                    .unwrap()
                    .unwrap()
                    .0
                    .clone();
                assert_eq!(
                    element,
                    HashSetCell {
                        value: bigint_to_be_bytes_array(&nullifier).unwrap(),
                        sequence_number: None,
                    }
                );
                assert_eq!(element.value_bytes(), nullifier_bytes);
                assert_eq!(&element.value_biguint(), nullifier);
                assert_eq!(element.sequence_number(), None);
                assert!(!element.is_marked());
                assert!(element.is_valid(seq));
                hs.mark_with_sequence_number(index, seq).unwrap();
                // After marking: expiry is `seq + 2400` (sequence threshold).
                let element = hs
                    .find_element(&nullifier, Some(seq))
                    .unwrap()
                    .unwrap()
                    .0
                    .clone();
                assert_eq!(
                    element,
                    HashSetCell {
                        value: nullifier_bytes,
                        sequence_number: Some(2400 + seq)
                    }
                );
                assert_eq!(element.value_bytes(), nullifier_bytes);
                assert_eq!(&element.value_biguint(), nullifier);
                assert_eq!(element.sequence_number(), Some(2400 + seq));
                assert!(element.is_marked());
                assert!(element.is_valid(seq));
                // Trying to insert the same nullifier, before reaching the
                // sequence threshold, should fail.
                assert!(matches!(
                    hs.insert(&nullifier, seq as usize + 2399),
                    Err(HashSetError::ElementAlreadyExists),
                ));
                seq += 1;
            }
            seq += 2400;
        }
    }
    /// Performs the same insertions against a heap-allocated set and a
    /// zero-copy set on a byte buffer, then checks both equality paths
    /// (deref'd zero-copy and `from_bytes_copy`).
    fn hash_set_from_bytes_copy<
        const CAPACITY: usize,
        const SEQUENCE_THRESHOLD: usize,
        const OPERATIONS: usize,
    >() {
        let mut hs_1 = HashSet::new(CAPACITY, SEQUENCE_THRESHOLD).unwrap();
        let mut rng = thread_rng();
        // Create a buffer with random bytes.
        let mut bytes = vec![0u8; HashSet::size_in_account(CAPACITY)];
        rng.fill(bytes.as_mut_slice());
        // Initialize a hash set on top of a byte slice.
        {
            let mut hs_2 = unsafe {
                HashSetZeroCopy::from_bytes_zero_copy_init(&mut bytes, CAPACITY, SEQUENCE_THRESHOLD)
                    .unwrap()
            };
            for seq in 0..OPERATIONS {
                let value = BigUint::from(Fr::rand(&mut rng));
                hs_1.insert(&value, seq).unwrap();
                hs_2.insert(&value, seq).unwrap();
            }
            assert_eq!(hs_1, *hs_2);
        }
        // Create a copy on top of a byte slice.
        {
            let hs_2 = unsafe { HashSet::from_bytes_copy(&mut bytes).unwrap() };
            assert_eq!(hs_1, hs_2);
        }
    }
    #[test]
    fn test_hash_set_from_bytes_copy_6857_2400_3600() {
        hash_set_from_bytes_copy::<6857, 2400, 3600>()
    }
    #[test]
    fn test_hash_set_from_bytes_copy_9601_2400_5000() {
        hash_set_from_bytes_copy::<9601, 2400, 5000>()
    }
    /// Fills the set until `Full`, then verifies further insertions fail
    /// while sequence numbers stay below the threshold, and succeed again
    /// once cells expire.
    fn hash_set_full<const CAPACITY: usize, const SEQUENCE_THRESHOLD: usize>() {
        for _ in 0..100 {
            let mut hs = HashSet::new(CAPACITY, SEQUENCE_THRESHOLD).unwrap();
            let mut rng = rand::thread_rng();
            // Insert as many values as possible. The important point is to
            // encounter the `HashSetError::Full` at some point
            for i in 0..CAPACITY {
                let value = BigUint::from(Fr::rand(&mut rng));
                match hs.insert(&value, 0) {
                    Ok(index) => hs.mark_with_sequence_number(index, 0).unwrap(),
                    Err(e) => {
                        assert!(matches!(e, HashSetError::Full));
                        println!("initial insertions: {i}: failed, stopping");
                        break;
                    }
                }
            }
            // Keep inserting. It should mostly fail, although there might be
            // also some successful insertions - there might be values which
            // will end up in unused buckets.
            for i in 0..1000 {
                let value = BigUint::from(Fr::rand(&mut rng));
                let res = hs.insert(&value, 0);
                if res.is_err() {
                    assert!(matches!(res, Err(HashSetError::Full)));
                } else {
                    println!("secondary insertions: {i}: apparent success with value: {value:?}");
                }
            }
            // Try again with defined sequence numbers, but still too small to
            // vacate any cell.
            for i in 0..1000 {
                let value = BigUint::from(Fr::rand(&mut rng));
                // Sequence numbers lower than the threshold should not vacate
                // any cell.
                let sequence_number = rng.gen_range(0..hs.sequence_threshold);
                let res = hs.insert(&value, sequence_number);
                if res.is_err() {
                    assert!(matches!(res, Err(HashSetError::Full)));
                } else {
                    println!("tertiary insertions: {i}: surprising success with value: {value:?}");
                }
            }
            // Use sequence numbers which are going to vacate cells. All
            // insertions should be successful now.
            for i in 0..CAPACITY {
                let value = BigUint::from(Fr::rand(&mut rng));
                if let Err(e) = hs.insert(&value, SEQUENCE_THRESHOLD + i) {
                    assert!(matches!(e, HashSetError::Full));
                    println!("insertions after fillup: {i}: failed, stopping");
                    break;
                }
            }
        }
    }
    #[test]
    fn test_hash_set_full_6857_2400() {
        hash_set_full::<6857, 2400>()
    }
    #[test]
    fn test_hash_set_full_9601_2400() {
        hash_set_full::<9601, 2400>()
    }
    /// Marking an empty bucket must fail with `ElementDoesNotExist`;
    /// marking right after insertion must succeed.
    #[test]
    fn test_hash_set_element_does_not_exist() {
        let mut hs = HashSet::new(4800, 2400).unwrap();
        let mut rng = thread_rng();
        for _ in 0..1000 {
            let index = rng.gen_range(0..4800);
            // Assert `ElementDoesNotExist` error.
            let res = hs.mark_with_sequence_number(index, 0);
            assert!(matches!(res, Err(HashSetError::ElementDoesNotExist)));
        }
        for _ in 0..1000 {
            // After actually appending the value, the same operation should be
            // possible
            let value = BigUint::from(Fr::rand(&mut rng));
            let index = hs.insert(&value, 0).unwrap();
            hs.mark_with_sequence_number(index, 1).unwrap();
        }
    }
    /// Iteration order follows bucket (probe) positions, not insertion
    /// order; the expected sequence below is fixed by the probing function
    /// and the capacity 6857.
    #[test]
    fn test_hash_set_iter_manual() {
        let mut hs = HashSet::new(6857, 2400).unwrap();
        let nullifier_1 = 945635_u32.to_biguint().unwrap();
        let nullifier_2 = 3546656654734254353455_u128.to_biguint().unwrap();
        let nullifier_3 = 543543656564_u64.to_biguint().unwrap();
        let nullifier_4 = 43_u8.to_biguint().unwrap();
        let nullifier_5 = 0_u8.to_biguint().unwrap();
        let nullifier_6 = 65423_u32.to_biguint().unwrap();
        let nullifier_7 = 745654665_u32.to_biguint().unwrap();
        let nullifier_8 = 97664353453465354645645465_u128.to_biguint().unwrap();
        let nullifier_9 = 453565465464565635475_u128.to_biguint().unwrap();
        let nullifier_10 = 543645654645_u64.to_biguint().unwrap();
        hs.insert(&nullifier_1, 0).unwrap();
        hs.insert(&nullifier_2, 0).unwrap();
        hs.insert(&nullifier_3, 0).unwrap();
        hs.insert(&nullifier_4, 0).unwrap();
        hs.insert(&nullifier_5, 0).unwrap();
        hs.insert(&nullifier_6, 0).unwrap();
        hs.insert(&nullifier_7, 0).unwrap();
        hs.insert(&nullifier_8, 0).unwrap();
        hs.insert(&nullifier_9, 0).unwrap();
        hs.insert(&nullifier_10, 0).unwrap();
        let inserted_nullifiers = hs
            .iter()
            .map(|(_, nullifier)| nullifier.value_biguint())
            .collect::<Vec<_>>();
        assert_eq!(inserted_nullifiers.len(), 10);
        assert_eq!(inserted_nullifiers[0], nullifier_7);
        assert_eq!(inserted_nullifiers[1], nullifier_3);
        assert_eq!(inserted_nullifiers[2], nullifier_10);
        assert_eq!(inserted_nullifiers[3], nullifier_1);
        assert_eq!(inserted_nullifiers[4], nullifier_8);
        assert_eq!(inserted_nullifiers[5], nullifier_5);
        assert_eq!(inserted_nullifiers[6], nullifier_4);
        assert_eq!(inserted_nullifiers[7], nullifier_2);
        assert_eq!(inserted_nullifiers[8], nullifier_9);
        assert_eq!(inserted_nullifiers[9], nullifier_6);
    }
    /// Iterating must visit exactly the inserted values (order-insensitive
    /// check via sorting both sides).
    fn hash_set_iter_random<
        const INSERTIONS: usize,
        const CAPACITY: usize,
        const SEQUENCE_THRESHOLD: usize,
    >() {
        let mut hs = HashSet::new(CAPACITY, SEQUENCE_THRESHOLD).unwrap();
        let mut rng = thread_rng();
        let nullifiers: [BigUint; INSERTIONS] =
            std::array::from_fn(|_| BigUint::from(Fr::rand(&mut rng)));
        for nullifier in nullifiers.iter() {
            hs.insert(&nullifier, 0).unwrap();
        }
        let mut sorted_nullifiers = nullifiers.iter().collect::<Vec<_>>();
        let mut inserted_nullifiers = hs
            .iter()
            .map(|(_, nullifier)| nullifier.value_biguint())
            .collect::<Vec<_>>();
        sorted_nullifiers.sort();
        inserted_nullifiers.sort();
        let inserted_nullifiers = inserted_nullifiers.iter().collect::<Vec<&BigUint>>();
        assert_eq!(inserted_nullifiers.len(), INSERTIONS);
        assert_eq!(sorted_nullifiers.as_slice(), inserted_nullifiers.as_slice());
    }
    #[test]
    fn test_hash_set_iter_random_6857_2400() {
        hash_set_iter_random::<3500, 6857, 2400>()
    }
    #[test]
    fn test_hash_set_iter_random_9601_2400() {
        hash_set_iter_random::<5000, 9601, 2400>()
    }
    /// `get_bucket` returns `Some(Some(_))` for used indices,
    /// `Some(None)` for unused in-bounds indices and `None` out of bounds.
    #[test]
    fn test_hash_set_get_bucket() {
        let mut hs = HashSet::new(6857, 2400).unwrap();
        for i in 0..3600 {
            let bn_i = i.to_biguint().unwrap();
            hs.insert(&bn_i, i).unwrap();
        }
        let mut unused_indices = vec![true; 6857];
        for i in 0..3600 {
            let bn_i = i.to_biguint().unwrap();
            let i = hs.find_element_index(&bn_i, None).unwrap().unwrap();
            let element = hs.get_bucket(i).unwrap().unwrap();
            assert_eq!(element.value_biguint(), bn_i);
            unused_indices[i] = false;
        }
        // Unused cells within the capacity should be `Some(None)`.
        for i in unused_indices.iter().enumerate() {
            if *i.1 {
                assert!(hs.get_bucket(i.0).unwrap().is_none());
            }
        }
        // Cells over the capacity should be `None`.
        for i in 6857..10_000 {
            assert!(hs.get_bucket(i).is_none());
        }
    }
    /// Same contract as `get_bucket`, plus in-place mutation through the
    /// returned reference.
    #[test]
    fn test_hash_set_get_bucket_mut() {
        let mut hs = HashSet::new(6857, 2400).unwrap();
        for i in 0..3600 {
            let bn_i = i.to_biguint().unwrap();
            hs.insert(&bn_i, i).unwrap();
        }
        let mut unused_indices = vec![false; 6857];
        for i in 0..3600 {
            let bn_i = i.to_biguint().unwrap();
            let i = hs.find_element_index(&bn_i, None).unwrap().unwrap();
            let element = hs.get_bucket_mut(i).unwrap();
            assert_eq!(element.unwrap().value_biguint(), bn_i);
            unused_indices[i] = true;
            // "Nullify" the element.
            *element = Some(HashSetCell {
                value: [0_u8; 32],
                sequence_number: None,
            });
        }
        for (i, is_used) in unused_indices.iter().enumerate() {
            if *is_used {
                let element = hs.get_bucket_mut(i).unwrap().unwrap();
                assert_eq!(element.value_bytes(), [0_u8; 32]);
            }
        }
        // Unused cells within the capacity should be `Some(None)`.
        for (i, is_used) in unused_indices.iter().enumerate() {
            if !*is_used {
                assert!(hs.get_bucket_mut(i).unwrap().is_none());
            }
        }
        // Cells over the capacity should be `None`.
        for i in 6857..10_000 {
            assert!(hs.get_bucket_mut(i).is_none());
        }
    }
    /// `get_unmarked_bucket` yields the cell before marking and `None`
    /// afterwards.
    #[test]
    fn test_hash_set_get_unmarked_bucket() {
        let mut hs = HashSet::new(6857, 2400).unwrap();
        // Insert incremental elements, so they end up being in the same
        // sequence in the hash set.
        (0..3600).for_each(|i| {
            let bn_i = i.to_biguint().unwrap();
            hs.insert(&bn_i, i).unwrap();
        });
        for i in 0..3600 {
            let i = hs
                .find_element_index(&i.to_biguint().unwrap(), None)
                .unwrap()
                .unwrap();
            let element = hs.get_unmarked_bucket(i);
            assert!(element.is_some());
        }
        // Mark the elements.
        for i in 0..3600 {
            let index = hs
                .find_element_index(&i.to_biguint().unwrap(), None)
                .unwrap()
                .unwrap();
            hs.mark_with_sequence_number(index, i).unwrap();
        }
        for i in 0..3600 {
            let i = hs
                .find_element_index(&i.to_biguint().unwrap(), None)
                .unwrap()
                .unwrap();
            let element = hs.get_unmarked_bucket(i);
            assert!(element.is_none());
        }
    }
    /// `first_no_seq` scans buckets in index order; element `0` lands in
    /// bucket 0 and stays the first unmarked element throughout.
    #[test]
    fn test_hash_set_first_no_seq() {
        let mut hs = HashSet::new(6857, 2400).unwrap();
        // Insert incremental elements, so they end up being in the same
        // sequence in the hash set.
        for i in 0..3600 {
            let bn_i = i.to_biguint().unwrap();
            hs.insert(&bn_i, i).unwrap();
            let element = hs.first_no_seq().unwrap().unwrap();
            assert_eq!(element.0.value_biguint(), 0.to_biguint().unwrap());
        }
    }
}
| 0
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree
|
solana_public_repos/Lightprotocol/light-protocol/merkle-tree/indexed/Cargo.toml
|
[package]
name = "light-indexed-merkle-tree"
version = "1.1.0"
description = "Implementation of indexed (and concurrent) Merkle tree in Rust"
repository = "https://github.com/Lightprotocol/light-protocol"
license = "Apache-2.0"
edition = "2021"
[features]
solana = [
"light-concurrent-merkle-tree/solana",
"solana-program"
]
[dependencies]
borsh = { version = "0.10" }
light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" }
light-hasher = { path = "../hasher", version = "1.1.0" }
light-concurrent-merkle-tree = { path = "../concurrent", version = "1.1.0" }
light-merkle-tree-reference = { path = "../reference", version = "1.1.0" }
light-utils = { path = "../../utils", version = "1.1.0" }
memoffset = "0.9"
num-bigint = "0.4"
num-traits = "0.2"
solana-program = { workspace = true, optional = true }
thiserror = "1.0"
[dev-dependencies]
light-hash-set = { workspace = true }
thiserror = "1.0"
rand = "0.8"
hex = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.