repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/mod.rs | ipc/cli/src/commands/subnet/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
pub use crate::commands::subnet::create::{CreateSubnet, CreateSubnetArgs};
use crate::commands::subnet::genesis_epoch::{GenesisEpoch, GenesisEpochArgs};
pub use crate::commands::subnet::join::{JoinSubnet, JoinSubnetArgs};
pub use crate::commands::subnet::kill::{KillSubnet, KillSubnetArgs};
pub use crate::commands::subnet::leave::{LeaveSubnet, LeaveSubnetArgs};
use crate::commands::subnet::list_subnets::{ListSubnets, ListSubnetsArgs};
use crate::commands::subnet::rpc::{RPCSubnet, RPCSubnetArgs};
use crate::commands::subnet::send_value::{SendValue, SendValueArgs};
use crate::commands::subnet::set_federated_power::{SetFederatedPower, SetFederatedPowerArgs};
use crate::commands::subnet::show_gateway_contract_commit_sha::{
ShowGatewayContractCommitSha, ShowGatewayContractCommitShaArgs,
};
use crate::commands::subnet::validator::{ValidatorInfo, ValidatorInfoArgs};
use crate::{CommandLineHandler, GlobalArguments};
use clap::{Args, Subcommand};
use self::bootstrap::{AddBootstrap, AddBootstrapArgs, ListBootstraps, ListBootstrapsArgs};
use self::join::{StakeSubnet, StakeSubnetArgs, UnstakeSubnet, UnstakeSubnetArgs};
use self::leave::{Claim, ClaimArgs};
use self::rpc::{ChainIdSubnet, ChainIdSubnetArgs};
pub mod bootstrap;
pub mod create;
mod genesis_epoch;
pub mod join;
pub mod kill;
pub mod leave;
pub mod list_subnets;
pub mod rpc;
pub mod send_value;
mod set_federated_power;
pub mod show_gateway_contract_commit_sha;
mod validator;
// Argument container for the top-level `subnet` command group; clap parses the
// selected subcommand into `command`, and `handle` below dispatches it.
// (`//` comments are used on purpose: `///` would be consumed by the clap
// derive and change the generated help text.)
#[derive(Debug, Args)]
#[command(
    name = "subnet",
    about = "subnet related commands such as create, join and etc"
)]
#[command(args_conflicts_with_subcommands = true)]
pub(crate) struct SubnetCommandsArgs {
    // The subcommand selected on the command line (variants in `Commands` below).
    #[command(subcommand)]
    command: Commands,
}
impl SubnetCommandsArgs {
    // Dispatches the parsed `subnet` subcommand to its matching
    // `CommandLineHandler` implementation, forwarding the global CLI arguments
    // (config path, etc.). Each arm simply awaits the handler and returns its
    // result unchanged.
    pub async fn handle(&self, global: &GlobalArguments) -> anyhow::Result<()> {
        match &self.command {
            Commands::Create(args) => CreateSubnet::handle(global, args).await,
            Commands::List(args) => ListSubnets::handle(global, args).await,
            Commands::Join(args) => JoinSubnet::handle(global, args).await,
            Commands::Rpc(args) => RPCSubnet::handle(global, args).await,
            Commands::ChainId(args) => ChainIdSubnet::handle(global, args).await,
            Commands::Leave(args) => LeaveSubnet::handle(global, args).await,
            Commands::Kill(args) => KillSubnet::handle(global, args).await,
            Commands::SendValue(args) => SendValue::handle(global, args).await,
            Commands::Stake(args) => StakeSubnet::handle(global, args).await,
            Commands::Unstake(args) => UnstakeSubnet::handle(global, args).await,
            Commands::Claim(args) => Claim::handle(global, args).await,
            Commands::AddBootstrap(args) => AddBootstrap::handle(global, args).await,
            Commands::ListBootstraps(args) => ListBootstraps::handle(global, args).await,
            Commands::GenesisEpoch(args) => GenesisEpoch::handle(global, args).await,
            Commands::GetValidator(args) => ValidatorInfo::handle(global, args).await,
            Commands::ShowGatewayContractCommitSha(args) => {
                ShowGatewayContractCommitSha::handle(global, args).await
            }
            Commands::SetFederatedPower(args) => SetFederatedPower::handle(global, args).await,
        }
    }
}
// All `subnet` subcommands; each variant carries the clap `Args` struct for
// that command. `//` comments only — `///` on variants would be picked up by
// the clap derive as per-subcommand `about` text.
#[derive(Debug, Subcommand)]
pub(crate) enum Commands {
    Create(CreateSubnetArgs),
    List(ListSubnetsArgs),
    Join(JoinSubnetArgs),
    Rpc(RPCSubnetArgs),
    ChainId(ChainIdSubnetArgs),
    Leave(LeaveSubnetArgs),
    Kill(KillSubnetArgs),
    SendValue(SendValueArgs),
    Stake(StakeSubnetArgs),
    Unstake(UnstakeSubnetArgs),
    Claim(ClaimArgs),
    AddBootstrap(AddBootstrapArgs),
    ListBootstraps(ListBootstrapsArgs),
    GenesisEpoch(GenesisEpochArgs),
    GetValidator(ValidatorInfoArgs),
    ShowGatewayContractCommitSha(ShowGatewayContractCommitShaArgs),
    SetFederatedPower(SetFederatedPowerArgs),
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/join.rs | ipc/cli/src/commands/subnet/join.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! Join subnet cli command handler.
use async_trait::async_trait;
use clap::Args;
use ipc_api::subnet_id::SubnetID;
use num_traits::Zero;
use std::{fmt::Debug, str::FromStr};
use crate::{
f64_to_token_amount, get_ipc_provider, require_fil_addr_from_str, CommandLineHandler,
GlobalArguments,
};
/// Handler for the `join` subcommand: stakes collateral to join a subnet,
/// optionally pre-funding the joining address in the subnet's genesis.
pub struct JoinSubnet;
#[async_trait]
impl CommandLineHandler for JoinSubnet {
    type Arguments = JoinSubnetArgs;

    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("join subnet with args: {:?}", arguments);

        let mut provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.subnet)?;
        // Resolve the optional sender address, propagating any parse failure.
        let from = arguments
            .from
            .as_ref()
            .map(|address| require_fil_addr_from_str(address))
            .transpose()?;

        // A non-zero initial balance means the caller wants funds committed in
        // the subnet genesis before joining; zero is treated as "no pre-fund".
        if let Some(initial_balance) = arguments.initial_balance.filter(|amount| !amount.is_zero())
        {
            log::info!("pre-funding address with {initial_balance}");
            provider
                .pre_fund(subnet.clone(), from, f64_to_token_amount(initial_balance)?)
                .await?;
        }

        let epoch = provider
            .join_subnet(subnet, from, f64_to_token_amount(arguments.collateral)?)
            .await?;
        println!("joined at epoch: {epoch}");
        Ok(())
    }
}
// CLI arguments for `subnet join`. `//` comments only: `///` doc comments on
// clap fields would change the generated help output.
#[derive(Debug, Args)]
#[command(name = "join", about = "Join a subnet")]
pub struct JoinSubnetArgs {
    // Optional sender; when absent the provider's default address is used.
    #[arg(long, help = "The address that joins the subnet")]
    pub from: Option<String>,
    #[arg(long, help = "The subnet to join")]
    pub subnet: String,
    #[arg(
        long,
        help = "The collateral to stake in the subnet (in whole FIL units)"
    )]
    pub collateral: f64,
    // Zero is treated the same as omitting the flag (see the handler).
    #[arg(
        long,
        help = "Optionally add an initial balance to the validator in genesis in the subnet"
    )]
    pub initial_balance: Option<f64>,
}
/// The command to stake additional collateral in a subnet from a validator.
pub struct StakeSubnet;
#[async_trait]
impl CommandLineHandler for StakeSubnet {
    type Arguments = StakeSubnetArgs;
    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        // Fixed copy-pasted log message: this handler stakes, it does not join.
        log::debug!("stake in subnet with args: {:?}", arguments);
        let mut provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.subnet)?;
        // Optional sender address; parse errors are propagated to the caller.
        let from = match &arguments.from {
            Some(address) => Some(require_fil_addr_from_str(address)?),
            None => None,
        };
        provider
            .stake(subnet, from, f64_to_token_amount(arguments.collateral)?)
            .await
    }
}
// CLI arguments for `subnet stake` (adds collateral to an already-joined subnet).
#[derive(Debug, Args)]
#[command(name = "stake", about = "Add collateral to an already joined subnet")]
pub struct StakeSubnetArgs {
    // Optional sender; defaults to the provider's configured address.
    #[arg(long, help = "The address that stakes in the subnet")]
    pub from: Option<String>,
    #[arg(long, help = "The subnet to add collateral to")]
    pub subnet: String,
    #[arg(
        long,
        help = "The collateral to stake in the subnet (in whole FIL units)"
    )]
    pub collateral: f64,
}
/// The command to unstake (release) collateral in a subnet from a validator.
pub struct UnstakeSubnet;
#[async_trait]
impl CommandLineHandler for UnstakeSubnet {
    type Arguments = UnstakeSubnetArgs;
    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        // Fixed copy-pasted log message: this handler unstakes, it does not join.
        log::debug!("unstake from subnet with args: {:?}", arguments);
        let mut provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.subnet)?;
        // Optional sender address; parse errors are propagated to the caller.
        let from = match &arguments.from {
            Some(address) => Some(require_fil_addr_from_str(address)?),
            None => None,
        };
        provider
            .unstake(subnet, from, f64_to_token_amount(arguments.collateral)?)
            .await
    }
}
// CLI arguments for `subnet unstake` (releases previously staked collateral).
#[derive(Debug, Args)]
#[command(
    name = "unstake",
    about = "Remove collateral to an already joined subnet"
)]
pub struct UnstakeSubnetArgs {
    // Optional sender; defaults to the provider's configured address.
    #[arg(long, help = "The address that unstakes in the subnet")]
    pub from: Option<String>,
    #[arg(long, help = "The subnet to release collateral from")]
    pub subnet: String,
    #[arg(
        long,
        help = "The collateral to unstake from the subnet (in whole FIL units)"
    )]
    pub collateral: f64,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/validator.rs | ipc/cli/src/commands/subnet/validator.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! Get the validator information
use async_trait::async_trait;
use clap::Args;
use fvm_shared::address::Address;
use ipc_api::subnet_id::SubnetID;
use std::fmt::Debug;
use std::str::FromStr;
use crate::{get_ipc_provider, CommandLineHandler, GlobalArguments};
/// Handler for `validator-info`: queries and prints a validator's details.
pub(crate) struct ValidatorInfo;
#[async_trait]
impl CommandLineHandler for ValidatorInfo {
    type Arguments = ValidatorInfoArgs;

    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("get validator info with args: {:?}", arguments);

        let provider = get_ipc_provider(global)?;
        // Parse both CLI identifiers up front so malformed input fails fast.
        let subnet_id = SubnetID::from_str(&arguments.subnet)?;
        let validator_addr = Address::from_str(&arguments.validator)?;

        let info = provider
            .get_validator_info(&subnet_id, &validator_addr)
            .await?;
        println!("{}", info);
        Ok(())
    }
}
// CLI arguments for `validator-info`.
#[derive(Debug, Args)]
#[command(name = "validator-info", about = "Get the validator info")]
pub(crate) struct ValidatorInfoArgs {
    #[arg(long, help = "The subnet id to query validator info")]
    pub subnet: String,
    #[arg(long, help = "The validator address")]
    pub validator: String,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/rpc.rs | ipc/cli/src/commands/subnet/rpc.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! RPC subnet cli command handler.
use async_trait::async_trait;
use clap::Args;
use ipc_api::subnet_id::SubnetID;
use std::fmt::Debug;
use std::str::FromStr;
use crate::{get_ipc_provider, CommandLineHandler, GlobalArguments};
/// The command to get the RPC endpoint (and chain ID) for a subnet.
pub struct RPCSubnet;
#[async_trait]
impl CommandLineHandler for RPCSubnet {
    type Arguments = RPCSubnetArgs;
    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("get rpc for subnet with args: {:?}", arguments);
        let provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.network)?;
        // A missing connection means the subnet is not in the local config.
        let conn = provider
            .connection(&subnet)
            .ok_or_else(|| anyhow::anyhow!("target subnet not found"))?;
        // Use Display instead of Debug for the URL string: `{:?}` on a String
        // wrapped the endpoint in literal quotes in the output.
        println!("rpc: {}", conn.subnet().rpc_http().to_string());
        println!("chainID: {:?}", conn.manager().get_chain_id().await?);
        Ok(())
    }
}
// CLI arguments for `subnet rpc`.
#[derive(Debug, Args)]
#[command(name = "rpc", about = "RPC endpoint for a subnet")]
pub struct RPCSubnetArgs {
    #[arg(long, help = "The network to get the ChainId from")]
    pub network: String,
}
/// Handler for `chain-id`: prints the chain ID of the target subnet.
pub struct ChainIdSubnet;
#[async_trait]
impl CommandLineHandler for ChainIdSubnet {
    type Arguments = ChainIdSubnetArgs;

    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("get chain-id for subnet with args: {:?}", arguments);

        let provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.network)?;
        // A missing connection means the subnet is not in the local config.
        let conn = provider
            .connection(&subnet)
            .ok_or_else(|| anyhow::anyhow!("target subnet not found"))?;

        // `{:}` and `{}` are the same Display format spec; normalized here.
        println!("{}", conn.manager().get_chain_id().await?);
        Ok(())
    }
}
// CLI arguments for `subnet chain-id`.
#[derive(Debug, Args)]
#[command(name = "chain-id", about = "Chain ID endpoint for a subnet")]
pub struct ChainIdSubnetArgs {
    #[arg(long, help = "The network to get the Chain ID from")]
    pub network: String,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/genesis_epoch.rs | ipc/cli/src/commands/subnet/genesis_epoch.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! Get the genesis epoch cli command
use async_trait::async_trait;
use clap::Args;
use ipc_api::subnet_id::SubnetID;
use std::fmt::Debug;
use std::str::FromStr;
use crate::{get_ipc_provider, CommandLineHandler, GlobalArguments};
/// The command to get the genesis epoch of a subnet.
pub(crate) struct GenesisEpoch;
#[async_trait]
impl CommandLineHandler for GenesisEpoch {
    type Arguments = GenesisEpochArgs;

    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("get genesis epoch with args: {:?}", arguments);

        let provider = get_ipc_provider(global)?;
        let subnet_id = SubnetID::from_str(&arguments.subnet)?;
        let epoch = provider.genesis_epoch(&subnet_id).await?;
        println!("genesis epoch: {}", epoch);
        Ok(())
    }
}
// CLI arguments for `subnet genesis-epoch`.
#[derive(Debug, Args)]
#[command(name = "genesis-epoch", about = "Get the genesis epoch of subnet")]
pub(crate) struct GenesisEpochArgs {
    #[arg(long, help = "The subnet id to query genesis epoch")]
    pub subnet: String,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/leave.rs | ipc/cli/src/commands/subnet/leave.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! Leave subnet cli command handler.
use async_trait::async_trait;
use clap::Args;
use ipc_api::subnet_id::SubnetID;
use std::{fmt::Debug, str::FromStr};
use crate::{get_ipc_provider, require_fil_addr_from_str, CommandLineHandler, GlobalArguments};
/// Handler for `leave`: removes the sender from the target subnet.
pub struct LeaveSubnet;
#[async_trait]
impl CommandLineHandler for LeaveSubnet {
    type Arguments = LeaveSubnetArgs;

    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("leave subnet with args: {:?}", arguments);

        let mut provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.subnet)?;
        // Resolve the optional sender address, propagating any parse failure.
        let from = arguments
            .from
            .as_ref()
            .map(|address| require_fil_addr_from_str(address))
            .transpose()?;
        provider.leave_subnet(subnet, from).await
    }
}
// CLI arguments for `subnet leave`.
#[derive(Debug, Args)]
#[command(name = "leave", about = "Leaving a subnet")]
pub struct LeaveSubnetArgs {
    // Optional sender; defaults to the provider's configured address.
    #[arg(long, help = "The address that leaves the subnet")]
    pub from: Option<String>,
    #[arg(long, help = "The subnet to leave")]
    pub subnet: String,
}
/// The command to claim collateral for a validator after leaving a subnet.
pub struct Claim;
#[async_trait]
impl CommandLineHandler for Claim {
    type Arguments = ClaimArgs;
    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        // Fixed copy-pasted log message: this is `claim`, not `leave`.
        log::debug!("claim with args: {:?}", arguments);
        let mut provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.subnet)?;
        // Optional sender address; parse errors are propagated to the caller.
        let from = match &arguments.from {
            Some(address) => Some(require_fil_addr_from_str(address)?),
            None => None,
        };
        // NOTE(review): `arguments.rewards` is accepted by the CLI but never
        // consulted here — collateral is always claimed. Confirm whether a
        // separate reward-claiming provider call should be wired up.
        provider.claim_collateral(subnet, from).await
    }
}
// CLI arguments for `subnet claim`.
#[derive(Debug, Args)]
#[command(
    name = "claim",
    about = "Claim collateral or rewards available for validators and relayers, respectively"
)]
pub struct ClaimArgs {
    // Optional sender; defaults to the provider's configured address.
    #[arg(long, help = "The address that claims the collateral")]
    pub from: Option<String>,
    #[arg(long, help = "The subnet to claim from")]
    pub subnet: String,
    // NOTE(review): this flag is parsed but the handler in this file does not
    // read it — collateral is always claimed regardless. Verify intent.
    #[arg(
        long,
        help = "Determine if we want to claim rewards instead of collateral"
    )]
    pub rewards: bool,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/bootstrap.rs | ipc/cli/src/commands/subnet/bootstrap.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! Subnet bootstrap-related commands
use async_trait::async_trait;
use clap::Args;
use ipc_api::subnet_id::SubnetID;
use std::{fmt::Debug, str::FromStr};
use crate::{get_ipc_provider, require_fil_addr_from_str, CommandLineHandler, GlobalArguments};
/// Handler for `add-bootstrap`: advertises a bootstrap node endpoint in a subnet.
pub struct AddBootstrap;
#[async_trait]
impl CommandLineHandler for AddBootstrap {
    type Arguments = AddBootstrapArgs;

    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("add subnet bootstrap with args: {:?}", arguments);

        let mut provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.subnet)?;
        // Resolve the optional sender address, propagating any parse failure.
        let from = arguments
            .from
            .as_ref()
            .map(|address| require_fil_addr_from_str(address))
            .transpose()?;
        provider
            .add_bootstrap(&subnet, from, arguments.endpoint.clone())
            .await
    }
}
// CLI arguments for `subnet add-bootstrap`.
#[derive(Debug, Args)]
#[command(name = "add-bootstrap", about = "Advertise bootstrap in the subnet")]
pub struct AddBootstrapArgs {
    // Optional validator address; defaults to the provider's configured address.
    #[arg(long, help = "The address of the validator adding the bootstrap")]
    pub from: Option<String>,
    #[arg(long, help = "The subnet to add the bootstrap to")]
    pub subnet: String,
    #[arg(long, help = "The bootstrap node's network endpoint")]
    pub endpoint: String,
}
/// The command to list bootstrap nodes in a subnet, printed comma-separated.
pub struct ListBootstraps;
#[async_trait]
impl CommandLineHandler for ListBootstraps {
    type Arguments = ListBootstrapsArgs;
    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        // Fixed copy-pasted log message: this lists bootstraps, it does not add one.
        log::debug!("list subnet bootstraps with args: {:?}", arguments);
        let provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.subnet)?;
        // The vector of node strings joins directly; the previous
        // `.iter().as_slice()` round-trip was a no-op.
        let nodes = provider.list_bootstrap_nodes(&subnet).await?;
        println!("{}", nodes.join(","));
        Ok(())
    }
}
// CLI arguments for `subnet list-bootstraps`.
#[derive(Debug, Args)]
#[command(name = "list-bootstraps", about = "List bootstraps in the subnet")]
pub struct ListBootstrapsArgs {
    #[arg(long, help = "The subnet to list bootstraps from")]
    pub subnet: String,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/create.rs | ipc/cli/src/commands/subnet/create.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! Create subnet cli command handler.
use std::fmt::Debug;
use std::str::FromStr;
use async_trait::async_trait;
use clap::Args;
use fvm_shared::clock::ChainEpoch;
use ipc_api::subnet::{PermissionMode, SupplyKind, SupplySource};
use ipc_api::subnet_id::SubnetID;
use crate::commands::get_ipc_provider;
use crate::{f64_to_token_amount, require_fil_addr_from_str, CommandLineHandler, GlobalArguments};
// Cap on active validators used when the caller does not pass a limit.
const DEFAULT_ACTIVE_VALIDATORS: u16 = 100;
/// The command to create a new subnet actor.
pub struct CreateSubnet;
impl CreateSubnet {
    /// Creates the subnet actor under `parent` and returns its address,
    /// stringified. Shared by the CLI handler and any programmatic callers.
    pub async fn create(
        global: &GlobalArguments,
        arguments: &CreateSubnetArgs,
    ) -> anyhow::Result<String> {
        let mut provider = get_ipc_provider(global)?;
        let parent = SubnetID::from_str(&arguments.parent)?;

        // Optional sender and supply-source addresses: parse when present,
        // propagating parse failures instead of defaulting silently.
        let from = arguments
            .from
            .as_ref()
            .map(|addr| require_fil_addr_from_str(addr))
            .transpose()?;
        let token_address = arguments
            .supply_source_address
            .as_ref()
            .map(|addr| require_fil_addr_from_str(addr))
            .transpose()?;

        let supply_source = SupplySource {
            kind: arguments.supply_source_kind,
            token_address,
        };

        let validators_limit = arguments
            .active_validators_limit
            .unwrap_or(DEFAULT_ACTIVE_VALIDATORS);

        let addr = provider
            .create_subnet(
                from,
                parent,
                arguments.min_validators,
                f64_to_token_amount(arguments.min_validator_stake)?,
                arguments.bottomup_check_period,
                validators_limit,
                f64_to_token_amount(arguments.min_cross_msg_fee)?,
                arguments.permission_mode,
                supply_source,
            )
            .await?;
        Ok(addr.to_string())
    }
}
#[async_trait]
impl CommandLineHandler for CreateSubnet {
    type Arguments = CreateSubnetArgs;

    // Thin CLI wrapper around `CreateSubnet::create`: runs the creation and
    // logs the resulting child subnet id as `<parent>/<actor-address>`.
    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("create subnet with args: {:?}", arguments);
        let actor_addr = Self::create(global, arguments).await?;
        log::info!(
            "created subnet actor with id: {}/{}",
            arguments.parent,
            actor_addr
        );
        Ok(())
    }
}
// CLI arguments for `subnet create`. `//` comments only: `///` on clap fields
// would change the generated help output.
#[derive(Debug, Args)]
#[command(name = "create", about = "Create a new subnet actor")]
pub struct CreateSubnetArgs {
    // Optional sender; defaults to the provider's configured address.
    #[arg(long, help = "The address that creates the subnet")]
    pub from: Option<String>,
    #[arg(long, help = "The parent subnet to create the new actor in")]
    pub parent: String,
    #[arg(
        long,
        help = "The minimum number of collateral required for validators in (in whole FIL; the minimum is 1 nanoFIL)"
    )]
    pub min_validator_stake: f64,
    #[arg(
        long,
        help = "Minimum number of validators required to bootstrap the subnet"
    )]
    pub min_validators: u64,
    #[arg(long, help = "The bottom up checkpoint period in number of blocks")]
    pub bottomup_check_period: ChainEpoch,
    // When omitted, the handler substitutes DEFAULT_ACTIVE_VALIDATORS (100).
    #[arg(long, help = "The max number of active validators in subnet")]
    pub active_validators_limit: Option<u16>,
    #[arg(
        long,
        default_value = "0.000001",
        help = "Minimum fee for cross-net messages in subnet (in whole FIL; the minimum is 1 nanoFIL)"
    )]
    pub min_cross_msg_fee: f64,
    // Parsed via the type's own FromStr impl rather than clap's ValueEnum.
    #[arg(
        long,
        help = "The permission mode for the subnet: collateral, federated and static",
        value_parser = PermissionMode::from_str,
    )]
    // TODO figure out a way to use a newtype + ValueEnum, or reference PermissionMode::VARIANTS to
    // enumerate all variants
    pub permission_mode: PermissionMode,
    #[arg(
        long,
        help = "The kind of supply source of a subnet on its parent subnet: native or erc20",
        value_parser = SupplyKind::from_str,
    )]
    // TODO figure out a way to use a newtype + ValueEnum, or reference SupplySourceKind::VARIANTS to
    // enumerate all variants
    pub supply_source_kind: SupplyKind,
    #[arg(
        long,
        help = "The address of supply source of a subnet on its parent subnet. None if kind is native"
    )]
    pub supply_source_address: Option<String>,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/show_gateway_contract_commit_sha.rs | ipc/cli/src/commands/subnet/show_gateway_contract_commit_sha.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use async_trait::async_trait;
use clap::Args;
use ipc_api::subnet_id::SubnetID;
use std::fmt::Debug;
use std::str::from_utf8;
use std::str::FromStr;
use crate::{get_ipc_provider, CommandLineHandler, GlobalArguments};
/// The command to show the code commit SHA of the contracts deployed in a network.
pub(crate) struct ShowGatewayContractCommitSha;
#[async_trait]
impl CommandLineHandler for ShowGatewayContractCommitSha {
    type Arguments = ShowGatewayContractCommitShaArgs;
    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("show contract commit sha with args: {:?}", arguments);
        let provider = get_ipc_provider(global)?;
        let subnet = SubnetID::from_str(&arguments.network)?;
        let commit_sha = provider.get_commit_sha(&subnet).await?;
        // Propagate invalid UTF-8 as an error instead of panicking: the bytes
        // come back from a remote query and are not guaranteed to be valid.
        let commit_sha_str = from_utf8(&commit_sha)?;
        println!(
            "Using commit SHA {} for contracts in subnet {}",
            commit_sha_str, subnet
        );
        Ok(())
    }
}
// CLI arguments for `subnet show-gateway-contract-commit-sha`.
#[derive(Debug, Args)]
#[command(
    name = "show-gateway-contract-commit-sha",
    about = "Show code commit SHA for contracts deployed in this network"
)]
pub(crate) struct ShowGatewayContractCommitShaArgs {
    #[arg(long, help = "The network id to query child subnets")]
    pub network: String,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/subnet/set_federated_power.rs | ipc/cli/src/commands/subnet/set_federated_power.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! Set federated power cli handler
use crate::commands::{get_ipc_provider, require_fil_addr_from_str};
use crate::{CommandLineHandler, GlobalArguments};
use async_trait::async_trait;
use clap::Args;
use fvm_shared::address::Address;
use ipc_api::subnet_id::SubnetID;
use std::str::FromStr;
/// The command to set federated power.
pub struct SetFederatedPower;
#[async_trait]
impl CommandLineHandler for crate::commands::subnet::SetFederatedPower {
type Arguments = crate::commands::subnet::SetFederatedPowerArgs;
async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
log::debug!("set federated power with args: {:?}", arguments);
let provider = get_ipc_provider(global)?;
let subnet = SubnetID::from_str(&arguments.subnet)?;
let addresses: Vec<Address> = arguments
.validator_addresses
.iter()
.map(|address| require_fil_addr_from_str(address).unwrap())
.collect();
let public_keys: Vec<Vec<u8>> = arguments
.validator_pubkeys
.iter()
.map(|key| hex::decode(key).unwrap())
.collect();
let from_address = require_fil_addr_from_str(&arguments.from).unwrap();
let chain_epoch = provider
.set_federated_power(
&from_address,
&subnet,
&addresses,
&public_keys,
&arguments.validator_power,
)
.await?;
println!("New federated power is set at epoch {chain_epoch}");
Ok(())
}
}
// CLI arguments for `subnet set-federated-power`. The three list flags are
// positionally aligned: the i-th address, pubkey and power describe the same
// validator.
#[derive(Debug, Args)]
#[command(
    name = "set-federated-power",
    about = "Set federated power for validators"
)]
pub struct SetFederatedPowerArgs {
    #[arg(long, help = "The address to sign and pay for this transaction.")]
    pub from: String,
    // Fixed copy-pasted help text: this command sets power, it does not
    // release collateral.
    #[arg(long, help = "The subnet to set federated power in")]
    pub subnet: String,
    #[arg(long, num_args = 1.., help = "Addresses of validators, separated by space")]
    pub validator_addresses: Vec<String>,
    #[arg(long, num_args = 1.., help = "Public keys of validators, separated by space")]
    pub validator_pubkeys: Vec<String>,
    // Fixed garbled help text ("Federated of validators").
    #[arg(long, num_args = 1.., help = "Federated power of validators, separated by space")]
    pub validator_power: Vec<u128>,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/config/mod.rs | ipc/cli/src/commands/config/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! This mod triggers a config reload in the IPC-Agent Json RPC server.
mod init;
use clap::{Args, Subcommand};
use std::fmt::Debug;
use crate::commands::config::init::{InitConfig, InitConfigArgs};
use crate::{CommandLineHandler, GlobalArguments};
// Argument container for the top-level `config` command group.
#[derive(Debug, Args)]
#[command(name = "config", about = "config related commands")]
#[command(args_conflicts_with_subcommands = true)]
pub(crate) struct ConfigCommandsArgs {
    // The selected `config` subcommand (currently only `init`).
    #[command(subcommand)]
    command: Commands,
}
impl ConfigCommandsArgs {
    /// Dispatch the selected `config` subcommand to its handler.
    pub async fn handle(&self, global: &GlobalArguments) -> anyhow::Result<()> {
        // `Commands` has a single variant, so this pattern is irrefutable.
        let Commands::Init(args) = &self.command;
        InitConfig::handle(global, args).await
    }
}
// All `config` subcommands; each variant carries its clap `Args` struct.
#[derive(Debug, Subcommand)]
pub(crate) enum Commands {
    Init(InitConfigArgs),
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/config/init.rs | ipc/cli/src/commands/config/init.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use crate::{CommandLineHandler, GlobalArguments};
use async_trait::async_trait;
use ipc_provider::config::DEFAULT_CONFIG_TEMPLATE;
use std::io::Write;
use clap::Args;
/// The command to initialize a new config template in a specific path
pub(crate) struct InitConfig;
#[async_trait]
impl CommandLineHandler for InitConfig {
type Arguments = InitConfigArgs;
async fn handle(global: &GlobalArguments, _arguments: &Self::Arguments) -> anyhow::Result<()> {
let path = global.config_path();
log::debug!("initializing empty config file in {}", path);
let file_path = std::path::Path::new(&path);
if let Some(parent) = file_path.parent() {
std::fs::create_dir_all(parent)?;
}
let mut file = std::fs::File::create(&path).map_err(|e| {
log::error!("couldn't create config file");
e
})?;
file.write_all(DEFAULT_CONFIG_TEMPLATE.as_bytes())
.map_err(|e| {
log::error!("error populating empty config template");
e
})?;
log::info!("Empty config populated successful in {}", &path);
Ok(())
}
}
// CLI arguments for `config init` (currently takes no flags; the target path
// comes from the global `--config-path` argument).
#[derive(Debug, Args)]
#[command(about = "Arguments to initialize a new empty config file")]
pub(crate) struct InitConfigArgs {}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/checkpoint/relayer.rs | ipc/cli/src/commands/checkpoint/relayer.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use crate::commands::get_subnet_config;
use crate::{require_fil_addr_from_str, CommandLineHandler, GlobalArguments};
use anyhow::anyhow;
use async_trait::async_trait;
use clap::Args;
use fvm_shared::address::Address;
use fvm_shared::clock::ChainEpoch;
use ipc_api::subnet_id::SubnetID;
use ipc_provider::checkpoint::BottomUpCheckpointManager;
use ipc_provider::config::Config;
use ipc_provider::new_evm_keystore_from_config;
use ipc_wallet::EvmKeyStore;
use std::str::FromStr;
use std::sync::{Arc, RwLock};
use std::time::Duration;
// Default checkpoint-submission polling interval, in seconds.
const DEFAULT_POLLING_INTERVAL: u64 = 15;
/// The command to run the bottom up relayer in the background.
pub(crate) struct BottomUpRelayer;
#[async_trait]
impl CommandLineHandler for BottomUpRelayer {
    type Arguments = BottomUpRelayerArgs;
    async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
        log::debug!("start bottom up relayer with args: {:?}", arguments);
        let config_path = global.config_path();
        let config = Arc::new(Config::from_file(&config_path)?);
        let mut keystore = new_evm_keystore_from_config(config)?;
        // Pick the submitter address: an explicit `--submitter` flag wins;
        // otherwise fall back to the keystore's default address; fail if
        // neither is available.
        let submitter = match (arguments.submitter.as_ref(), keystore.get_default()?) {
            (Some(submitter), _) => require_fil_addr_from_str(submitter)?,
            (None, Some(addr)) => {
                log::info!("using default address: {addr:?}");
                Address::try_from(addr)?
            }
            _ => {
                return Err(anyhow!("no submitter address provided"));
            }
        };
        let subnet = SubnetID::from_str(&arguments.subnet)?;
        // Checkpoints flow from child to parent, so a root subnet (which has
        // no parent) cannot run a relayer.
        let parent = subnet
            .parent()
            .ok_or_else(|| anyhow!("root does not have parent"))?;
        // Note: `parent` is deliberately shadowed below, going from a SubnetID
        // to the resolved per-subnet config entry.
        let child = get_subnet_config(&config_path, &subnet)?;
        let parent = get_subnet_config(&config_path, &parent)?;
        let mut manager = BottomUpCheckpointManager::new_evm_manager(
            parent.clone(),
            child.clone(),
            Arc::new(RwLock::new(keystore)),
            arguments.max_parallelism,
        )
        .await?;
        if let Some(v) = arguments.finalization_blocks {
            manager = manager.with_finalization_blocks(v as ChainEpoch);
        }
        let interval = Duration::from_secs(
            arguments
                .checkpoint_interval_sec
                .unwrap_or(DEFAULT_POLLING_INTERVAL),
        );
        // NOTE(review): `run` presumably polls and submits on `interval` until
        // the process stops — confirm against BottomUpCheckpointManager::run.
        manager.run(submitter, interval).await;
        Ok(())
    }
}
// CLI arguments for `checkpoint relayer`. `//` comments only: `///` on clap
// fields would change the generated help output.
#[derive(Debug, Args)]
#[command(about = "Start the bottom up relayer daemon")]
pub(crate) struct BottomUpRelayerArgs {
    #[arg(long, help = "The subnet id of the checkpointing subnet")]
    pub subnet: String,
    // When omitted, DEFAULT_POLLING_INTERVAL (15s) is used.
    #[arg(long, help = "The number of seconds to submit checkpoint")]
    pub checkpoint_interval_sec: Option<u64>,
    #[arg(
        long,
        default_value = "0",
        help = "The number of blocks away from chain head that is considered final"
    )]
    pub finalization_blocks: Option<u64>,
    // When omitted, the keystore's default address is used (see the handler).
    #[arg(long, help = "The hex encoded address of the submitter")]
    pub submitter: Option<String>,
    #[arg(
        long,
        default_value = "4",
        help = "The max parallelism for submitting checkpoints"
    )]
    pub max_parallelism: usize,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/checkpoint/mod.rs | ipc/cli/src/commands/checkpoint/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use crate::commands::checkpoint::bottomup_bundles::{GetBottomUpBundles, GetBottomUpBundlesArgs};
use crate::commands::checkpoint::bottomup_height::{
LastBottomUpCheckpointHeight, LastBottomUpCheckpointHeightArgs,
};
use crate::commands::checkpoint::list_validator_changes::{
ListValidatorChanges, ListValidatorChangesArgs,
};
use crate::commands::checkpoint::quorum_reached::{
GetQuorumReacehdEvents, GetQuorumReachedEventsArgs,
};
use crate::commands::checkpoint::relayer::{BottomUpRelayer, BottomUpRelayerArgs};
use crate::{CommandLineHandler, GlobalArguments};
use clap::{Args, Subcommand};
mod bottomup_bundles;
mod bottomup_height;
mod list_validator_changes;
mod quorum_reached;
mod relayer;
#[derive(Debug, Args)]
#[command(name = "checkpoint", about = "checkpoint related commands")]
#[command(args_conflicts_with_subcommands = true)]
pub(crate) struct CheckpointCommandsArgs {
#[command(subcommand)]
command: Commands,
}
impl CheckpointCommandsArgs {
pub async fn handle(&self, global: &GlobalArguments) -> anyhow::Result<()> {
match &self.command {
Commands::Relayer(args) => BottomUpRelayer::handle(global, args).await,
Commands::ListValidatorChanges(args) => {
ListValidatorChanges::handle(global, args).await
}
Commands::ListBottomupBundle(args) => GetBottomUpBundles::handle(global, args).await,
Commands::QuorumReachedEvents(args) => {
GetQuorumReacehdEvents::handle(global, args).await
}
Commands::LastBottomupCheckpointHeight(args) => {
LastBottomUpCheckpointHeight::handle(global, args).await
}
}
}
}
#[derive(Debug, Subcommand)]
pub(crate) enum Commands {
Relayer(BottomUpRelayerArgs),
ListValidatorChanges(ListValidatorChangesArgs),
ListBottomupBundle(GetBottomUpBundlesArgs),
QuorumReachedEvents(GetQuorumReachedEventsArgs),
LastBottomupCheckpointHeight(LastBottomUpCheckpointHeightArgs),
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/checkpoint/bottomup_bundles.rs | ipc/cli/src/commands/checkpoint/bottomup_bundles.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! List bottom up bundles
use std::fmt::Debug;
use std::str::FromStr;
use async_trait::async_trait;
use clap::Args;
use fvm_shared::clock::ChainEpoch;
use ipc_api::subnet_id::SubnetID;
use crate::commands::get_ipc_provider;
use crate::{CommandLineHandler, GlobalArguments};
/// The command to get bottom up bundles at height.
pub(crate) struct GetBottomUpBundles;
#[async_trait]
impl CommandLineHandler for GetBottomUpBundles {
type Arguments = GetBottomUpBundlesArgs;
async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
log::debug!("get bottom up bundles with args: {:?}", arguments);
let provider = get_ipc_provider(global)?;
let subnet = SubnetID::from_str(&arguments.subnet)?;
for h in arguments.from_epoch..=arguments.to_epoch {
let Some(bundle) = provider.get_bottom_up_bundle(&subnet, h).await? else {
continue;
};
println!("bottom up checkpoint bundle at height: {}", h);
println!("{}", serde_json::to_string(&bundle)?);
}
Ok(())
}
}
#[derive(Debug, Args)]
#[command(about = "List bottom up checkpoint signature bundle for a child subnet")]
pub(crate) struct GetBottomUpBundlesArgs {
#[arg(long, help = "The target subnet to perform query")]
pub subnet: String,
#[arg(long, help = "Include checkpoints from this epoch")]
pub from_epoch: ChainEpoch,
#[arg(long, help = "Include checkpoints up to this epoch")]
pub to_epoch: ChainEpoch,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/checkpoint/quorum_reached.rs | ipc/cli/src/commands/checkpoint/quorum_reached.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! List quorum reached events
use std::fmt::Debug;
use std::str::FromStr;
use async_trait::async_trait;
use clap::Args;
use fvm_shared::clock::ChainEpoch;
use ipc_api::subnet_id::SubnetID;
use crate::commands::get_ipc_provider;
use crate::{CommandLineHandler, GlobalArguments};
/// The command to list quorum reached at height.
pub(crate) struct GetQuorumReacehdEvents;
#[async_trait]
impl CommandLineHandler for GetQuorumReacehdEvents {
type Arguments = GetQuorumReachedEventsArgs;
async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
log::debug!("get quorum reached events with args: {:?}", arguments);
let provider = get_ipc_provider(global)?;
let subnet = SubnetID::from_str(&arguments.subnet)?;
for h in arguments.from_epoch..=arguments.to_epoch {
let events = provider.quorum_reached_events(&subnet, h).await?;
for e in events {
println!("{e}");
}
}
Ok(())
}
}
#[derive(Debug, Args)]
#[command(about = "List quorum reached events for a child subnet")]
pub(crate) struct GetQuorumReachedEventsArgs {
#[arg(long, help = "The target subnet to perform query")]
pub subnet: String,
#[arg(long, help = "Include events from this epoch")]
pub from_epoch: ChainEpoch,
#[arg(long, help = "Include events up to this epoch")]
pub to_epoch: ChainEpoch,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/checkpoint/list_validator_changes.rs | ipc/cli/src/commands/checkpoint/list_validator_changes.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! List validator change set cli command
use std::fmt::Debug;
use std::str::FromStr;
use async_trait::async_trait;
use clap::Args;
use fvm_shared::clock::ChainEpoch;
use ipc_api::subnet_id::SubnetID;
use crate::commands::get_ipc_provider;
use crate::{CommandLineHandler, GlobalArguments};
/// The command to list validator changes committed in a subnet.
pub(crate) struct ListValidatorChanges;
#[async_trait]
impl CommandLineHandler for ListValidatorChanges {
type Arguments = ListValidatorChangesArgs;
async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
log::debug!("list validator changes with args: {:?}", arguments);
let provider = get_ipc_provider(global)?;
let subnet = SubnetID::from_str(&arguments.subnet)?;
for h in arguments.from_epoch..=arguments.to_epoch {
let changes = provider.get_validator_changeset(&subnet, h).await?;
log::info!("changes at height: {h} are: {:?}", changes.value);
}
Ok(())
}
}
#[derive(Debug, Args)]
#[command(about = "List of validator changes commmitted for a child subnet")]
pub(crate) struct ListValidatorChangesArgs {
#[arg(long, help = "Lists the validator changes between two epochs")]
pub subnet: String,
#[arg(long, help = "Include checkpoints from this epoch")]
pub from_epoch: ChainEpoch,
#[arg(long, help = "Include checkpoints up to this epoch")]
pub to_epoch: ChainEpoch,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipc/cli/src/commands/checkpoint/bottomup_height.rs | ipc/cli/src/commands/checkpoint/bottomup_height.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use std::fmt::Debug;
use std::str::FromStr;
use async_trait::async_trait;
use clap::Args;
use ipc_api::subnet_id::SubnetID;
use crate::commands::get_ipc_provider;
use crate::{CommandLineHandler, GlobalArguments};
/// The command to get the last bottom up checkpoint height in a subnet.
pub(crate) struct LastBottomUpCheckpointHeight;
#[async_trait]
impl CommandLineHandler for LastBottomUpCheckpointHeight {
type Arguments = LastBottomUpCheckpointHeightArgs;
async fn handle(global: &GlobalArguments, arguments: &Self::Arguments) -> anyhow::Result<()> {
log::debug!(
"list bottom up checkpoint height with args: {:?}",
arguments
);
let provider = get_ipc_provider(global)?;
let subnet = SubnetID::from_str(&arguments.subnet)?;
let height = provider.last_bottom_up_checkpoint_height(&subnet).await?;
println!("height: {height}");
Ok(())
}
}
#[derive(Debug, Args)]
#[command(about = "Last bottom up checkpoint height committed for a child subnet in the parent")]
pub(crate) struct LastBottomUpCheckpointHeightArgs {
#[arg(long, help = "The target subnet to perform query")]
pub subnet: String,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ext/merkle-tree-rs/src/lib.rs | ext/merkle-tree-rs/src/lib.rs | // Copyright 2022-2024 Ikechukwu Ahiara Marvellous (@literallymarvellous)
// SPDX-License-Identifier: MIT
//
// Forked from https://github.com/literallymarvellous/merkle-tree-rs with assumed MIT license
// as per Cargo.toml: https://github.com/literallymarvellous/merkle-tree-rs/blob/d4abd1ca716e65d05e577e2f22b69947bef5b843/Cargo.toml#L5
//
// License headers added post-fork.
#[allow(clippy::all)]
pub mod core;
pub mod format;
pub mod standard;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ext/merkle-tree-rs/src/standard.rs | ext/merkle-tree-rs/src/standard.rs | // Copyright 2022-2024 Ikechukwu Ahiara Marvellous (@literallymarvellous)
// SPDX-License-Identifier: MIT
//
// Forked from https://github.com/literallymarvellous/merkle-tree-rs with assumed MIT license
// as per Cargo.toml: https://github.com/literallymarvellous/merkle-tree-rs/blob/d4abd1ca716e65d05e577e2f22b69947bef5b843/Cargo.toml#L5
//
// License headers added post-fork.
use anyhow::{anyhow, bail, Result};
use ethers::{
abi::{
self,
param_type::Reader,
token::{LenientTokenizer, Tokenizer},
Token,
},
types::Bytes,
utils::{hex, keccak256},
};
use serde::{Deserialize, Serialize};
use std::{borrow::Cow, collections::HashMap, marker::PhantomData};
use crate::{
core::{
get_multi_proof, get_proof, make_merkle_tree, process_multi_proof, process_proof,
render_merkle_tree, Hash, MultiProof,
},
format::{FormatHash, Hex0x},
};
#[allow(dead_code)]
struct HashedValues {
value: Vec<String>,
value_index: usize,
hash: Hash,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
struct Values {
value: Vec<String>,
tree_index: usize,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct StandardMerkleTreeData {
format: String,
tree: Vec<String>,
values: Vec<Values>,
leaf_encoding: Vec<String>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct StandardMerkleTree<F = Hex0x> {
hash_lookup: HashMap<Hash, usize>,
tree: Vec<Hash>,
values: Vec<Values>,
leaf_encoding: Vec<String>,
format: PhantomData<F>,
}
pub enum LeafType {
Number(usize),
LeafBytes(Vec<String>),
}
pub fn standard_leaf_hash(values: Vec<String>, params: &[String]) -> Result<Hash> {
let tokens = params
.iter()
.enumerate()
.map(|(i, p)| {
let param_type = Reader::read(p)?;
Ok(LenientTokenizer::tokenize(¶m_type, &values[i])?)
})
.collect::<Result<Vec<Token>>>()?;
let hash = keccak256(keccak256(Bytes::from(abi::encode(&tokens))));
Ok(Hash::from(hash))
}
pub fn check_bounds<T>(values: &[T], index: usize) -> Result<()> {
if index >= values.len() {
bail!("Index out of range")
}
Ok(())
}
impl<F> StandardMerkleTree<F>
where
F: FormatHash,
{
fn new(tree: Vec<Hash>, values: Vec<Values>, leaf_encoding: Vec<String>) -> Result<Self> {
let mut hash_lookup = HashMap::new();
for (i, v) in values.iter().enumerate() {
hash_lookup.insert(standard_leaf_hash(v.value.clone(), &leaf_encoding)?, i);
}
Ok(Self {
hash_lookup,
tree,
values,
leaf_encoding,
format: PhantomData,
})
}
pub fn of<V: ToString, E: ToString>(values: &[Vec<V>], leaf_encode: &[E]) -> Result<Self> {
let values: Vec<Vec<String>> = values
.iter()
.map(|v| v.iter().map(|v| v.to_string()).collect())
.collect();
let leaf_encode: Vec<String> = leaf_encode.iter().map(|v| v.to_string()).collect();
let mut hashed_values: Vec<HashedValues> = values
.iter()
.enumerate()
.map(|(i, v)| {
Ok(HashedValues {
value: (*v).to_vec(),
value_index: i,
hash: standard_leaf_hash(v.clone(), &leaf_encode)?,
})
})
.collect::<Result<Vec<_>>>()?;
hashed_values.sort_by(|a, b| a.hash.cmp(&b.hash));
let tree = make_merkle_tree(hashed_values.iter().map(|v| v.hash).collect())?;
let mut indexed_values: Vec<Values> = values
.iter()
.map(|v| Values {
value: (*v).to_vec(),
tree_index: 0,
})
.collect();
hashed_values.iter().enumerate().for_each(|(i, v)| {
indexed_values[v.value_index].tree_index = tree.len() - i - 1;
});
Self::new(tree, indexed_values, leaf_encode)
}
pub fn load(data: StandardMerkleTreeData) -> Result<Self> {
if data.format != "standard-v1" {
bail!("Unknown format");
}
let tree = data
.tree
.iter()
.map(|leaf| Hash::from_slice(&hex::decode(leaf.split_at(2).1).unwrap()))
.collect();
Self::new(tree, data.values, data.leaf_encoding)
}
pub fn dump(&self) -> StandardMerkleTreeData {
StandardMerkleTreeData {
format: "standard-v1".to_owned(),
tree: self
.tree
.iter()
.map(|leaf| format!("0x{}", hex::encode(leaf)))
.collect(),
values: self.values.clone(),
leaf_encoding: self.leaf_encoding.clone(),
}
}
pub fn render(&self) -> Result<String> {
render_merkle_tree(&self.tree)
}
pub fn root(&self) -> F::Out {
F::format(Cow::Borrowed(&self.tree[0]))
}
pub fn validate(&self) -> Result<()> {
for i in 0..self.values.len() {
self.validate_value(i)?;
}
Ok(())
}
pub fn leaf_hash<V: ToString>(&self, leaf: &[V]) -> Result<F::Out> {
let leaf: Vec<String> = leaf.iter().map(|v| v.to_string()).collect();
let h = standard_leaf_hash(leaf, &self.leaf_encoding)?;
Ok(F::format(Cow::Owned(h)))
}
pub fn leaf_lookup<V: ToString>(&self, leaf: &[V]) -> Result<usize> {
let leaf: Vec<String> = leaf.iter().map(|v| v.to_string()).collect();
let leaf_hash = standard_leaf_hash(leaf, &self.leaf_encoding)?;
self.hash_lookup
.get(&leaf_hash)
.cloned()
.ok_or_else(|| anyhow!("Leaf is not in tree"))
}
pub fn get_proof(&self, leaf: LeafType) -> Result<Vec<F::Out>> {
let value_index = match leaf {
LeafType::Number(i) => i,
LeafType::LeafBytes(v) => {
self.leaf_lookup(&v.iter().map(|v| v.as_str()).collect::<Vec<&str>>())?
}
};
self.validate_value(value_index)?;
// rebuild tree index and generate proof
let value = self.values.get(value_index).unwrap();
let proof = get_proof(self.tree.clone(), value.tree_index)?;
// check proof
let hash = self.tree.get(value.tree_index).unwrap();
let implied_root = process_proof(hash, &proof)?;
if !implied_root.eq(self.tree.first().unwrap()) {
bail!("Unable to prove value")
}
Ok(proof
.into_iter()
.map(|p| F::format(Cow::Owned(p)))
.collect())
}
pub fn get_multi_proof(&self, leaves: &[LeafType]) -> Result<MultiProof<Vec<String>, F::Out>> {
let value_indices: Vec<usize> = leaves
.iter()
.map(|leaf| match leaf {
LeafType::Number(i) => Ok(*i),
LeafType::LeafBytes(v) => {
self.leaf_lookup(&v.iter().map(|v| v.as_str()).collect::<Vec<&str>>())
}
})
.collect::<Result<Vec<usize>>>()?;
for i in value_indices.iter() {
self.validate_value(*i)?;
}
// rebuild tree indices and generate proof
let mut indices: Vec<usize> = value_indices
.iter()
.map(|i| self.values.get(*i).unwrap().tree_index)
.collect();
let multi_proof = get_multi_proof(self.tree.clone(), &mut indices)?;
// check proof
let implied_root = process_multi_proof(&multi_proof)?;
if !implied_root.eq(self.tree.first().unwrap()) {
bail!("Unable to prove value")
}
let leaves: Vec<Vec<String>> = multi_proof
.leaves
.iter()
.map(|leaf| {
let index = *self.hash_lookup.get(leaf).unwrap();
self.values.get(index).unwrap().value.clone()
})
.collect();
let proof = multi_proof
.proof
.into_iter()
.map(|p| F::format(Cow::Owned(p)))
.collect();
Ok(MultiProof {
leaves,
proof,
proof_flags: multi_proof.proof_flags,
})
}
fn validate_value(&self, index: usize) -> Result<()> {
check_bounds(&self.values, index)?;
let value = self.values.get(index).unwrap();
check_bounds(&self.tree, value.tree_index)?;
let leaf = standard_leaf_hash(value.value.clone(), &self.leaf_encoding)?;
if !leaf.eq(self.tree.get(value.tree_index).unwrap()) {
bail!("Merkle tree does not contain the expected value")
}
Ok(())
}
}
impl Iterator for StandardMerkleTree {
type Item = Vec<String>;
fn next(&mut self) -> Option<Self::Item> {
if !self.values.is_empty() {
let v = self.values.remove(0);
Some(v.value)
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use crate::format::Raw;
use super::*;
fn characters(s: &str) -> (Vec<Vec<String>>, StandardMerkleTree) {
let l: Vec<Vec<String>> = s.chars().map(|c| vec![c.to_string()]).collect();
let values: Vec<Vec<&str>> = l
.iter()
.map(|v| v.iter().map(|v| v.as_str()).collect())
.collect();
let t = StandardMerkleTree::of(&values, &["string"]).unwrap();
(l, t)
}
#[test]
fn test_standard_leaf_hash() {
let values = vec![
"0x1111111111111111111111111111111111111111".to_string(),
"5000000000000000000".to_string(),
];
let hash =
standard_leaf_hash(values, &["address".to_string(), "uint".to_string()]).unwrap();
let expected_hash: Hash = [
235, 2, 196, 33, 207, 164, 137, 118, 230, 109, 251, 41, 18, 7, 69, 144, 158, 163, 160,
248, 67, 69, 108, 38, 60, 248, 241, 37, 52, 131, 226, 131,
]
.into();
assert_eq!(hash, expected_hash)
}
#[test]
fn test_of() {
let values = vec![
vec![
"0x1111111111111111111111111111111111111111",
"5000000000000000000",
],
vec![
"0x2222222222222222222222222222222222222222",
"2500000000000000000",
],
];
let merkle_tree: StandardMerkleTree =
StandardMerkleTree::of(&values, &["address", "uint256"]).unwrap();
let expected_tree = vec![
"0xd4dee0beab2d53f2cc83e567171bd2820e49898130a22622b10ead383e90bd77",
"0xeb02c421cfa48976e66dfb29120745909ea3a0f843456c263cf8f1253483e283",
"0xb92c48e9d7abe27fd8dfd6b5dfdbfb1c9a463f80c712b66f3a5180a090cccafc",
];
assert_eq!(merkle_tree.dump().tree, expected_tree);
}
#[test]
fn test_validate() {
let (_, t) = characters("abcdef");
t.validate().unwrap();
}
#[test]
fn test_get_proof() {
let (_, t) = characters("abcdef");
for (i, v) in t.clone().enumerate() {
let proof = t.get_proof(LeafType::Number(i)).unwrap();
let proof2 = t.get_proof(LeafType::LeafBytes(v)).unwrap();
assert_eq!(proof, proof2);
}
}
#[test]
fn test_get_multi_proof() {
let (l, t) = characters("abcdef");
let l: Vec<Vec<String>> = l
.iter()
.map(|v| v.iter().map(|v| v.to_string()).collect())
.collect();
let leaves_array = vec![
vec![],
vec![0, 1],
vec![0, 1, 5],
vec![1, 3, 4, 5],
vec![0, 2, 4, 5],
vec![0, 1, 2, 3, 4, 5],
];
leaves_array.iter().for_each(|ids| {
let leaves: Vec<LeafType> = ids.iter().map(|i| LeafType::Number(*i)).collect();
let proof = t.get_multi_proof(&leaves).unwrap();
let leaves: Vec<LeafType> = ids
.iter()
.map(|i| LeafType::LeafBytes(l[*i].clone()))
.collect();
let proof2 = t.get_multi_proof(&leaves).unwrap();
assert_eq!(proof, proof2);
})
}
#[test]
fn test_render() {
let (_, t) = characters("abc");
println!("{:?}", t.tree);
let expected = "0) 0xf2129b5a697531ef818f644564a6552b35c549722385bc52aa7fe46c0b5f46b1
├─ 1) 0xfa914d99a18dc32d9725b3ef1c50426deb40ec8d0885dac8edcc5bfd6d030016
│ ├─ 3) 0x9c15a6a0eaeed500fd9eed4cbeab71f797cefcc67bfd46683e4d2e6ff7f06d1c
│ └─ 4) 0x19ba6c6333e0e9a15bf67523e0676e2f23eb8e574092552d5e888c64a4bb3681
└─ 2) 0x9cf5a63718145ba968a01c1d557020181c5b252f665cf7386d370eddb176517b";
assert_eq!(t.render().unwrap(), expected);
}
#[test]
fn test_dump_load() {
let (_, t) = characters("abcdef");
let t2 = StandardMerkleTree::load(t.dump()).unwrap();
t2.validate().unwrap();
assert_eq!(t, t2);
}
#[test]
fn test_root() {
let (_, t) = characters("abc");
assert_eq!(
t.root(),
"0xf2129b5a697531ef818f644564a6552b35c549722385bc52aa7fe46c0b5f46b1"
)
}
#[test]
fn test_raw_format() {
let (_, t1) = characters("abcdef");
let t2: StandardMerkleTree<Raw> = StandardMerkleTree::load(t1.dump()).unwrap();
let r1 = t1.root();
let r2 = t2.root();
assert_eq!(r2.as_bytes(), hex::decode(r1).unwrap());
}
#[test]
#[should_panic = "Index out of range"]
fn test_out_of_bounds_panic() {
let (_, t) = characters("a");
t.get_proof(LeafType::Number(1)).unwrap();
}
#[test]
#[should_panic = "Unknown format"]
fn test_unrecognized_tree_dump() {
let _t: StandardMerkleTree = StandardMerkleTree::load(StandardMerkleTreeData {
format: "nonstandard".to_string(),
tree: Vec::new(),
values: Vec::new(),
leaf_encoding: Vec::new(),
})
.unwrap();
}
#[test]
#[should_panic = "Merkle tree does not contain the expected value"]
fn test_malformed_tree_dump() {
let zero = format!("0x{}", hex::encode(Bytes::from(vec![0u8; 32])));
let t: StandardMerkleTree = StandardMerkleTree::load(StandardMerkleTreeData {
format: "standard-v1".to_string(),
tree: vec![zero],
values: vec![Values {
value: vec!['0'.to_string()],
tree_index: 0,
}],
leaf_encoding: vec!["uint256".to_string()],
})
.unwrap();
t.get_proof(LeafType::Number(0)).unwrap();
}
#[test]
#[should_panic = "Unable to prove value"]
fn test_malformed_tree_dump2() {
let zero_bytes = Bytes::from(vec![0u8; 32]);
let zero = format!("0x{}", hex::encode(zero_bytes.clone()));
let keccak_zero = format!("0x{}", hex::encode(keccak256(keccak256(zero_bytes))));
let t: StandardMerkleTree = StandardMerkleTree::load(StandardMerkleTreeData {
format: "standard-v1".to_string(),
tree: vec![zero.clone(), zero, keccak_zero],
values: vec![Values {
value: vec!['0'.to_string()],
tree_index: 2,
}],
leaf_encoding: vec!["uint256".to_string()],
})
.unwrap();
t.get_proof(LeafType::Number(0)).unwrap();
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ext/merkle-tree-rs/src/core.rs | ext/merkle-tree-rs/src/core.rs | // Copyright 2022-2024 Ikechukwu Ahiara Marvellous (@literallymarvellous)
// SPDX-License-Identifier: MIT
//
// Forked from https://github.com/literallymarvellous/merkle-tree-rs with assumed MIT license
// as per Cargo.toml: https://github.com/literallymarvellous/merkle-tree-rs/blob/d4abd1ca716e65d05e577e2f22b69947bef5b843/Cargo.toml#L5
//
// License headers added post-fork.
use anyhow::{anyhow, bail, Result};
use ethers::utils::{hex, keccak256};
use std::{collections::HashMap, result::Result::Ok};
pub type Hash = ethers::types::H256;
#[derive(PartialEq, Debug)]
pub struct MultiProof<T, U> {
pub(crate) leaves: Vec<T>,
pub(crate) proof: Vec<U>,
pub(crate) proof_flags: Vec<bool>,
}
pub fn hash_pair(a: &Hash, b: &Hash) -> Hash {
let mut s = [a.as_ref(), b.as_ref()];
s.sort();
let bytes = s.concat();
Hash::from(keccak256(bytes))
}
pub fn left_child_index(i: usize) -> usize {
2 * i + 1
}
pub fn right_child_index(i: usize) -> usize {
2 * i + 2
}
pub fn parent_index(i: usize) -> Result<usize> {
if i > 0 {
Ok((i - 1) / 2)
} else {
Err(anyhow!("Root has no parent"))
}
}
pub fn sibling_index(i: i32) -> Result<usize> {
if i > 0 {
let r = i - (-1i32).pow((i % 2).try_into().unwrap());
Ok(r as usize)
} else {
Err(anyhow!("Root has no sibling"))
}
}
pub fn is_tree_node(tree: &[Hash], i: usize) -> bool {
i < tree.len()
}
pub fn is_internal_node(tree: &[Hash], i: usize) -> bool {
is_tree_node(tree, left_child_index(i))
}
pub fn is_leaf_node(tree: &[Hash], i: usize) -> bool {
is_tree_node(tree, i) && !is_internal_node(tree, i)
}
pub fn check_tree_node(tree: &[Hash], i: usize) -> Result<()> {
if is_tree_node(tree, i) {
Ok(())
} else {
Err(anyhow!("Index is not in tree"))
}
}
pub fn check_internal_node(tree: &[Hash], i: usize) -> Result<()> {
if is_internal_node(tree, i) {
Ok(())
} else {
Err(anyhow!("Index is not in tree"))
}
}
pub fn check_leaf_node(tree: &[Hash], i: usize) -> Result<()> {
if !is_leaf_node(tree, i) {
bail!("Index is not in tree");
}
Ok(())
}
pub fn make_merkle_tree(leaves: Vec<Hash>) -> Result<Vec<Hash>> {
if leaves.is_empty() {
bail!("Expected non-zero number of leaves")
};
let tree_length = 2 * leaves.len() - 1;
let mut tree: Vec<Hash> = vec![Hash::zero(); tree_length];
leaves
.iter()
.enumerate()
.for_each(|(i, v)| tree[tree_length - 1 - i] = v.clone());
for i in (0..tree_length - leaves.len()).rev() {
let left_child = &tree[left_child_index(i)];
let right_child = &tree[right_child_index(i)];
tree[i] = hash_pair(left_child, right_child);
}
Ok(tree)
}
pub fn get_proof(tree: Vec<Hash>, mut i: usize) -> Result<Vec<Hash>> {
check_leaf_node(&tree, i)?;
let mut proof = Vec::new();
while i > 0 {
let sibling_i = sibling_index(i.try_into()?)?;
proof.push(tree[sibling_i].clone());
i = parent_index(i)?;
}
Ok(proof)
}
pub fn process_proof(leaf: &Hash, proof: &[Hash]) -> Result<Hash> {
Ok(proof.iter().fold(leaf.clone(), |a, b| hash_pair(&a, b)))
}
pub fn get_multi_proof(tree: Vec<Hash>, indices: &mut [usize]) -> Result<MultiProof<Hash, Hash>> {
for i in indices.iter() {
check_leaf_node(&tree, *i)?;
}
indices.sort_by(|a, b| b.cmp(a));
if indices
.iter()
.skip(1)
.enumerate()
.any(|(i, v)| *v == indices[i])
{
bail!("Cannot prove duplicated index")
}
let mut stack = indices[..].to_vec();
let mut proof: Vec<Hash> = Vec::new();
let mut proof_flags: Vec<bool> = Vec::new();
while !stack.is_empty() && stack[0] > 0 {
let j = stack.remove(0);
let s = sibling_index(j.try_into()?)?;
let p = parent_index(j)?;
if !stack.is_empty() && s == stack[0] {
proof_flags.push(true);
stack.remove(0);
} else {
proof_flags.push(false);
proof.push(tree[s].clone());
}
stack.push(p);
}
if indices.is_empty() {
proof.push(tree[0].clone());
}
Ok(MultiProof {
leaves: indices.iter().map(|i| tree[*i].clone()).collect(),
proof,
proof_flags,
})
}
pub fn process_multi_proof(multi_proof: &MultiProof<Hash, Hash>) -> Result<Hash> {
if multi_proof.proof.len() < multi_proof.proof_flags.iter().filter(|&&b| !b).count() {
bail!("Invalid multiproof format")
}
if multi_proof.leaves.len() + multi_proof.proof.len() != multi_proof.proof_flags.len() + 1 {
bail!("Provide leaves and multi_proof are not compatible")
}
let mut stack = multi_proof.leaves[..].to_vec();
let mut proof = multi_proof.proof[..].to_vec();
for flag in &multi_proof.proof_flags {
let a = stack.remove(0);
let b = if *flag {
stack.remove(0)
} else {
proof.remove(0)
};
stack.push(hash_pair(&a, &b))
}
if let Some(b) = stack.pop() {
return Ok(b);
}
Ok(proof.remove(0))
}
pub fn is_valid_merkle_tree(tree: Vec<Hash>) -> bool {
for (i, node) in tree.iter().enumerate() {
let l = left_child_index(i);
let r = right_child_index(i);
if r >= tree.len() {
if l < tree.len() {
return false;
}
} else if !node.eq(&hash_pair(&tree[l], &tree[r])) {
return false;
}
}
!tree.is_empty()
}
pub fn render_merkle_tree(tree: &[Hash]) -> Result<String> {
if tree.is_empty() {
bail!("Expected non-zero number of nodes");
}
let mut stack = vec![0];
let mut lines: Vec<String> = Vec::new();
let mut parent_graph = HashMap::new();
let mut path: Vec<Vec<usize>> = Vec::new();
while !stack.is_empty() {
let index = stack.pop().unwrap();
let current_path = path.pop().unwrap_or_default();
match current_path.len() {
0 => {
let s1 = index.to_string()
+ ") "
+ &format!("0x{}", hex::encode(tree.get(index).unwrap()));
lines.push(s1);
}
_ => {
let s1 = ¤t_path[..current_path.len() - 1]
.iter()
.map(|p| vec![" ", "│ "][*p])
.collect::<Vec<&str>>()
.join("");
let s2 = ¤t_path[current_path.len() - 1..]
.iter()
.map(|p| vec!["└─ ", "├─ "][*p])
.collect::<Vec<&str>>()
.join("");
let s3 = index.to_string()
+ ") "
+ &format!("0x{}", hex::encode(tree.get(index).unwrap()));
lines.push(s1.to_owned() + s2 + &s3);
}
}
if right_child_index(index) < tree.len() {
parent_graph.insert(index, [left_child_index(index), right_child_index(index)]);
stack.push(right_child_index(index));
path.push([current_path.clone(), vec![0]].concat());
stack.push(left_child_index(index));
path.push([current_path, vec![1]].concat());
}
}
Ok(lines.join("\n"))
}
#[cfg(test)]
mod tests {
use super::*;
fn make_tree() -> Vec<Hash> {
let tree = vec![
Hash::from([
115, 209, 118, 200, 5, 4, 69, 77, 194, 99, 240, 121, 27, 47, 159, 212, 239, 185,
42, 0, 241, 72, 77, 142, 45, 32, 88, 158, 8, 61, 44, 11,
]),
Hash::from([
206, 8, 250, 120, 108, 113, 57, 176, 105, 92, 78, 166, 155, 96, 168, 176, 157, 57,
37, 199, 165, 0, 152, 41, 72, 109, 244, 215, 70, 159, 202, 146,
]),
Hash::from([
230, 18, 175, 174, 238, 192, 61, 110, 232, 8, 30, 90, 33, 224, 209, 91, 37, 85,
171, 114, 56, 219, 231, 210, 62, 217, 230, 42, 18, 28, 139, 203,
]),
Hash::from([
233, 80, 165, 147, 77, 183, 162, 199, 17, 207, 58, 7, 225, 101, 161, 93, 18, 143,
70, 211, 166, 76, 208, 229, 24, 100, 67, 52, 237, 111, 198, 96,
]),
Hash::from([
15, 164, 23, 177, 133, 189, 185, 36, 130, 179, 11, 37, 19, 14, 240, 222, 25, 13,
39, 28, 169, 28, 138, 102, 28, 45, 64, 166, 30, 143, 108, 92,
]),
Hash::from([
233, 88, 165, 147, 77, 183, 162, 199, 170, 207, 58, 67, 225, 101, 161, 93, 18, 143,
7, 211, 166, 76, 248, 229, 224, 113, 67, 52, 237, 131, 198, 96,
]),
Hash::from([
157, 164, 23, 177, 133, 189, 185, 36, 130, 79, 11, 7, 190, 14, 240, 222, 55, 123,
39, 238, 169, 228, 138, 102, 8, 45, 64, 166, 3, 143, 48, 92,
]),
];
tree
}
#[test]
fn test_hash_pair() {
let a = Hash::from([
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32,
]);
let b = Hash::from([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31,
]);
let h = hash_pair(&a, &b);
let e = keccak256([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
]);
assert_eq!(h.as_bytes(), &e);
}
#[test]
fn test_make_merkle_tree() {
let byte = Hash::from([
157, 164, 23, 177, 133, 189, 185, 36, 130, 79, 11, 7, 190, 14, 240, 222, 55, 123, 39,
238, 169, 228, 138, 102, 8, 45, 64, 166, 3, 143, 48, 92,
]);
let byte2 = Hash::from([
233, 88, 165, 147, 77, 183, 162, 199, 170, 207, 58, 67, 225, 101, 161, 93, 18, 143, 7,
211, 166, 76, 248, 229, 224, 113, 67, 52, 237, 131, 198, 96,
]);
let byte3 = Hash::from([
15, 164, 23, 177, 133, 189, 185, 36, 130, 179, 11, 37, 19, 14, 240, 222, 25, 13, 39,
28, 169, 28, 138, 102, 28, 45, 64, 166, 30, 143, 108, 92,
]);
let byte4 = Hash::from([
233, 80, 165, 147, 77, 183, 162, 199, 17, 207, 58, 7, 225, 101, 161, 93, 18, 143, 70,
211, 166, 76, 208, 229, 24, 100, 67, 52, 237, 111, 198, 96,
]);
let leaves = vec![byte, byte2, byte3, byte4];
let tree = make_merkle_tree(leaves).unwrap();
let expected_tree = make_tree();
assert_eq!(tree, expected_tree);
}
#[test]
fn test_get_proof() {
let expected_tree = make_tree();
let proof = get_proof(expected_tree, 6).unwrap();
let expected_proof = vec![
Hash::from([
233, 88, 165, 147, 77, 183, 162, 199, 170, 207, 58, 67, 225, 101, 161, 93, 18, 143,
7, 211, 166, 76, 248, 229, 224, 113, 67, 52, 237, 131, 198, 96,
]),
Hash::from([
206, 8, 250, 120, 108, 113, 57, 176, 105, 92, 78, 166, 155, 96, 168, 176, 157, 57,
37, 199, 165, 0, 152, 41, 72, 109, 244, 215, 70, 159, 202, 146,
]),
];
assert_eq!(proof, expected_proof);
}
#[test]
fn test_process_proof() {
let leaf = Hash::from([
157, 164, 23, 177, 133, 189, 185, 36, 130, 79, 11, 7, 190, 14, 240, 222, 55, 123, 39,
238, 169, 228, 138, 102, 8, 45, 64, 166, 3, 143, 48, 92,
]);
let proof = vec![
Hash::from([
233, 88, 165, 147, 77, 183, 162, 199, 170, 207, 58, 67, 225, 101, 161, 93, 18, 143,
7, 211, 166, 76, 248, 229, 224, 113, 67, 52, 237, 131, 198, 96,
]),
Hash::from([
206, 8, 250, 120, 108, 113, 57, 176, 105, 92, 78, 166, 155, 96, 168, 176, 157, 57,
37, 199, 165, 0, 152, 41, 72, 109, 244, 215, 70, 159, 202, 146,
]),
];
let expected_root = Hash::from([
115, 209, 118, 200, 5, 4, 69, 77, 194, 99, 240, 121, 27, 47, 159, 212, 239, 185, 42, 0,
241, 72, 77, 142, 45, 32, 88, 158, 8, 61, 44, 11,
]);
let root = process_proof(&leaf, &proof).unwrap();
assert_eq!(root, expected_root)
}
#[test]
fn test_get_multi_proof() {
let tree = make_tree();
let multi_proof = get_multi_proof(tree, &mut [4, 6]).unwrap();
let expected_multi_proof = MultiProof {
leaves: [
Hash::from([
157, 164, 23, 177, 133, 189, 185, 36, 130, 79, 11, 7, 190, 14, 240, 222, 55,
123, 39, 238, 169, 228, 138, 102, 8, 45, 64, 166, 3, 143, 48, 92,
]),
Hash::from([
15, 164, 23, 177, 133, 189, 185, 36, 130, 179, 11, 37, 19, 14, 240, 222, 25,
13, 39, 28, 169, 28, 138, 102, 28, 45, 64, 166, 30, 143, 108, 92,
]),
]
.to_vec(),
proof: [
Hash::from([
233, 88, 165, 147, 77, 183, 162, 199, 170, 207, 58, 67, 225, 101, 161, 93, 18,
143, 7, 211, 166, 76, 248, 229, 224, 113, 67, 52, 237, 131, 198, 96,
]),
Hash::from([
233, 80, 165, 147, 77, 183, 162, 199, 17, 207, 58, 7, 225, 101, 161, 93, 18,
143, 70, 211, 166, 76, 208, 229, 24, 100, 67, 52, 237, 111, 198, 96,
]),
]
.to_vec(),
proof_flags: [false, false, true].into(),
};
assert_eq!(multi_proof, expected_multi_proof);
}
#[test]
fn test_process_multi_proof() {
let multi_proof = MultiProof {
leaves: [
Hash::from([
157, 164, 23, 177, 133, 189, 185, 36, 130, 79, 11, 7, 190, 14, 240, 222, 55,
123, 39, 238, 169, 228, 138, 102, 8, 45, 64, 166, 3, 143, 48, 92,
]),
Hash::from([
15, 164, 23, 177, 133, 189, 185, 36, 130, 179, 11, 37, 19, 14, 240, 222, 25,
13, 39, 28, 169, 28, 138, 102, 28, 45, 64, 166, 30, 143, 108, 92,
]),
]
.to_vec(),
proof: [
Hash::from([
233, 88, 165, 147, 77, 183, 162, 199, 170, 207, 58, 67, 225, 101, 161, 93, 18,
143, 7, 211, 166, 76, 248, 229, 224, 113, 67, 52, 237, 131, 198, 96,
]),
Hash::from([
233, 80, 165, 147, 77, 183, 162, 199, 17, 207, 58, 7, 225, 101, 161, 93, 18,
143, 70, 211, 166, 76, 208, 229, 24, 100, 67, 52, 237, 111, 198, 96,
]),
]
.to_vec(),
proof_flags: [false, false, true].into(),
};
let root = process_multi_proof(&multi_proof).unwrap();
let expected_root = Hash::from([
115, 209, 118, 200, 5, 4, 69, 77, 194, 99, 240, 121, 27, 47, 159, 212, 239, 185, 42, 0,
241, 72, 77, 142, 45, 32, 88, 158, 8, 61, 44, 11,
]);
assert_eq!(root, expected_root);
}
#[test]
fn test_is_valid_merkle_tree() {
let tree = make_tree();
assert!(is_valid_merkle_tree(tree));
}
#[test]
fn test_render_merkle_tree() {
let tree = make_tree();
let render = render_merkle_tree(&tree).unwrap();
println!("tree: \n {}", render);
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ext/merkle-tree-rs/src/format.rs | ext/merkle-tree-rs/src/format.rs | // Copyright 2022-2024 Ikechukwu Ahiara Marvellous (@literallymarvellous)
// SPDX-License-Identifier: MIT
//
// Forked from https://github.com/literallymarvellous/merkle-tree-rs with assumed MIT license
// as per Cargo.toml: https://github.com/literallymarvellous/merkle-tree-rs/blob/d4abd1ca716e65d05e577e2f22b69947bef5b843/Cargo.toml#L5
//
// License headers added post-fork.
use std::borrow::Cow;
use crate::core::Hash;
pub trait FormatHash {
type Out;
fn format(hash: Cow<Hash>) -> Self::Out;
}
/// Format hashes as 0x prefixed hexadecimal string.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Hex0x;
impl FormatHash for Hex0x {
type Out = String;
fn format(hash: Cow<Hash>) -> Self::Out {
format!("0x{}", ethers::utils::hex::encode(hash.as_ref()))
}
}
/// Return hashes as bytes.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Raw;
impl FormatHash for Raw {
type Out = Hash;
fn format(hash: Cow<Hash>) -> Self::Out {
hash.into_owned()
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/contracts/binding/build.rs | contracts/binding/build.rs | // Copyright 2021-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::io::Write;
use std::path::{Path, PathBuf};
/// Generate Rust bindings from the IPC Solidity Actors ABI artifacts.
///
/// These are built by `make ipc-actors-abi`, here we just add the final step
/// so we have better code completion with Rust Analyzer.
fn main() {
// Run with `cargo build -vv` to see output from any `eprintln!` or `println!`.
// Maybe we want to skip the build and use the files as-is, could be imported as crate.
// Enabled by default so that in the monorepo we don't have to worry about stale code.
if std::env::var("BUILD_BINDINGS").unwrap_or("1".to_string()) == "0" {
return;
}
// Where are the Solidity artifacts.
let output_dir = std::env::var("OUTPUT").unwrap_or("out".to_string());
let ipc_actors_dir = workspace_dir()
.join("contracts")
.to_string_lossy()
.into_owned();
let lib_path = format!("{ipc_actors_dir}/binding/src/lib.rs");
let mut lib = std::fs::File::create(&lib_path)
.unwrap_or_else(|e| panic!("failed to create {lib_path}: {e}"));
writeln!(lib, "// DO NOT EDIT! This file was generated by build.rs").unwrap();
writeln!(lib, "#[macro_use]\nmod convert;").unwrap();
// The list of actors we need bindings for, based on how the ipc-actor uses `abigen!`.
// With the diamond pattern, there is a contract that holds state, and there are these facets which have the code,
// so we need bindings for the facets, but well (I think) use the same address with all of them.
for contract_name in [
"IDiamond",
"DiamondLoupeFacet",
"DiamondCutFacet",
"OwnershipFacet",
"GatewayDiamond",
"GatewayManagerFacet",
"GatewayGetterFacet",
"CheckpointingFacet",
"TopDownFinalityFacet",
"XnetMessagingFacet",
"GatewayMessengerFacet",
"SubnetActorCheckpointingFacet",
"SubnetActorDiamond",
"SubnetActorGetterFacet",
"SubnetActorManagerFacet",
"SubnetActorPauseFacet",
"SubnetActorRewardFacet",
"SubnetRegistryDiamond",
"RegisterSubnetFacet",
"SubnetGetterFacet",
"LibStaking",
"LibStakingChangeLog",
"LibGateway",
"LibQuorum",
] {
let module_name = camel_to_snake(contract_name);
let input_path =
format!("{ipc_actors_dir}/{output_dir}/{contract_name}.sol/{contract_name}.json");
let output_path = format!("{ipc_actors_dir}/binding/src/{}.rs", module_name);
ethers::prelude::Abigen::new(contract_name, &input_path)
.expect("failed to create Abigen")
.generate()
.expect("failed to generate Rust bindings")
.write_to_file(output_path)
.expect("failed to write Rust code");
writeln!(lib, "#[allow(clippy::all)]\npub mod {module_name};").unwrap();
println!("cargo:rerun-if-changed={input_path}");
}
writeln!(
lib,
"\n// The list of contracts need to convert FvmAddress to fvm_shared::Address"
)
.unwrap();
let fvm_address_conversion = vec![
"GatewayManagerFacet",
"GatewayGetterFacet",
"XnetMessagingFacet",
"GatewayMessengerFacet",
"SubnetActorCheckpointingFacet",
"SubnetActorGetterFacet",
"LibGateway",
];
let modules = fvm_address_conversion.into_iter().map(camel_to_snake);
for module in modules {
writeln!(lib, "fvm_address_conversion!({module});").unwrap();
}
writeln!(
lib,
"\n// The list of contracts that need to convert common types between each other"
)
.unwrap();
let common_type_conversion = vec![
("SubnetActorGetterFacet", "CheckpointingFacet"),
("SubnetActorGetterFacet", "XnetMessagingFacet"),
];
for (contract1, contract2) in common_type_conversion {
writeln!(
lib,
"common_type_conversion!({}, {});",
camel_to_snake(contract1),
camel_to_snake(contract2)
)
.unwrap();
}
println!("cargo:rerun-if-changed=build.rs");
// Run rustfmt on binding/src/lib.rs to make sure we don't accidentally format it in our IDEs
//
// sync the binding/src/lib.rs file to disk
lib.sync_all().unwrap();
// then run rustfmt on the file (it should be available as its specifed in our toolchain
let mut proc = std::process::Command::new("rustfmt")
.arg(lib_path)
.spawn()
.expect("rustfmt failed to start");
let ecode = proc.wait().expect("rustfmt failed to run");
assert!(ecode.success());
}
/// Convert ContractName to contract_name so we can use it as a Rust module.
///
/// We could just lowercase, but this is what `Abigen` does as well, and it's more readable with complex names.
fn camel_to_snake(name: &str) -> String {
let mut out = String::new();
for (i, c) in name.chars().enumerate() {
match (i, c) {
(0, c) if c.is_uppercase() => {
out.push(c.to_ascii_lowercase());
}
(_, c) if c.is_uppercase() => {
out.push('_');
out.push(c.to_ascii_lowercase());
}
(_, c) => {
out.push(c);
}
}
}
out
}
// Find the root of the workspace, not this crate, which is what `env!("CARGO_MANIFEST_DIR")` would return
fn workspace_dir() -> PathBuf {
let output = std::process::Command::new(env!("CARGO"))
.arg("locate-project")
.arg("--workspace")
.arg("--message-format=plain")
.output()
.unwrap()
.stdout;
let cargo_path = Path::new(std::str::from_utf8(&output).unwrap().trim());
cargo_path.parent().unwrap().to_path_buf()
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/contracts/binding/src/lib.rs | contracts/binding/src/lib.rs | // DO NOT EDIT! This file was generated by build.rs
#[macro_use]
mod convert;
#[allow(clippy::all)]
pub mod checkpointing_facet;
#[allow(clippy::all)]
pub mod diamond_cut_facet;
#[allow(clippy::all)]
pub mod diamond_loupe_facet;
#[allow(clippy::all)]
pub mod gateway_diamond;
#[allow(clippy::all)]
pub mod gateway_getter_facet;
#[allow(clippy::all)]
pub mod gateway_manager_facet;
#[allow(clippy::all)]
pub mod gateway_messenger_facet;
#[allow(clippy::all)]
pub mod i_diamond;
#[allow(clippy::all)]
pub mod lib_gateway;
#[allow(clippy::all)]
pub mod lib_quorum;
#[allow(clippy::all)]
pub mod lib_staking;
#[allow(clippy::all)]
pub mod lib_staking_change_log;
#[allow(clippy::all)]
pub mod ownership_facet;
#[allow(clippy::all)]
pub mod register_subnet_facet;
#[allow(clippy::all)]
pub mod subnet_actor_checkpointing_facet;
#[allow(clippy::all)]
pub mod subnet_actor_diamond;
#[allow(clippy::all)]
pub mod subnet_actor_getter_facet;
#[allow(clippy::all)]
pub mod subnet_actor_manager_facet;
#[allow(clippy::all)]
pub mod subnet_actor_pause_facet;
#[allow(clippy::all)]
pub mod subnet_actor_reward_facet;
#[allow(clippy::all)]
pub mod subnet_getter_facet;
#[allow(clippy::all)]
pub mod subnet_registry_diamond;
#[allow(clippy::all)]
pub mod top_down_finality_facet;
#[allow(clippy::all)]
pub mod xnet_messaging_facet;
// The list of contracts need to convert FvmAddress to fvm_shared::Address
fvm_address_conversion!(gateway_manager_facet);
fvm_address_conversion!(gateway_getter_facet);
fvm_address_conversion!(xnet_messaging_facet);
fvm_address_conversion!(gateway_messenger_facet);
fvm_address_conversion!(subnet_actor_checkpointing_facet);
fvm_address_conversion!(subnet_actor_getter_facet);
fvm_address_conversion!(lib_gateway);
// The list of contracts that need to convert common types between each other
common_type_conversion!(subnet_actor_getter_facet, checkpointing_facet);
common_type_conversion!(subnet_actor_getter_facet, xnet_messaging_facet);
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/contracts/binding/src/convert.rs | contracts/binding/src/convert.rs | /// The type conversion for fvm address to evm solidity contracts. We need this convenient macro because
/// the abigen is creating the same struct but under different modules. This save a lot of
/// code.
macro_rules! fvm_address_conversion {
($module:ident) => {
impl TryFrom<fvm_shared::address::Address> for $module::FvmAddress {
type Error = anyhow::Error;
fn try_from(value: fvm_shared::address::Address) -> Result<Self, Self::Error> {
Ok($module::FvmAddress {
addr_type: value.protocol() as u8,
payload: $crate::convert::addr_payload_to_bytes(value.into_payload())?,
})
}
}
impl TryFrom<$module::FvmAddress> for fvm_shared::address::Address {
type Error = anyhow::Error;
fn try_from(value: $module::FvmAddress) -> Result<Self, Self::Error> {
let protocol = value.addr_type;
let addr = $crate::convert::bytes_to_fvm_addr(protocol, &value.payload)?;
Ok(addr)
}
}
};
}
/// There are common types between the different facets, such as SubnetID. A util macro that handles the common
/// type conversions
macro_rules! common_type_conversion {
($module1:ident, $module2:ident) => {
impl From<$module1::SubnetID> for $module2::SubnetID {
fn from(value: $module1::SubnetID) -> Self {
$module2::SubnetID {
root: value.root,
route: value.route,
}
}
}
impl From<$module2::SubnetID> for $module1::SubnetID {
fn from(value: $module2::SubnetID) -> Self {
$module1::SubnetID {
root: value.root,
route: value.route,
}
}
}
};
}
/// Converts a Rust type FVM address into its underlying payload
/// so it can be represented internally in a Solidity contract.
pub(crate) fn addr_payload_to_bytes(
payload: fvm_shared::address::Payload,
) -> anyhow::Result<ethers::types::Bytes> {
match payload {
fvm_shared::address::Payload::Secp256k1(v) => Ok(ethers::types::Bytes::from(v)),
fvm_shared::address::Payload::Delegated(d) => {
let addr = d.subaddress();
let b = ethers::abi::encode(&[ethers::abi::Token::Tuple(vec![
ethers::abi::Token::Uint(ethers::types::U256::from(d.namespace())),
ethers::abi::Token::Uint(ethers::types::U256::from(addr.len())),
ethers::abi::Token::Bytes(addr.to_vec()),
])]);
Ok(ethers::types::Bytes::from(b))
}
_ => Err(anyhow::anyhow!("Invalid payload type")),
}
}
/// It takes the bytes from an FVMAddress represented in Solidity and
/// converts it into the corresponding FVM address Rust type.
pub(crate) fn bytes_to_fvm_addr(
protocol: u8,
bytes: &[u8],
) -> anyhow::Result<fvm_shared::address::Address> {
let addr = match protocol {
1 => {
let merged = [[1u8].as_slice(), bytes].concat();
fvm_shared::address::Address::from_bytes(&merged)?
}
4 => {
let mut data = ethers::abi::decode(
&[ethers::abi::ParamType::Tuple(vec![
ethers::abi::ParamType::Uint(32),
ethers::abi::ParamType::Uint(32),
ethers::abi::ParamType::Bytes,
])],
bytes,
)?;
let mut data = data
.pop()
.ok_or_else(|| anyhow::anyhow!("invalid tuple data length"))?
.into_tuple()
.ok_or_else(|| anyhow::anyhow!("not tuple"))?;
let raw_bytes = data
.pop()
.ok_or_else(|| anyhow::anyhow!("invalid length, should be 3"))?
.into_bytes()
.ok_or_else(|| anyhow::anyhow!("invalid bytes"))?;
let len = data
.pop()
.ok_or_else(|| anyhow::anyhow!("invalid length, should be 3"))?
.into_uint()
.ok_or_else(|| anyhow::anyhow!("invalid uint"))?
.as_u128();
let namespace = data
.pop()
.ok_or_else(|| anyhow::anyhow!("invalid length, should be 3"))?
.into_uint()
.ok_or_else(|| anyhow::anyhow!("invalid uint"))?
.as_u64();
if len as usize != raw_bytes.len() {
return Err(anyhow::anyhow!("bytes len not match"));
}
fvm_shared::address::Address::new_delegated(namespace, &raw_bytes)?
}
_ => return Err(anyhow::anyhow!("address not support now")),
};
Ok(addr)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/provider_cache.rs | ipld/resolver/src/provider_cache.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use std::collections::{HashMap, HashSet};
use ipc_api::subnet_id::SubnetID;
use libp2p::PeerId;
use crate::{provider_record::ProviderRecord, Timestamp};
/// Change in the supported subnets of a peer.
#[derive(Debug)]
pub struct ProviderDelta {
pub is_new: bool,
pub added: Vec<SubnetID>,
pub removed: Vec<SubnetID>,
}
impl ProviderDelta {
pub fn is_empty(&self) -> bool {
self.added.is_empty() && self.removed.is_empty()
}
}
/// Track which subnets are provided for by which set of peers.
pub struct SubnetProviderCache {
/// Maximum number of subnets to track, to protect against DoS attacks, trying to
/// flood someone with subnets that don't actually exist. When the number of subnets
/// reaches this value, we remove the subnet with the smallest number of providers;
/// hopefully this would be a subnet
max_subnets: usize,
/// User defined list of subnets which will never be pruned. This can be used to
/// ward off attacks that would prevent us from adding subnets we know we want to
/// support, and not rely on dynamic discovery of their peers.
pinned_subnets: HashSet<SubnetID>,
/// Set of peers with known addresses. Only such peers can be added to the cache.
routable_peers: HashSet<PeerId>,
/// List of peer IDs supporting each subnet.
subnet_providers: HashMap<SubnetID, HashSet<PeerId>>,
/// Timestamp of the last record received about a peer.
peer_timestamps: HashMap<PeerId, Timestamp>,
}
impl SubnetProviderCache {
pub fn new(max_subnets: usize, static_subnets: Vec<SubnetID>) -> Self {
Self {
pinned_subnets: HashSet::from_iter(static_subnets),
max_subnets,
routable_peers: Default::default(),
subnet_providers: Default::default(),
peer_timestamps: Default::default(),
}
}
/// Pin a subnet, after which it won't be pruned.
pub fn pin_subnet(&mut self, subnet_id: SubnetID) {
self.pinned_subnets.insert(subnet_id);
}
/// Unpin a subnet, which allows it to be pruned.
pub fn unpin_subnet(&mut self, subnet_id: &SubnetID) {
self.pinned_subnets.remove(subnet_id);
}
/// Mark a peer as routable.
///
/// Once routable, the cache will keep track of provided subnets.
pub fn set_routable(&mut self, peer_id: PeerId) {
self.routable_peers.insert(peer_id);
}
/// Mark a previously routable peer as unroutable.
///
/// Once unroutable, the cache will stop tracking the provided subnets.
pub fn set_unroutable(&mut self, peer_id: PeerId) {
self.routable_peers.remove(&peer_id);
self.peer_timestamps.remove(&peer_id);
for providers in self.subnet_providers.values_mut() {
providers.remove(&peer_id);
}
}
/// Number of routable peers.
pub fn num_routable(&mut self) -> usize {
self.routable_peers.len()
}
/// Check if a peer has been marked as routable.
pub fn is_routable(&self, peer_id: &PeerId) -> bool {
self.routable_peers.contains(peer_id)
}
/// Check whether we have received recent updates from a peer.
pub fn has_timestamp(&self, peer_id: &PeerId) -> bool {
self.peer_timestamps.contains_key(peer_id)
}
/// Try to add a provider to the cache.
///
/// Returns `None` if the peer is not routable and nothing could be added.
///
/// Returns `Some` if the peer is routable, containing the newly added
/// and newly removed associations for this peer.
pub fn add_provider(&mut self, record: &ProviderRecord) -> Option<ProviderDelta> {
if !self.is_routable(&record.peer_id) {
return None;
}
let mut delta = ProviderDelta {
is_new: !self.has_timestamp(&record.peer_id),
added: Vec::new(),
removed: Vec::new(),
};
let timestamp = self.peer_timestamps.entry(record.peer_id).or_default();
if *timestamp < record.timestamp {
*timestamp = record.timestamp;
// The currently supported subnets of the peer.
let mut subnet_ids = HashSet::new();
subnet_ids.extend(record.subnet_ids.iter());
// Remove the peer from subnets it no longer supports.
for (subnet_id, peer_ids) in self.subnet_providers.iter_mut() {
if !subnet_ids.contains(subnet_id) && peer_ids.remove(&record.peer_id) {
delta.removed.push(subnet_id.clone());
}
}
// Add peer to new subnets it supports now.
for subnet_id in record.subnet_ids.iter() {
let peer_ids = self.subnet_providers.entry(subnet_id.clone()).or_default();
if peer_ids.insert(record.peer_id) {
delta.added.push(subnet_id.clone());
}
}
// Remove subnets that have been added but are too small to survive a pruning.
let removed_subnet_ids = self.prune_subnets();
delta.added.retain(|id| !removed_subnet_ids.contains(id))
}
Some(delta)
}
/// Ensure we don't have more than `max_subnets` number of subnets in the cache.
///
/// Returns the removed subnet IDs.
fn prune_subnets(&mut self) -> HashSet<SubnetID> {
let mut removed_subnet_ids = HashSet::new();
let to_prune = self.subnet_providers.len().saturating_sub(self.max_subnets);
if to_prune > 0 {
let mut counts = self
.subnet_providers
.iter()
.map(|(id, ps)| (id.clone(), ps.len()))
.collect::<Vec<_>>();
counts.sort_by_key(|(_, count)| *count);
for (subnet_id, _) in counts {
if self.pinned_subnets.contains(&subnet_id) {
continue;
}
self.subnet_providers.remove(&subnet_id);
removed_subnet_ids.insert(subnet_id);
if removed_subnet_ids.len() == to_prune {
break;
}
}
}
removed_subnet_ids
}
/// Prune any provider which hasn't provided an update since a cutoff timestamp.
///
/// Returns the list of pruned peers.
pub fn prune_providers(&mut self, cutoff_timestamp: Timestamp) -> Vec<PeerId> {
let to_prune = self
.peer_timestamps
.iter()
.filter_map(|(id, ts)| {
if *ts < cutoff_timestamp {
Some(*id)
} else {
None
}
})
.collect::<Vec<_>>();
for peer_id in to_prune.iter() {
self.set_unroutable(*peer_id);
}
to_prune
}
/// List any known providers of a subnet.
pub fn providers_of_subnet(&self, subnet_id: &SubnetID) -> Vec<PeerId> {
self.subnet_providers
.get(subnet_id)
.map(|hs| hs.iter().cloned().collect())
.unwrap_or_default()
}
}
#[cfg(test)]
mod tests {
use std::collections::{HashMap, HashSet};
use ipc_api::subnet_id::SubnetID;
use libp2p::{identity::Keypair, PeerId};
use quickcheck::Arbitrary;
use quickcheck_macros::quickcheck;
use crate::{arb::ArbSubnetID, provider_record::ProviderRecord, Timestamp};
use super::SubnetProviderCache;
#[derive(Debug, Clone)]
struct TestRecords(Vec<ProviderRecord>);
// Limited number of records from a limited set of peers.
impl Arbitrary for TestRecords {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let rc = usize::arbitrary(g) % 20;
let pc = 1 + rc / 2;
let mut ps = Vec::new();
let mut rs = Vec::new();
for _ in 0..pc {
let pk = Keypair::generate_ed25519();
let peer_id = pk.public().to_peer_id();
ps.push(peer_id)
}
for _ in 0..rc {
let peer_id = ps[usize::arbitrary(g) % ps.len()];
let mut subnet_ids = Vec::new();
for _ in 0..usize::arbitrary(g) % 5 {
subnet_ids.push(ArbSubnetID::arbitrary(g).0)
}
let record = ProviderRecord {
peer_id,
subnet_ids,
timestamp: Timestamp::arbitrary(g),
};
rs.push(record)
}
Self(rs)
}
}
type Providers = HashMap<SubnetID, HashSet<PeerId>>;
/// Build a provider mapping to check the cache against.
fn build_providers(records: &Vec<ProviderRecord>) -> Providers {
// Only the last timestamp should be kept, but it might not be unique.
let mut max_timestamps: HashMap<PeerId, Timestamp> = Default::default();
for record in records {
let mts = max_timestamps.entry(record.peer_id).or_default();
if *mts < record.timestamp {
*mts = record.timestamp;
}
}
let mut providers: HashMap<SubnetID, HashSet<PeerId>> = Default::default();
let mut seen: HashSet<PeerId> = Default::default();
for record in records {
if record.timestamp != max_timestamps[&record.peer_id] {
continue;
}
if !seen.insert(record.peer_id) {
continue;
}
for subnet_id in record.subnet_ids.iter() {
providers
.entry(subnet_id.clone())
.or_default()
.insert(record.peer_id);
}
}
providers
}
/// Check the cache against the reference built in the test.
fn check_providers(providers: &Providers, cache: &SubnetProviderCache) -> Result<(), String> {
for (subnet_id, exp_peer_ids) in providers {
let peer_ids = cache.providers_of_subnet(subnet_id);
if peer_ids.len() != exp_peer_ids.len() {
return Err(format!(
"expected {} peers, got {} in subnet {:?}",
exp_peer_ids.len(),
peer_ids.len(),
subnet_id
));
}
for peer_id in peer_ids {
if !exp_peer_ids.contains(&peer_id) {
return Err("wrong peer ID".into());
}
}
}
Ok(())
}
#[quickcheck]
fn prop_subnets_pruned(records: TestRecords, max_subnets: usize) -> bool {
let max_subnets = max_subnets % 10;
let mut cache = SubnetProviderCache::new(max_subnets, Vec::new());
for record in records.0 {
cache.set_routable(record.peer_id);
if cache.add_provider(&record).is_none() {
return false;
}
}
cache.subnet_providers.len() <= max_subnets
}
#[quickcheck]
fn prop_subnets_pinned(records: TestRecords) -> Result<(), String> {
// Find two subnets to pin.
let providers = build_providers(&records.0);
if providers.len() < 2 {
return Ok(());
}
let subnets = providers.keys().take(2).collect::<Vec<_>>();
let mut cache = SubnetProviderCache::new(3, vec![subnets[0].clone()]);
cache.pin_subnet(subnets[1].clone());
for record in records.0 {
cache.set_routable(record.peer_id);
cache.add_provider(&record);
}
if !cache.subnet_providers.contains_key(subnets[0]) {
return Err("static subnet not found".into());
}
if !cache.subnet_providers.contains_key(subnets[1]) {
return Err("pinned subnet not found".into());
}
Ok(())
}
#[quickcheck]
fn prop_providers_listed(records: TestRecords) -> Result<(), String> {
let records = records.0;
let mut cache = SubnetProviderCache::new(usize::MAX, Vec::new());
for record in records.iter() {
cache.set_routable(record.peer_id);
cache.add_provider(record);
}
let providers = build_providers(&records);
check_providers(&providers, &cache)
}
#[quickcheck]
fn prop_providers_pruned(
records: TestRecords,
cutoff_timestamp: Timestamp,
) -> Result<(), String> {
let mut records = records.0;
let mut cache = SubnetProviderCache::new(usize::MAX, Vec::new());
for record in records.iter() {
cache.set_routable(record.peer_id);
cache.add_provider(record);
}
cache.prune_providers(cutoff_timestamp);
// Build a reference from only what has come after the cutoff timestamp.
records.retain(|r| r.timestamp >= cutoff_timestamp);
let providers = build_providers(&records);
check_providers(&providers, &cache)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/signed_record.rs | ipld/resolver/src/signed_record.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use libp2p::core::signed_envelope;
use libp2p::identity::PublicKey;
use libp2p::{core::SignedEnvelope, identity::Keypair};
use serde::de::DeserializeOwned;
use serde::Serialize;
const DOMAIN_SEP: &str = "/ipc/ipld/resolver";
pub trait Record {
/// Payload type for the [`SignedEnvelope`].
fn payload_type() -> &'static str;
/// Check that the [`PublicKey`] recovered from the [`SignedEnvelope`]
/// is consistent with the payload.
fn check_signing_key(&self, key: &PublicKey) -> bool;
}
/// A [`ProviderRecord`] with a [`SignedEnvelope`] proving that the
/// peer indeed is ready to provide the data for the listed subnets.
#[derive(Debug, Clone)]
pub struct SignedRecord<R> {
/// The deserialized and validated record.
record: R,
/// The [`SignedEnvelope`] from which the record was deserialized from.
envelope: SignedEnvelope,
}
// Based on `libp2p_core::peer_record::PeerRecord`
impl<R> SignedRecord<R>
where
R: Record + Serialize + DeserializeOwned,
{
/// Create a new [`SignedRecord`] with a signed envelope
/// which can be shared with others.
pub fn new(key: &Keypair, record: R) -> anyhow::Result<Self> {
let payload = fvm_ipld_encoding::to_vec(&record)?;
let envelope = SignedEnvelope::new(
key,
DOMAIN_SEP.to_owned(),
R::payload_type().as_bytes().to_vec(),
payload,
)?;
Ok(Self { record, envelope })
}
pub fn from_signed_envelope(envelope: SignedEnvelope) -> Result<Self, FromEnvelopeError> {
let (payload, signing_key) = envelope
.payload_and_signing_key(DOMAIN_SEP.to_owned(), R::payload_type().as_bytes())?;
let record = fvm_ipld_encoding::from_slice::<R>(payload)?;
if !record.check_signing_key(signing_key) {
return Err(FromEnvelopeError::MismatchedSignature);
}
Ok(Self { record, envelope })
}
/// Deserialize then check the domain tags and the signature.
pub fn from_bytes(bytes: &[u8]) -> anyhow::Result<Self> {
let envelope = SignedEnvelope::from_protobuf_encoding(bytes)?;
let signed_record = Self::from_signed_envelope(envelope)?;
Ok(signed_record)
}
pub fn record(&self) -> &R {
&self.record
}
pub fn envelope(&self) -> &SignedEnvelope {
&self.envelope
}
pub fn into_record(self) -> R {
self.record
}
pub fn into_envelope(self) -> SignedEnvelope {
self.envelope
}
}
#[derive(thiserror::Error, Debug)]
pub enum FromEnvelopeError {
/// Failed to extract the payload from the envelope.
#[error("Failed to extract payload from envelope")]
BadPayload(#[from] signed_envelope::ReadPayloadError),
/// Failed to decode the provided bytes as the record type.
#[error("Failed to decode bytes as record")]
InvalidRecord(#[from] fvm_ipld_encoding::Error),
/// The signer of the envelope is different than the peer id in the record.
#[error("The signer of the envelope is different than the peer id in the record")]
MismatchedSignature,
}
#[cfg(test)]
pub mod tests {
use fvm_ipld_encoding::de::DeserializeOwned;
use libp2p::core::SignedEnvelope;
use serde::Serialize;
use super::{Record, SignedRecord};
pub fn prop_roundtrip<R>(signed_record: SignedRecord<R>) -> bool
where
R: Serialize + DeserializeOwned + Record + PartialEq,
{
let envelope_bytes = signed_record.envelope().clone().into_protobuf_encoding();
let envelope =
SignedEnvelope::from_protobuf_encoding(&envelope_bytes).expect("envelope roundtrip");
let signed_record2 =
SignedRecord::<R>::from_signed_envelope(envelope).expect("record roundtrip");
signed_record2.into_record() == *signed_record.record()
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/stats.rs | ipld/resolver/src/stats.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use lazy_static::lazy_static;
use prometheus::{Histogram, HistogramOpts, IntCounter, IntGauge, Registry};
macro_rules! metrics {
($($name:ident : $type:ty = $make:expr);* $(;)?) => {
$(
lazy_static! {
pub static ref $name: $type = $make.unwrap();
}
)*
pub fn register_metrics(registry: &Registry) -> anyhow::Result<()> {
$(registry.register(Box::new($name.clone()))?;)*
Ok(())
}
};
}
metrics! {
PING_RTT: Histogram =
Histogram::with_opts(HistogramOpts::new("ping_rtt", "Ping roundtrip time"));
PING_TIMEOUT: IntCounter =
IntCounter::new("ping_timeouts", "Number of timed out pings");
PING_FAILURE: IntCounter =
IntCounter::new("ping_failure", "Number of failed pings");
PING_SUCCESS: IntCounter =
IntCounter::new("ping_success", "Number of successful pings",);
IDENTIFY_FAILURE: IntCounter =
IntCounter::new("identify_failure", "Number of Identify errors",);
IDENTIFY_RECEIVED: IntCounter =
IntCounter::new("identify_received", "Number of Identify infos received",);
DISCOVERY_BACKGROUND_LOOKUP: IntCounter = IntCounter::new(
"discovery_background_lookup",
"Number of background lookups started",
);
DISCOVERY_CONNECTED_PEERS: IntGauge =
IntGauge::new("discovery_connected_peers", "Number of connections",);
MEMBERSHIP_SKIPPED_PEERS: IntCounter =
IntCounter::new("membership_skipped_peers", "Number of providers skipped",);
MEMBERSHIP_ROUTABLE_PEERS: IntGauge =
IntGauge::new("membership_routable_peers", "Number of routable peers");
MEMBERSHIP_PROVIDER_PEERS: IntGauge =
IntGauge::new("membership_provider_peers", "Number of unique providers");
MEMBERSHIP_UNKNOWN_TOPIC: IntCounter = IntCounter::new(
"membership_unknown_topic",
"Number of messages with unknown topic"
);
MEMBERSHIP_INVALID_MESSAGE: IntCounter = IntCounter::new(
"membership_invalid_message",
"Number of invalid messages received"
);
MEMBERSHIP_PUBLISH_SUCCESS: IntCounter = IntCounter::new(
"membership_publish_total", "Number of published messages"
);
MEMBERSHIP_PUBLISH_FAILURE: IntCounter = IntCounter::new(
"membership_publish_failure",
"Number of failed publish attempts"
);
CONTENT_RESOLVE_RUNNING: IntGauge = IntGauge::new(
"content_resolve_running",
"Number of currently running content resolutions"
);
CONTENT_RESOLVE_NO_PEERS: IntCounter = IntCounter::new(
"content_resolve_no_peers",
"Number of resolutions with no known peers"
);
CONTENT_RESOLVE_SUCCESS: IntCounter = IntCounter::new(
"content_resolve_success",
"Number of successful resolutions"
);
CONTENT_RESOLVE_FAILURE: IntCounter = IntCounter::new(
"content_resolve_failure",
"Number of failed resolutions"
);
CONTENT_RESOLVE_FALLBACK: IntCounter = IntCounter::new(
"content_resolve_fallback",
"Number of resolutions that fall back on secondary peers"
);
CONTENT_RESOLVE_PEERS: Histogram = Histogram::with_opts(HistogramOpts::new(
"content_resolve_peers",
"Number of peers found for resolution from a subnet"
));
CONTENT_CONNECTED_PEERS: Histogram = Histogram::with_opts(HistogramOpts::new(
"content_connected_peers",
"Number of connected peers in a resolution"
));
CONTENT_RATE_LIMITED: IntCounter = IntCounter::new(
"content_rate_limited",
"Number of rate limited requests"
);
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/lib.rs | ipld/resolver/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
mod behaviour;
mod client;
mod hash;
mod limiter;
mod service;
mod stats;
mod timestamp;
mod provider_cache;
mod provider_record;
mod signed_record;
mod vote_record;
#[cfg(any(test, feature = "arb"))]
mod arb;
#[cfg(feature = "missing_blocks")]
pub mod missing_blocks;
pub use behaviour::{ContentConfig, DiscoveryConfig, MembershipConfig, NetworkConfig};
pub use client::{Client, Resolver};
pub use service::{Config, ConnectionConfig, Event, NoKnownPeers, Service};
pub use timestamp::Timestamp;
pub use vote_record::{ValidatorKey, VoteRecord};
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/vote_record.rs | ipld/resolver/src/vote_record.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use ipc_api::subnet_id::SubnetID;
use libp2p::identity::{Keypair, PublicKey};
use serde::de::{DeserializeOwned, Error};
use serde::{Deserialize, Serialize};
use crate::{
signed_record::{Record, SignedRecord},
Timestamp,
};
/// The basic idea is that validators, identified by their public key,
/// vote about things regarding the subnet in which they participate.
#[derive(Debug, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ValidatorKey(PublicKey);
impl Serialize for ValidatorKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let bz = self.0.encode_protobuf();
bz.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for ValidatorKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let bz = Vec::<u8>::deserialize(deserializer)?;
match PublicKey::try_decode_protobuf(&bz) {
Ok(pk) => Ok(Self(pk)),
Err(e) => Err(D::Error::custom(format!("error decoding PublicKey: {e}"))),
}
}
}
impl From<PublicKey> for ValidatorKey {
fn from(value: PublicKey) -> Self {
Self(value)
}
}
impl From<ValidatorKey> for PublicKey {
fn from(value: ValidatorKey) -> Self {
value.0
}
}
impl From<libsecp256k1::PublicKey> for ValidatorKey {
fn from(value: libsecp256k1::PublicKey) -> Self {
let public_key =
libp2p::identity::secp256k1::PublicKey::try_from_bytes(&value.serialize_compressed())
.expect("secp256k1 public key");
Self::from(PublicKey::from(public_key))
}
}
/// Vote by a validator about the validity/availability/finality
/// of something in a given subnet.
#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
pub struct VoteRecord<C> {
/// Public key of the validator.
pub public_key: ValidatorKey,
/// The subnet in which the vote is valid, to prevent a vote on the same subject
/// in one subnet being replayed by an attacker on a different subnet.
pub subnet_id: SubnetID,
/// The content the vote is about.
pub content: C,
/// Timestamp to thwart potential replay attacks.
pub timestamp: Timestamp,
}
impl<C> Record for VoteRecord<C> {
fn payload_type() -> &'static str {
"/ipc/vote-record"
}
fn check_signing_key(&self, key: &PublicKey) -> bool {
self.public_key.0 == *key
}
}
pub type SignedVoteRecord<C> = SignedRecord<VoteRecord<C>>;
impl<C> VoteRecord<C>
where
C: Serialize + DeserializeOwned,
{
/// Create a new [`SignedVoteRecord`] with the current timestamp
/// and a signed envelope which can be shared with others.
pub fn signed(
key: &Keypair,
subnet_id: SubnetID,
content: C,
) -> anyhow::Result<SignedVoteRecord<C>> {
let timestamp = Timestamp::now();
let record = VoteRecord {
public_key: ValidatorKey(key.public()),
subnet_id,
content,
timestamp,
};
let signed = SignedRecord::new(key, record)?;
Ok(signed)
}
}
#[cfg(any(test, feature = "arb"))]
mod arb {
use libp2p::identity::Keypair;
use quickcheck::Arbitrary;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::arb::ArbSubnetID;
use super::{SignedVoteRecord, VoteRecord};
/// Create a valid [`SignedVoteRecord`] with a random key.
impl<V> Arbitrary for SignedVoteRecord<V>
where
V: Arbitrary + Serialize + DeserializeOwned,
{
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let key = Keypair::generate_secp256k1();
let subnet_id = ArbSubnetID::arbitrary(g).0;
let content = V::arbitrary(g);
VoteRecord::signed(&key, subnet_id, content).expect("error creating signed envelope")
}
}
}
#[cfg(test)]
mod tests {
use quickcheck_macros::quickcheck;
use super::SignedVoteRecord;
#[quickcheck]
fn prop_roundtrip(signed_record: SignedVoteRecord<String>) -> bool {
crate::signed_record::tests::prop_roundtrip(signed_record)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/missing_blocks.rs | ipld/resolver/src/missing_blocks.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_ipld_blockstore::Blockstore;
use libipld::Cid;
use libipld::{prelude::*, store::StoreParams, Ipld};
/// Recursively find all [`Cid`] fields in the [`Block`] structures stored in the
/// [`Blockstore`] and return all CIDs which could *not* be retrieved from the store.
///
/// This function is available as a convenience, to be used by any [`BitswapStore`]
/// implementation as they see fit.
pub fn missing_blocks<BS: Blockstore, P: StoreParams>(
bs: &mut BS,
cid: &Cid,
) -> anyhow::Result<Vec<Cid>>
where
Ipld: References<<P as StoreParams>::Codecs>,
{
let mut stack = vec![*cid];
let mut missing = vec![];
while let Some(cid) = stack.pop() {
if let Some(data) = bs.get(&cid)? {
let block = libipld::Block::<P>::new_unchecked(cid, data);
block.references(&mut stack)?;
} else {
missing.push(cid);
}
}
Ok(missing)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/timestamp.rs | ipld/resolver/src/timestamp.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use serde::{Deserialize, Serialize};
use std::ops::{Add, Sub};
use std::time::{Duration, SystemTime};
/// Unix timestamp in seconds since epoch, which we can use to select the
/// more recent message during gossiping.
#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Debug, Serialize, Deserialize, Default)]
pub struct Timestamp(u64);
impl Timestamp {
/// Current timestamp.
pub fn now() -> Self {
let secs = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("now() is never before UNIX_EPOCH")
.as_secs();
Self(secs)
}
/// Seconds elapsed since Unix epoch.
pub fn as_secs(&self) -> u64 {
self.0
}
}
impl Sub<Duration> for Timestamp {
type Output = Self;
fn sub(self, rhs: Duration) -> Self {
Self(self.as_secs().saturating_sub(rhs.as_secs()))
}
}
impl Add<Duration> for Timestamp {
type Output = Self;
fn add(self, rhs: Duration) -> Self {
Self(self.as_secs().saturating_add(rhs.as_secs()))
}
}
#[cfg(any(test, feature = "arb"))]
mod arb {
use super::Timestamp;
impl quickcheck::Arbitrary for Timestamp {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self(u64::arbitrary(g).saturating_add(1))
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/client.rs | ipld/resolver/src/client.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use anyhow::anyhow;
use async_trait::async_trait;
use ipc_api::subnet_id::SubnetID;
use libipld::Cid;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot;
use crate::{
service::{Request, ResolveResult},
vote_record::SignedVoteRecord,
};
/// A facade to the [`Service`] to provide a nicer interface than message passing would allow on its own.
#[derive(Clone)]
pub struct Client<V> {
request_tx: UnboundedSender<Request<V>>,
}
impl<V> Client<V> {
pub(crate) fn new(request_tx: UnboundedSender<Request<V>>) -> Self {
Self { request_tx }
}
/// Send a request to the [`Service`], unless it has stopped listening.
fn send_request(&self, req: Request<V>) -> anyhow::Result<()> {
self.request_tx
.send(req)
.map_err(|_| anyhow!("disconnected"))
}
/// Set the complete list of subnets currently supported by this node.
pub fn set_provided_subnets(&self, subnet_ids: Vec<SubnetID>) -> anyhow::Result<()> {
let req = Request::SetProvidedSubnets(subnet_ids);
self.send_request(req)
}
/// Add a subnet supported by this node.
pub fn add_provided_subnet(&self, subnet_id: SubnetID) -> anyhow::Result<()> {
let req = Request::AddProvidedSubnet(subnet_id);
self.send_request(req)
}
/// Remove a subnet no longer supported by this node.
pub fn remove_provided_subnet(&self, subnet_id: SubnetID) -> anyhow::Result<()> {
let req = Request::RemoveProvidedSubnet(subnet_id);
self.send_request(req)
}
/// Add a subnet we know really exist and we are interested in them.
pub fn pin_subnet(&self, subnet_id: SubnetID) -> anyhow::Result<()> {
let req = Request::PinSubnet(subnet_id);
self.send_request(req)
}
/// Unpin a we are no longer interested in.
pub fn unpin_subnet(&self, subnet_id: SubnetID) -> anyhow::Result<()> {
let req = Request::UnpinSubnet(subnet_id);
self.send_request(req)
}
/// Update the rate limit based on new projections for the same timeframe
/// the `content::Behaviour` was originally configured with. This can be
/// used if we can't come up with a good estimate for the amount of data
/// we have to serve from the subnets we participate in, but we can adjust
/// them on the fly based on what we observe on chain.
pub fn update_rate_limit(&self, bytes: u32) -> anyhow::Result<()> {
let req = Request::UpdateRateLimit(bytes);
self.send_request(req)
}
/// Publish a signed vote into a topic based on its subnet.
pub fn publish_vote(&self, vote: SignedVoteRecord<V>) -> anyhow::Result<()> {
let req = Request::PublishVote(Box::new(vote));
self.send_request(req)
}
/// Publish pre-emptively to a subnet that agents in the parent subnet
/// would be subscribed to if they are interested in receiving data
/// before they would have to use [`Client::resolve`] instead.
pub fn publish_preemptive(&self, subnet_id: SubnetID, data: Vec<u8>) -> anyhow::Result<()> {
let req = Request::PublishPreemptive(subnet_id, data);
self.send_request(req)
}
}
/// Trait to limit the capabilities to resolving CIDs.
#[async_trait]
pub trait Resolver {
/// Send a CID for resolution from a specific subnet, await its completion,
/// then return the result, to be inspected by the caller.
///
/// Upon success, the data should be found in the store.
async fn resolve(&self, cid: Cid, subnet_id: SubnetID) -> anyhow::Result<ResolveResult>;
}
#[async_trait]
impl<V> Resolver for Client<V>
where
V: Sync + Send + 'static,
{
/// Send a CID for resolution from a specific subnet, await its completion,
/// then return the result, to be inspected by the caller.
///
/// Upon success, the data should be found in the store.
async fn resolve(&self, cid: Cid, subnet_id: SubnetID) -> anyhow::Result<ResolveResult> {
let (tx, rx) = oneshot::channel();
let req = Request::Resolve(cid, subnet_id, tx);
self.send_request(req)?;
let res = rx.await?;
Ok(res)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/provider_record.rs | ipld/resolver/src/provider_record.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use ipc_api::subnet_id::SubnetID;
use libp2p::identity::Keypair;
use libp2p::PeerId;
use serde::{Deserialize, Serialize};
use crate::{
signed_record::{Record, SignedRecord},
Timestamp,
};
/// Record of the ability to provide data from a list of subnets.
///
/// Note that each the record contains the snapshot of the currently provided
/// subnets, not a delta. This means that if there were two peers using the
/// same keys running on different addresses, e.g. if the same operator ran
/// something supporting subnet A on one address, and another process supporting
/// subnet B on a different address, these would override each other, unless
/// they have different public keys (and thus peer IDs) associated with them.
///
/// This should be okay, as in practice there is no significance to these
/// peer IDs, we can even generate a fresh key-pair every time we run the
/// resolver.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct ProviderRecord {
/// The ID of the peer we can contact to pull data from.
pub peer_id: PeerId,
/// The IDs of the subnets they are participating in.
pub subnet_ids: Vec<SubnetID>,
/// Timestamp from when the peer published this record.
///
/// We use a timestamp instead of just a nonce so that we
/// can drop records which are too old, indicating that
/// the peer has dropped off.
pub timestamp: Timestamp,
}
impl Record for ProviderRecord {
fn payload_type() -> &'static str {
"/ipc/provider-record"
}
fn check_signing_key(&self, key: &libp2p::identity::PublicKey) -> bool {
self.peer_id == key.to_peer_id()
}
}
pub type SignedProviderRecord = SignedRecord<ProviderRecord>;
impl ProviderRecord {
/// Create a new [`SignedProviderRecord`] with the current timestamp
/// and a signed envelope which can be shared with others.
pub fn signed(
key: &Keypair,
subnet_ids: Vec<SubnetID>,
) -> anyhow::Result<SignedProviderRecord> {
let timestamp = Timestamp::now();
let peer_id = key.public().to_peer_id();
let record = ProviderRecord {
peer_id,
subnet_ids,
timestamp,
};
let signed = SignedRecord::new(key, record)?;
Ok(signed)
}
}
#[cfg(any(test, feature = "arb"))]
mod arb {
use libp2p::identity::Keypair;
use quickcheck::Arbitrary;
use crate::arb::ArbSubnetID;
use super::{ProviderRecord, SignedProviderRecord};
/// Create a valid [`SignedProviderRecord`] with a random key.
impl Arbitrary for SignedProviderRecord {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
// NOTE: Unfortunately the keys themselves are not deterministic, nor is the Timestamp.
let key = match u8::arbitrary(g) % 2 {
0 => Keypair::generate_ed25519(),
_ => Keypair::generate_secp256k1(),
};
// Limit the number of subnets and the depth of keys so data generation doesn't take too long.
let mut subnet_ids = Vec::new();
for _ in 0..u8::arbitrary(g) % 5 {
let subnet_id = ArbSubnetID::arbitrary(g);
subnet_ids.push(subnet_id.0)
}
ProviderRecord::signed(&key, subnet_ids).expect("error creating signed envelope")
}
}
}
#[cfg(test)]
mod tests {
use libp2p::core::SignedEnvelope;
use quickcheck_macros::quickcheck;
use super::SignedProviderRecord;
#[quickcheck]
fn prop_roundtrip(signed_record: SignedProviderRecord) -> bool {
crate::signed_record::tests::prop_roundtrip(signed_record)
}
#[quickcheck]
fn prop_tamper_proof(signed_record: SignedProviderRecord, idx: usize) -> bool {
let envelope: libp2p::core::SignedEnvelope = signed_record.into_envelope();
let mut envelope_bytes = envelope.into_protobuf_encoding();
// Do some kind of mutation to a random byte in the envelope; after that it should not validate.
let idx = idx % envelope_bytes.len();
envelope_bytes[idx] = u8::MAX - envelope_bytes[idx];
match SignedEnvelope::from_protobuf_encoding(&envelope_bytes) {
Err(_) => true, // Corrupted the protobuf itself.
Ok(envelope) => SignedProviderRecord::from_signed_envelope(envelope).is_err(),
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/service.rs | ipld/resolver/src/service.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use std::collections::HashMap;
use std::time::Duration;
use anyhow::anyhow;
use bloom::{BloomFilter, ASMS};
use ipc_api::subnet_id::SubnetID;
use libipld::store::StoreParams;
use libipld::Cid;
use libp2p::futures::StreamExt;
use libp2p::swarm::SwarmEvent;
use libp2p::{
core::{muxing::StreamMuxerBox, transport::Boxed},
identity::Keypair,
noise, yamux, Multiaddr, PeerId, Swarm, Transport,
};
use libp2p::{identify, ping};
use libp2p_bitswap::{BitswapResponse, BitswapStore};
use libp2p_mplex::MplexConfig;
use log::{debug, error, info, trace, warn};
use prometheus::Registry;
use rand::seq::SliceRandom;
use serde::de::DeserializeOwned;
use serde::Serialize;
use tokio::select;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::sync::oneshot::{self, Sender};
use crate::behaviour::{
self, content, discovery, membership, Behaviour, BehaviourEvent, ConfigError, ContentConfig,
DiscoveryConfig, MembershipConfig, NetworkConfig,
};
use crate::client::Client;
use crate::stats;
use crate::vote_record::{SignedVoteRecord, VoteRecord};
/// Result of attempting to resolve a CID.
pub type ResolveResult = anyhow::Result<()>;
/// Channel to complete the results with.
type ResponseChannel = oneshot::Sender<ResolveResult>;
/// State of a query. The fallback peers can be used
/// if the current attempt fails.
struct Query {
cid: Cid,
subnet_id: SubnetID,
fallback_peer_ids: Vec<PeerId>,
response_channel: ResponseChannel,
}
/// Keeps track of where to send query responses to.
type QueryMap = HashMap<content::QueryId, Query>;
/// Error returned when we tried to get a CID from a subnet for
/// which we currently have no peers to contact
#[derive(thiserror::Error, Debug)]
#[error("No known peers for subnet {0}")]
pub struct NoKnownPeers(SubnetID);
#[derive(Debug, Clone)]
pub struct ConnectionConfig {
/// The address where we will listen to incoming connections.
pub listen_addr: Multiaddr,
/// A list of known external addresses this node is reachable on.
pub external_addresses: Vec<Multiaddr>,
/// Maximum number of incoming connections.
pub max_incoming: u32,
/// Expected number of peers, for sizing the Bloom filter.
pub expected_peer_count: u32,
/// Maximum number of peers to send Bitswap requests to in a single attempt.
pub max_peers_per_query: u32,
/// Maximum number of events in the push-based broadcast channel before a slow
/// consumer gets an error because it's falling behind.
pub event_buffer_capacity: u32,
}
#[derive(Debug, Clone)]
pub struct Config {
pub network: NetworkConfig,
pub discovery: DiscoveryConfig,
pub membership: MembershipConfig,
pub connection: ConnectionConfig,
pub content: ContentConfig,
}
/// Internal requests to enqueue to the [`Service`]
pub(crate) enum Request<V> {
SetProvidedSubnets(Vec<SubnetID>),
AddProvidedSubnet(SubnetID),
RemoveProvidedSubnet(SubnetID),
PublishVote(Box<SignedVoteRecord<V>>),
PublishPreemptive(SubnetID, Vec<u8>),
PinSubnet(SubnetID),
UnpinSubnet(SubnetID),
Resolve(Cid, SubnetID, ResponseChannel),
RateLimitUsed(PeerId, usize),
UpdateRateLimit(u32),
}
/// Events that arise from the subnets, pushed to the clients,
/// not part of a request-response action.
#[derive(Clone, Debug)]
pub enum Event<V> {
/// Received a vote about in a subnet about a CID.
ReceivedVote(Box<VoteRecord<V>>),
/// Received raw pre-emptive data published to a pinned subnet.
ReceivedPreemptive(SubnetID, Vec<u8>),
}
/// The `Service` handles P2P communication to resolve IPLD content by wrapping and driving a number of `libp2p` behaviours.
pub struct Service<P, V>
where
P: StoreParams,
V: Serialize + DeserializeOwned + Send + 'static,
{
peer_id: PeerId,
listen_addr: Multiaddr,
swarm: Swarm<Behaviour<P, V>>,
/// To match finished queries to response channels.
queries: QueryMap,
/// For receiving requests from the clients and self.
request_rx: mpsc::UnboundedReceiver<Request<V>>,
/// For creating new clients and sending messages to self.
request_tx: mpsc::UnboundedSender<Request<V>>,
/// For broadcasting events to all clients.
event_tx: broadcast::Sender<Event<V>>,
/// To avoid looking up the same peer over and over.
background_lookup_filter: BloomFilter,
/// To limit the number of peers contacted in a Bitswap resolution attempt.
max_peers_per_query: usize,
}
impl<P, V> Service<P, V>
where
P: StoreParams,
V: Serialize + DeserializeOwned + Clone + Send + 'static,
{
/// Build a [`Service`] and a [`Client`] with the default `tokio` transport.
pub fn new<S>(config: Config, store: S) -> Result<Self, ConfigError>
where
S: BitswapStore<Params = P>,
{
Self::new_with_transport(config, store, build_transport)
}
/// Build a [`Service`] and a [`Client`] by passing in a transport factory function.
///
/// The main goal is to be facilitate testing with a [`MemoryTransport`].
pub fn new_with_transport<S, F>(
config: Config,
store: S,
transport: F,
) -> Result<Self, ConfigError>
where
S: BitswapStore<Params = P>,
F: FnOnce(Keypair) -> Boxed<(PeerId, StreamMuxerBox)>,
{
let peer_id = config.network.local_peer_id();
let transport = transport(config.network.local_key.clone());
let behaviour = Behaviour::new(
config.network,
config.discovery,
config.membership,
config.content,
store,
)?;
// NOTE: Hardcoded values from Forest. Will leave them as is until we know we need to change.
// TODO: Where this these go? Used to be `SwarmBuilder::connection_limits`
// let _limits = ConnectionLimits::default()
// .with_max_pending_incoming(Some(10))
// .with_max_pending_outgoing(Some(30))
// .with_max_established_incoming(Some(config.connection.max_incoming))
// .with_max_established_outgoing(None) // Allow bitswap to connect to subnets we did not anticipate when we started.
// .with_max_established_per_peer(Some(5));
//.connection_limits(limits)
//.notify_handler_buffer_size(std::num::NonZeroUsize::new(20).expect("Not zero"))
//.connection_event_buffer_size(64)
//.build();
let mut swarm = Swarm::new(
transport,
behaviour,
peer_id,
libp2p::swarm::Config::with_tokio_executor(),
);
for addr in config.connection.external_addresses {
swarm.add_external_address(addr)
}
let (request_tx, request_rx) = mpsc::unbounded_channel();
let (event_tx, _) = broadcast::channel(config.connection.event_buffer_capacity as usize);
let service = Self {
peer_id,
listen_addr: config.connection.listen_addr,
swarm,
queries: Default::default(),
request_rx,
request_tx,
event_tx,
background_lookup_filter: BloomFilter::with_rate(
0.1,
config.connection.expected_peer_count,
),
max_peers_per_query: config.connection.max_peers_per_query as usize,
};
Ok(service)
}
/// Create a new [`Client`] instance bound to this `Service`.
///
/// The [`Client`] is geared towards request-response interactions,
/// while the `Receiver` returned by `subscribe` is used for events
/// which weren't initiated by the `Client`.
pub fn client(&self) -> Client<V> {
Client::new(self.request_tx.clone())
}
/// Create a new [`broadcast::Receiver`] instance bound to this `Service`,
/// which will be notified upon each event coming from any of the subnets
/// the `Service` is subscribed to.
///
/// The consumers are expected to process events quick enough to be within
/// the configured capacity of the broadcast channel, or otherwise be able
/// to deal with message loss if they fall behind.
///
/// # Notes
///
/// This is not part of the [`Client`] because `Receiver::recv` takes
/// a mutable reference and it would prevent the [`Client`] being used
/// for anything else.
///
/// One alternative design would be to accept an interface similar to
/// [`BitswapStore`] that we can pass events to. In that case we would
/// have to create an internal event queue to stand in front of it,
/// and because these events arrive from the outside, it would still
/// have to have limited capacity.
///
/// Because the channel has limited capacity, we have to take care not
/// to use it for signaling critical events that we want to await upon.
/// For example if we used this to signal the readiness of bootstrapping,
/// we should make sure we have not yet subscribed to external events
/// which could drown it out.
///
/// One way to achieve this is for the consumer of the events to redistribute
/// them into priorities event queues, some bounded, some unbounded.
pub fn subscribe(&self) -> broadcast::Receiver<Event<V>> {
self.event_tx.subscribe()
}
/// Register Prometheus metrics.
pub fn register_metrics(&mut self, registry: &Registry) -> anyhow::Result<()> {
self.content_mut().register_metrics(registry)?;
stats::register_metrics(registry)?;
Ok(())
}
/// Start the swarm listening for incoming connections and drive the events forward.
pub async fn run(mut self) -> anyhow::Result<()> {
// Start the swarm.
info!("running service on {}", self.listen_addr);
Swarm::listen_on(&mut self.swarm, self.listen_addr.clone())?;
loop {
select! {
swarm_event = self.swarm.next() => match swarm_event {
// Events raised by our behaviours.
Some(SwarmEvent::Behaviour(event)) => {
self.handle_behaviour_event(event)
},
// Connection events are handled by the behaviours, passed directly from the Swarm.
Some(_) => { },
// The connection is closed.
None => { break; },
},
request = self.request_rx.recv() => match request {
// A Client sent us a request.
Some(req) => self.handle_request(req),
// This shouldn't happen because the service has a copy of the sender.
// All Client instances have been dropped.
None => { break; }
}
};
}
Ok(())
}
/// Handle events that the [`NetworkBehaviour`] macro generated for our [`Behaviour`], one for each field.
fn handle_behaviour_event(&mut self, event: BehaviourEvent<P, V>) {
match event {
BehaviourEvent::Ping(e) => self.handle_ping_event(e),
BehaviourEvent::Identify(e) => self.handle_identify_event(e),
BehaviourEvent::Discovery(e) => self.handle_discovery_event(e),
BehaviourEvent::Membership(e) => self.handle_membership_event(e),
BehaviourEvent::Content(e) => self.handle_content_event(e),
}
}
// Copied from Forest.
fn handle_ping_event(&mut self, event: ping::Event) {
let peer_id = event.peer.to_base58();
match event.result {
Ok(rtt) => {
stats::PING_SUCCESS.inc();
stats::PING_RTT.observe(rtt.as_millis() as f64);
trace!(
"PingSuccess::Ping rtt to {} from {} is {} ms",
peer_id,
self.peer_id,
rtt.as_millis()
);
}
Err(ping::Failure::Timeout) => {
stats::PING_TIMEOUT.inc();
debug!("PingFailure::Timeout from {peer_id} to {}", self.peer_id);
}
Err(ping::Failure::Other { error }) => {
stats::PING_FAILURE.inc();
warn!(
"PingFailure::Other from {peer_id} to {}: {error}",
self.peer_id
);
}
Err(ping::Failure::Unsupported) => {
warn!("Should ban peer {peer_id} due to protocol error");
// TODO: How do we ban peers in 0.53 ?
// see https://github.com/libp2p/rust-libp2p/pull/3590/files
// self.swarm.ban_peer_id(event.peer);
}
}
}
fn handle_identify_event(&mut self, event: identify::Event) {
if let identify::Event::Error { peer_id, error } = event {
stats::IDENTIFY_FAILURE.inc();
warn!("Error identifying {peer_id}: {error}")
} else if let identify::Event::Received { peer_id, info } = event {
stats::IDENTIFY_RECEIVED.inc();
debug!("protocols supported by {peer_id}: {:?}", info.protocols);
debug!("adding identified address of {peer_id} to {}", self.peer_id);
self.discovery_mut().add_identified(&peer_id, info);
}
}
fn handle_discovery_event(&mut self, event: discovery::Event) {
match event {
discovery::Event::Added(peer_id, _) => {
debug!("adding routable peer {peer_id} to {}", self.peer_id);
self.membership_mut().set_routable(peer_id)
}
discovery::Event::Removed(peer_id) => {
debug!("removing unroutable peer {peer_id} from {}", self.peer_id);
self.membership_mut().set_unroutable(peer_id)
}
}
}
fn handle_membership_event(&mut self, event: membership::Event<V>) {
match event {
membership::Event::Skipped(peer_id) => {
debug!("skipped adding provider {peer_id} to {}", self.peer_id);
// Don't repeatedly look up peers we can't add to the routing table.
if self.background_lookup_filter.insert(&peer_id) {
debug!(
"triggering background lookup of {peer_id} on {}",
self.peer_id
);
self.discovery_mut().background_lookup(peer_id)
}
}
membership::Event::Updated(_, _) => {}
membership::Event::Removed(_) => {}
membership::Event::ReceivedVote(vote) => {
let event = Event::ReceivedVote(vote);
if self.event_tx.send(event).is_err() {
debug!("dropped received vote because there are no subscribers")
}
}
membership::Event::ReceivedPreemptive(subnet_id, data) => {
let event = Event::ReceivedPreemptive(subnet_id, data);
if self.event_tx.send(event).is_err() {
debug!("dropped received preemptive data because there are no subscribers")
}
}
}
}
/// Handle Bitswap lookup result.
fn handle_content_event(&mut self, event: content::Event) {
match event {
content::Event::Complete(query_id, result) => {
if let Some(query) = self.queries.remove(&query_id) {
self.resolve_query(query, result);
} else {
warn!("query ID not found");
}
}
content::Event::BitswapForward {
peer_id,
response_rx,
response_tx,
} => {
let request_tx = self.request_tx.clone();
tokio::task::spawn(async move {
if let Ok(res) = response_rx.await {
if let BitswapResponse::Block(bz) = &res {
let _ = request_tx.send(Request::RateLimitUsed(peer_id, bz.len()));
}
// Forward, if the listener is still open.
let _ = response_tx.send(res);
}
});
}
}
}
/// Handle an internal request coming from a [`Client`].
fn handle_request(&mut self, request: Request<V>) {
match request {
Request::SetProvidedSubnets(ids) => {
if let Err(e) = self.membership_mut().set_provided_subnets(ids) {
warn!("failed to publish set provided subnets: {e}")
}
}
Request::AddProvidedSubnet(id) => {
if let Err(e) = self.membership_mut().add_provided_subnet(id) {
warn!("failed to publish added provided subnet: {e}")
}
}
Request::RemoveProvidedSubnet(id) => {
if let Err(e) = self.membership_mut().remove_provided_subnet(id) {
warn!("failed to publish removed provided subnet: {e}")
}
}
Request::PublishVote(vote) => {
if let Err(e) = self.membership_mut().publish_vote(*vote) {
warn!("failed to publish vote: {e}")
}
}
Request::PublishPreemptive(subnet_id, data) => {
if let Err(e) = self.membership_mut().publish_preemptive(subnet_id, data) {
warn!("failed to publish pre-emptive data: {e}")
}
}
Request::PinSubnet(id) => {
if let Err(e) = self.membership_mut().pin_subnet(id) {
warn!("error pinning subnet: {e}")
}
}
Request::UnpinSubnet(id) => {
if let Err(e) = self.membership_mut().unpin_subnet(&id) {
warn!("error unpinning subnet: {e}")
}
}
Request::Resolve(cid, subnet_id, response_channel) => {
self.start_query(cid, subnet_id, response_channel)
}
Request::RateLimitUsed(peer_id, bytes) => {
self.content_mut().rate_limit_used(peer_id, bytes)
}
Request::UpdateRateLimit(bytes) => self.content_mut().update_rate_limit(bytes),
}
}
/// Start a CID resolution.
fn start_query(&mut self, cid: Cid, subnet_id: SubnetID, response_channel: ResponseChannel) {
let mut peers = self.membership_mut().providers_of_subnet(&subnet_id);
stats::CONTENT_RESOLVE_PEERS.observe(peers.len() as f64);
if peers.is_empty() {
stats::CONTENT_RESOLVE_NO_PEERS.inc();
send_resolve_result(response_channel, Err(anyhow!(NoKnownPeers(subnet_id))));
} else {
// Connect to them in a random order, so as not to overwhelm any specific peer.
peers.shuffle(&mut rand::thread_rng());
// Prioritize peers we already have an established connection with.
let (connected, known) = peers
.into_iter()
.partition::<Vec<_>, _>(|id| self.swarm.is_connected(id));
stats::CONTENT_CONNECTED_PEERS.observe(connected.len() as f64);
let peers = [connected, known].into_iter().flatten().collect();
let (peers, fallback) = self.split_peers_for_query(peers);
let query = Query {
cid,
subnet_id,
response_channel,
fallback_peer_ids: fallback,
};
let query_id = self.content_mut().resolve(cid, peers);
self.queries.insert(query_id, query);
}
}
/// Handle the outcome of a resolve attempt. On success, notify the listener.
/// On failure, retry with the remaining fallback peers — by default the ones
/// we knew supported the subnet but weren't connected to when the resolution
/// was first attempted — or report the error if none are left.
fn resolve_query(&mut self, mut query: Query, result: ResolveResult) {
    match result {
        Ok(_) => {
            stats::CONTENT_RESOLVE_SUCCESS.inc();
            send_resolve_result(query.response_channel, result)
        }
        Err(ref e) if !query.fallback_peer_ids.is_empty() => {
            stats::CONTENT_RESOLVE_FALLBACK.inc();
            debug!(
                "resolving {} from {} failed with {}, but there are {} fallback peers to try",
                query.cid,
                query.subnet_id,
                e,
                query.fallback_peer_ids.len()
            );
            // Take the next batch of peers to try now; the remainder stays
            // attached to the query for any further retries.
            let remaining = std::mem::take(&mut query.fallback_peer_ids);
            let (batch, fallback) = self.split_peers_for_query(remaining);
            let query_id = self.content_mut().resolve(query.cid, batch);
            query.fallback_peer_ids = fallback;
            self.queries.insert(query_id, query);
        }
        Err(_) => {
            stats::CONTENT_RESOLVE_FAILURE.inc();
            send_resolve_result(query.response_channel, result)
        }
    }
}
/// Split peers into the batch we query immediately and the remainder kept as
/// fallback in case the current batch fails.
fn split_peers_for_query(&self, mut peers: Vec<PeerId>) -> (Vec<PeerId>, Vec<PeerId>) {
    let batch_size = self.max_peers_per_query.min(peers.len());
    let rest = peers.split_off(batch_size);
    (peers, rest)
}
// The following are helper functions because Rust Analyzer has trouble with recognising that `swarm.behaviour_mut()` is a legal call.

/// Mutable access to the peer discovery behaviour.
fn discovery_mut(&mut self) -> &mut behaviour::discovery::Behaviour {
    self.swarm.behaviour_mut().discovery_mut()
}

/// Mutable access to the subnet membership behaviour.
fn membership_mut(&mut self) -> &mut behaviour::membership::Behaviour<V> {
    self.swarm.behaviour_mut().membership_mut()
}

/// Mutable access to the content resolution behaviour.
fn content_mut(&mut self) -> &mut behaviour::content::Behaviour<P> {
    self.swarm.behaviour_mut().content_mut()
}
}
/// Respond to the sender of the query, if they are still listening.
fn send_resolve_result(tx: Sender<ResolveResult>, res: ResolveResult) {
if tx.send(res).is_err() {
error!("error sending resolve result; listener closed")
}
}
/// Builds the transport stack that libp2p will communicate over.
///
/// Based on the equivalent in Forest.
pub fn build_transport(local_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox)> {
    // TCP with Nagle's algorithm disabled, wrapped in DNS name resolution.
    let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::new().nodelay(true));
    let base_transport = libp2p::dns::tokio::Transport::system(tcp).unwrap();

    // Noise handshake for authentication and encryption.
    let auth_config = noise::Config::new(&local_key).expect("Noise key generation failed");

    // Negotiate either Yamux or Mplex for stream multiplexing.
    let muxer_upgrade = {
        let mut mplex = MplexConfig::new();
        mplex.set_max_buffer_size(usize::MAX);
        // FIXME: Yamux will end up being deprecated.
        let yamux = yamux::Config::default();
        // yamux.set_window_update_mode(WindowUpdateMode::OnRead);
        libp2p::core::upgrade::SelectUpgrade::new(yamux, mplex)
    };

    base_transport
        .upgrade(libp2p::core::upgrade::Version::V1)
        .authenticate(auth_config)
        .multiplex(muxer_upgrade)
        .timeout(Duration::from_secs(20))
        .boxed()
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/limiter.rs | ipld/resolver/src/limiter.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use std::time::{Duration, Instant};
use gcra::GcraState;
pub use gcra::RateLimit;
use lru_time_cache::LruCache;
/// Track the rate limit of resources (e.g. bytes) consumed per key.
///
/// Forgets keys after long periods of inactivity.
pub struct RateLimiter<K> {
    // Per-key GCRA state; entries expire after the configured TTL.
    cache: LruCache<K, GcraState>,
}

impl<K> RateLimiter<K>
where
    K: Ord + Clone,
{
    /// Create a limiter whose per-key state is forgotten after `ttl` of inactivity.
    pub fn new(ttl: Duration) -> Self {
        let cache = LruCache::with_expiry_duration(ttl);
        Self { cache }
    }

    /// Try to add a certain amount of resources consumed to a key.
    ///
    /// Return `true` if the key was within limits, `false` if it needs to wait.
    ///
    /// The [`RateLimit`] is passed in so that we can update it dynamically
    /// based on how much data we anticipate we will have to serve.
    pub fn add(&mut self, limit: &RateLimit, key: K, cost: u32) -> bool {
        self.add_at(limit, key, cost, Instant::now())
    }

    /// Same as [`RateLimiter::add`] but allows passing in the time, for testing.
    pub fn add_at(&mut self, limit: &RateLimit, key: K, cost: u32, at: Instant) -> bool {
        // NOTE: `or_insert_with` is kept because `lru_time_cache`'s entry API
        // is not the std one; clippy's `unwrap_or_default` hint doesn't apply.
        #[allow(clippy::unwrap_or_default)]
        let entry = self.cache.entry(key).or_insert_with(GcraState::default);
        entry.check_and_modify_at(limit, at, cost).is_ok()
    }
}
#[cfg(test)]
mod tests {
    use std::time::{Duration, Instant};

    use super::{RateLimit, RateLimiter};

    #[test]
    fn basics() {
        // 10Mb per hour.
        let one_hour = Duration::from_secs(60 * 60);
        let rate_limit = RateLimit::new(10 * 1024 * 1024, one_hour);
        let mut rate_limiter = RateLimiter::<&'static str>::new(one_hour);
        assert!(rate_limiter.add(&rate_limit, "foo", 1024));
        assert!(rate_limiter.add(&rate_limit, "foo", 5 * 1024 * 1024));
        assert!(
            !rate_limiter.add(&rate_limit, "foo", 5 * 1024 * 1024),
            "can't over consume"
        );
        // Limits are tracked per key, so a different key has its own budget.
        assert!(
            rate_limiter.add(&rate_limit, "bar", 5 * 1024 * 1024),
            "others can consume"
        );
        // Once the period has elapsed, the budget refills.
        assert!(
            rate_limiter.add_at(
                &rate_limit,
                "foo",
                5 * 1024 * 1024,
                Instant::now() + one_hour + Duration::from_secs(1)
            ),
            "can consume again in the future"
        );
        // Passing a bigger `RateLimit` to existing state raises the quota dynamically.
        let rate_limit = RateLimit::new(50 * 1024 * 1024, one_hour);
        assert!(
            rate_limiter.add(&rate_limit, "bar", 15 * 1024 * 1024),
            "can raise quota"
        );
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/hash.rs | ipld/resolver/src/hash.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use blake2b_simd::Params;
/// Generates BLAKE2b hash of fixed 32 bytes size.
pub fn blake2b_256(ingest: &[u8]) -> [u8; 32] {
    let digest = Params::new()
        .hash_length(32)
        .to_state()
        .update(ingest)
        .finalize();
    let mut ret = [0u8; 32];
    // `copy_from_slice` is the idiomatic choice for `Copy` elements such as
    // bytes (it compiles to a plain memcpy); `clone_from_slice` was equivalent
    // but needlessly element-wise in intent.
    ret.copy_from_slice(digest.as_bytes());
    ret
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn vector_hashing() {
        // The digest depends only on the input bytes: equal inputs hash
        // equally, different inputs hash differently.
        let ing_vec = vec![1, 2, 3];
        assert_eq!(blake2b_256(&ing_vec), blake2b_256(&[1, 2, 3]));
        assert_ne!(blake2b_256(&ing_vec), blake2b_256(&[1, 2, 3, 4]));
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/arb.rs | ipld/resolver/src/arb.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use fvm_shared::address::Address;
use ipc_api::subnet_id::SubnetID;
use libipld::{Cid, Multihash};
use quickcheck::Arbitrary;
/// Unfortunately an arbitrary `DelegatedAddress` can be inconsistent
/// with bytes that do not correspond to its length. This struct fixes
/// that so we can generate arbitrary addresses that don't fail equality
/// after a roundtrip.
#[derive(Clone, Debug)]
pub struct ArbAddress(pub Address);

impl Arbitrary for ArbAddress {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        // Serialize and parse back so the generated address is self-consistent.
        let candidate = Address::arbitrary(g);
        let roundtripped =
            Address::from_bytes(&candidate.to_bytes()).expect("address roundtrip works");
        Self(roundtripped)
    }
}
/// Arbitrary [`SubnetID`] with up to three arbitrary child addresses, for property tests.
#[derive(Clone, Debug)]
pub struct ArbSubnetID(pub SubnetID);

impl Arbitrary for ArbSubnetID {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        // Between 0 and 3 children.
        let child_count = usize::arbitrary(g) % 4;
        let children = (0..child_count)
            .map(|_| {
                if bool::arbitrary(g) {
                    Address::new_id(u64::arbitrary(g))
                } else {
                    // Only expecting EAM managed delegated addresses.
                    let subaddr: [u8; 20] = std::array::from_fn(|_| Arbitrary::arbitrary(g));
                    Address::new_delegated(10, &subaddr).unwrap()
                }
            })
            .collect::<Vec<_>>();
        Self(SubnetID::new(u64::arbitrary(g), children))
    }
}
/// Unfortunately ref-fvm depends on cid:0.8.6, which depends on quickcheck:0.9
/// whereas here we use quickcheck:1.0. This causes conflicts and the `Arbitrary`
/// implementations for `Cid` are not usable to us, nor can we patch all `cid`
/// dependencies to use 0.9 because then the IPLD and other FVM traits don't work.
///
/// TODO: Remove this module when the `cid` dependency is updated.
///
/// NOTE: This is based on the [simpler version](https://github.com/ChainSafe/forest/blob/v0.6.0/blockchain/blocks/src/lib.rs) in Forest.
/// The original uses weighted distributions to generate more plausible CIDs.
#[derive(Clone)]
pub struct ArbCid(pub Cid);

impl Arbitrary for ArbCid {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        // CIDv1 with an arbitrary codec and a single arbitrary byte wrapped
        // in an arbitrary multihash code — enough variety for property tests.
        Self(Cid::new_v1(
            u64::arbitrary(g),
            Multihash::wrap(u64::arbitrary(g), &[u8::arbitrary(g)]).unwrap(),
        ))
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/behaviour/discovery.rs | ipld/resolver/src/behaviour/discovery.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: MIT
use std::{
cmp,
collections::VecDeque,
task::{Context, Poll},
time::Duration,
};
use libp2p::{
core::Endpoint,
identify::Info,
kad::{self, store::MemoryStore},
multiaddr::Protocol,
swarm::{
behaviour::toggle::{Toggle, ToggleConnectionHandler},
derive_prelude::FromSwarm,
ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
},
Multiaddr, PeerId, StreamProtocol,
};
use log::{debug, warn};
use tokio::time::Interval;
use crate::stats;
use super::NetworkConfig;
// NOTE: The Discovery behaviour is largely based on what exists in Forest. If it ain't broken...
// NOTE: Not sure if emitting events is going to be useful yet, but for now it's an example of having one.
/// Event generated by the `Discovery` behaviour and emitted to the outer
/// service from its `poll` implementation.
#[derive(Debug)]
pub enum Event {
    /// Event emitted when a peer is added or updated in the routing table,
    /// which means if we later ask for its addresses, they should be known.
    Added(PeerId, Vec<Multiaddr>),
    /// Event emitted when a peer is removed from the routing table.
    Removed(PeerId),
}
/// Configuration for [`discovery::Behaviour`].
#[derive(Clone, Debug)]
pub struct Config {
    /// Custom nodes which never expire, e.g. bootstrap or reserved nodes.
    ///
    /// The addresses must end with a `/p2p/<peer-id>` part.
    pub static_addresses: Vec<Multiaddr>,
    /// Number of connections at which point we pause further discovery lookups.
    ///
    /// Also used to cap the buffer of identify records held back during bootstrap.
    pub target_connections: usize,
    /// Option to disable Kademlia, for example in a fixed static network.
    pub enable_kademlia: bool,
}
/// Errors that can arise while building the discovery behaviour from its [`Config`].
#[derive(thiserror::Error, Debug)]
pub enum ConfigError {
    #[error("invalid network: {0}")]
    InvalidNetwork(String),
    #[error("invalid bootstrap address: {0}")]
    InvalidBootstrapAddress(Multiaddr),
    #[error("no bootstrap address")]
    NoBootstrapAddress,
}
/// Discovery behaviour, periodically running a random lookup with Kademlia to find new peers.
///
/// Our other option for peer discovery would be to rely on the Peer Exchange of Gossipsub.
/// However, the required Signed Records feature is not available in the Rust version of the library, as of v0.50.
pub struct Behaviour {
    /// Local peer ID.
    peer_id: PeerId,
    /// User-defined list of nodes and their addresses.
    /// Typically includes bootstrap nodes, or it can be used for a static network.
    static_addresses: Vec<(PeerId, Multiaddr)>,
    /// Name of the peer discovery protocol.
    protocol_name: StreamProtocol,
    /// Kademlia behaviour, if enabled.
    inner: Toggle<kad::Behaviour<MemoryStore>>,
    /// Number of current connections.
    num_connections: usize,
    /// Number of connections where further lookups are paused.
    target_connections: usize,
    /// Interval between random lookups.
    lookup_interval: Interval,
    /// Buffer incoming identify requests until we have finished the bootstrap.
    /// `Some` while bootstrap is in progress; `None` once it is finished.
    bootstrap_buffer: Option<Vec<(PeerId, Info)>>,
    /// Events to return when polled.
    outbox: VecDeque<Event>,
}
impl Behaviour {
    /// Create a [`discovery::Behaviour`] from the configuration.
    ///
    /// Errors with [`ConfigError::InvalidNetwork`] on an empty network name,
    /// [`ConfigError::InvalidBootstrapAddress`] if a static address does not
    /// end in `/p2p/<peer-id>`, and [`ConfigError::NoBootstrapAddress`] if
    /// Kademlia cannot start bootstrapping from the given seeds.
    pub fn new(nc: NetworkConfig, dc: Config) -> Result<Self, ConfigError> {
        if nc.network_name.is_empty() {
            return Err(ConfigError::InvalidNetwork(nc.network_name));
        }
        let local_peer_id = nc.local_peer_id();
        // Parse static addresses.
        let mut static_addresses = Vec::new();
        for multiaddr in dc.static_addresses {
            let mut addr = multiaddr.clone();
            if let Some(Protocol::P2p(peer_id)) = addr.pop() {
                // Trying to add the local peer to the routing table would be ignored,
                // but if it's the only one we could falsely believe we can bootstrap.
                if peer_id != local_peer_id {
                    static_addresses.push((peer_id, addr));
                }
            } else {
                return Err(ConfigError::InvalidBootstrapAddress(multiaddr));
            }
        }
        let mut outbox = VecDeque::new();
        // Network-specific protocol name so nodes of different networks don't mix.
        let protocol_name = format!("/ipc/{}/kad/1.0.0", nc.network_name);
        let protocol_name =
            StreamProtocol::try_from_owned(protocol_name).expect("valid protocol name");
        let mut bootstrap_buffer = None;
        let kademlia_opt = if dc.enable_kademlia {
            let mut kad_config = kad::Config::default();
            kad_config.set_protocol_names(vec![protocol_name.clone()]);
            // Disable inserting records into the memory store, so peers cannot send `PutRecord`
            // messages to store content in the memory of our node.
            kad_config.set_record_filtering(kad::StoreInserts::FilterBoth);
            let store = MemoryStore::new(local_peer_id);
            let mut kademlia = kad::Behaviour::with_config(local_peer_id, store, kad_config);
            // Setting the mode to server so that it doesn't deny connections until the external address is established.
            // At least this seems to prevent in-memory tests from working, I'm not sure about what will happen with real servers.
            kademlia.set_mode(Some(kad::Mode::Server));
            // Bootstrap from the seeds. The first seed to stand up might have nobody to bootstrap from,
            // although ideally there would be at least another peer, so we can easily restart it and come back.
            if !static_addresses.is_empty() {
                for (peer_id, addr) in static_addresses.iter() {
                    kademlia.add_address(peer_id, addr.clone());
                }
                kademlia
                    .bootstrap()
                    .map_err(|_| ConfigError::NoBootstrapAddress)?;
                // A `Some` buffer means identify records are held back until bootstrap completes.
                bootstrap_buffer = Some(Vec::new());
            }
            Some(kademlia)
        } else {
            // It would be nice to use `.group_by` here but it's unstable.
            // Make sure static peers are reported as routable.
            for (peer_id, addr) in static_addresses.iter() {
                outbox.push_back(Event::Added(*peer_id, vec![addr.clone()]))
            }
            None
        };
        Ok(Self {
            peer_id: nc.local_peer_id(),
            static_addresses,
            protocol_name,
            inner: kademlia_opt.into(),
            // Start with 1s between lookups; `poll` doubles this, capped at 60s.
            lookup_interval: tokio::time::interval(Duration::from_secs(1)),
            outbox,
            num_connections: 0,
            bootstrap_buffer,
            target_connections: dc.target_connections,
        })
    }

    /// Lookup a peer, unless we already know their address, so that we have a chance to connect to them later.
    pub fn background_lookup(&mut self, peer_id: PeerId) {
        if self.addresses_of_peer(peer_id).is_empty() {
            if let Some(kademlia) = self.inner.as_mut() {
                stats::DISCOVERY_BACKGROUND_LOOKUP.inc();
                kademlia.get_closest_peers(peer_id);
            }
        }
    }

    /// Check if a peer has a user defined addresses.
    fn is_static(&self, peer_id: PeerId) -> bool {
        self.static_addresses.iter().any(|(id, _)| *id == peer_id)
    }

    /// Add addresses we learned from the `Identify` protocol to Kademlia.
    ///
    /// This seems to be the only way, because Kademlia rightfully treats
    /// incoming connections as ephemeral addresses, but doesn't have an
    /// alternative exchange mechanism.
    pub fn add_identified(&mut self, peer_id: &PeerId, info: Info) {
        // Only consider peers that speak our network's Kademlia protocol.
        if info.protocols.contains(&self.protocol_name) {
            // If we are still in the process of bootstrapping peers, buffer the incoming self-identify records,
            // to protect against eclipse attacks that could fill the k-table with entries to crowd out honest peers.
            if let Some(buffer) = self.bootstrap_buffer.as_mut() {
                // Cap the buffer at `target_connections` and skip duplicates.
                if buffer.len() < self.target_connections
                    && !buffer.iter().any(|(id, _)| id == peer_id)
                {
                    buffer.push((*peer_id, info))
                }
            } else {
                for addr in info.listen_addrs.iter().cloned() {
                    self.add_address(peer_id, addr);
                }
            }
        }
    }

    /// Add a known address to Kademlia.
    pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) {
        if let Some(kademlia) = self.inner.as_mut() {
            kademlia.add_address(peer_id, address);
        }
    }

    /// Addresses currently known for a peer (from the inner behaviour plus the
    /// static address list); empty if the peer is unknown.
    fn addresses_of_peer(&mut self, peer_id: PeerId) -> Vec<Multiaddr> {
        // Reuse `handle_pending_outbound_connection` purely as an address lookup.
        self.handle_pending_outbound_connection(
            ConnectionId::new_unchecked(0),
            Some(peer_id),
            &[],
            Endpoint::Listener,
        )
        .unwrap_or_default()
    }
}
impl NetworkBehaviour for Behaviour {
    type ConnectionHandler = ToggleConnectionHandler<
        <kad::Behaviour<MemoryStore> as NetworkBehaviour>::ConnectionHandler,
    >;

    type ToSwarm = Event;

    fn on_swarm_event(&mut self, event: FromSwarm) {
        // Maintain the connected-peer count (which gates the periodic lookups
        // in `poll`) before passing the event on to Kademlia.
        match &event {
            FromSwarm::ConnectionEstablished(e) => {
                // Count peers, not individual connections to the same peer.
                if e.other_established == 0 {
                    stats::DISCOVERY_CONNECTED_PEERS.inc();
                    self.num_connections += 1;
                }
            }
            FromSwarm::ConnectionClosed(e) => {
                if e.remaining_established == 0 {
                    stats::DISCOVERY_CONNECTED_PEERS.dec();
                    self.num_connections -= 1;
                }
            }
            _ => {}
        };
        self.inner.on_swarm_event(event)
    }

    /// Delegated to the inner (possibly disabled) Kademlia behaviour.
    fn on_connection_handler_event(
        &mut self,
        peer_id: PeerId,
        connection_id: ConnectionId,
        event: THandlerOutEvent<Self>,
    ) {
        self.inner
            .on_connection_handler_event(peer_id, connection_id, event)
    }

    /// Delegated to the inner (possibly disabled) Kademlia behaviour.
    fn handle_pending_inbound_connection(
        &mut self,
        connection_id: ConnectionId,
        local_addr: &Multiaddr,
        remote_addr: &Multiaddr,
    ) -> Result<(), ConnectionDenied> {
        self.inner
            .handle_pending_inbound_connection(connection_id, local_addr, remote_addr)
    }

    /// Delegated to the inner (possibly disabled) Kademlia behaviour.
    fn handle_established_inbound_connection(
        &mut self,
        connection_id: ConnectionId,
        peer: PeerId,
        local_addr: &Multiaddr,
        remote_addr: &Multiaddr,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        self.inner.handle_established_inbound_connection(
            connection_id,
            peer,
            local_addr,
            remote_addr,
        )
    }

    /// Ask Kademlia for the peer's addresses, then merge in any user-defined
    /// static addresses configured for that peer.
    fn handle_pending_outbound_connection(
        &mut self,
        connection_id: ConnectionId,
        maybe_peer: Option<PeerId>,
        addresses: &[Multiaddr],
        effective_role: Endpoint,
    ) -> Result<Vec<Multiaddr>, ConnectionDenied> {
        let mut addrs = self.inner.handle_pending_outbound_connection(
            connection_id,
            maybe_peer,
            addresses,
            effective_role,
        )?;
        if let Some(peer_id) = maybe_peer {
            addrs.extend(
                self.static_addresses
                    .iter()
                    .filter(|(p, _)| *p == peer_id)
                    .map(|(_, a)| a.clone()),
            );
        }
        Ok(addrs)
    }

    /// Delegated to the inner (possibly disabled) Kademlia behaviour.
    fn handle_established_outbound_connection(
        &mut self,
        connection_id: ConnectionId,
        peer: PeerId,
        addr: &Multiaddr,
        role_override: Endpoint,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        self.inner
            .handle_established_outbound_connection(connection_id, peer, addr, role_override)
    }

    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
        // Emit own events first.
        if let Some(ev) = self.outbox.pop_front() {
            return Poll::Ready(ToSwarm::GenerateEvent(ev));
        }
        // Trigger periodic queries.
        if self.lookup_interval.poll_tick(cx).is_ready() {
            // Only keep looking for peers while below the connection target.
            if self.num_connections < self.target_connections {
                if let Some(k) = self.inner.as_mut() {
                    debug!("looking up a random peer");
                    let random_peer_id = PeerId::random();
                    k.get_closest_peers(random_peer_id);
                }
            }
            // Schedule the next random query with exponentially increasing delay, capped at 60 seconds.
            self.lookup_interval = tokio::time::interval(cmp::min(
                self.lookup_interval.period() * 2,
                Duration::from_secs(60),
            ));
            // we need to reset the interval, otherwise the next tick completes immediately.
            self.lookup_interval.reset();
        }
        // Poll Kademlia.
        while let Poll::Ready(ev) = self.inner.poll(cx) {
            match ev {
                ToSwarm::GenerateEvent(ev) => {
                    match ev {
                        // We get this event for inbound connections, where the remote address may be ephemeral.
                        kad::Event::UnroutablePeer { peer } => {
                            debug!("{peer} unroutable from {}", self.peer_id);
                        }
                        // `PutRecord` is filtered by config; log anyone who tries anyway.
                        kad::Event::InboundRequest {
                            request: kad::InboundRequest::PutRecord { source, .. },
                        } => {
                            warn!("disallowed Kademlia requests from {source}",)
                        }
                        // Information only.
                        kad::Event::InboundRequest { .. } => {}
                        kad::Event::ModeChanged { .. } => {}
                        // Finish bootstrapping.
                        kad::Event::OutboundQueryProgressed { result, step, .. } => match result {
                            kad::QueryResult::Bootstrap(result) if step.last => {
                                debug!("Bootstrapping finished with {result:?}");
                                // Flush the identify records buffered while bootstrapping.
                                if let Some(buffer) = self.bootstrap_buffer.take() {
                                    debug!("Adding {} self-identified peers.", buffer.len());
                                    for (peer_id, info) in buffer {
                                        self.add_identified(&peer_id, info)
                                    }
                                }
                            }
                            _ => {}
                        },
                        // The config ensures peers are added to the table if there's room.
                        // We're not emitting these as known peers because the address will probably not be returned by `addresses_of_peer`,
                        // so the outside service would have to keep track, which is not what we want.
                        kad::Event::RoutablePeer { peer, .. } => {
                            debug!("Kademlia in manual mode or bucket full, cannot add {peer}");
                        }
                        // Unfortunately, looking at the Kademlia behaviour, it looks like when it goes from pending to active,
                        // it won't emit another event, so we might as well tentatively emit an event here.
                        kad::Event::PendingRoutablePeer { peer, address } => {
                            debug!("{peer} pending to the routing table of {}", self.peer_id);
                            self.outbox.push_back(Event::Added(peer, vec![address]))
                        }
                        // This event should ensure that we will be able to answer address lookups later.
                        kad::Event::RoutingUpdated {
                            peer,
                            addresses,
                            old_peer,
                            ..
                        } => {
                            debug!("{peer} added to the routing table of {}", self.peer_id);
                            // There are two events here; we can only return one, so let's defer them to the outbox.
                            if let Some(peer_id) = old_peer {
                                if self.is_static(peer_id) {
                                    self.outbox.push_back(Event::Removed(peer_id))
                                }
                            }
                            self.outbox
                                .push_back(Event::Added(peer, addresses.into_vec()))
                        }
                    }
                }
                other => {
                    return Poll::Ready(other.map_out(|_| unreachable!("already handled")));
                }
            }
        }
        Poll::Pending
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/behaviour/content.rs | ipld/resolver/src/behaviour/content.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use std::{
collections::{HashMap, VecDeque},
task::{Context, Poll},
time::Duration,
};
use libipld::{store::StoreParams, Cid};
use libp2p::{
core::{ConnectedPoint, Endpoint},
futures::channel::oneshot,
multiaddr::Protocol,
swarm::{
derive_prelude::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler,
THandlerInEvent, THandlerOutEvent, ToSwarm,
},
Multiaddr, PeerId,
};
use libp2p_bitswap::{Bitswap, BitswapConfig, BitswapEvent, BitswapResponse, BitswapStore};
use log::debug;
use prometheus::Registry;
use crate::{
limiter::{RateLimit, RateLimiter},
stats,
};
/// Identifier of an in-flight Bitswap sync query, re-exported from [`libp2p_bitswap`].
pub type QueryId = libp2p_bitswap::QueryId;
// Not much to do here, just hiding the `Progress` event as I don't think we'll need it.
// We can't really turn it into anything more meaningful; the outer Service, which drives
// the Swarm events, will have to store the `QueryId` and figure out which CID it was about
// (there could be multiple queries running over the same CID) and how to respond to the
// original requestor (e.g. by completing a channel).
/// Event emitted by the content behaviour to the outer service.
#[derive(Debug)]
pub enum Event {
    /// Event raised when a resolution request is finished.
    ///
    /// The result will indicate either success, or arbitrary failure.
    /// If it is a success, the CID can be found in the [`BitswapStore`]
    /// instance the behaviour was created with.
    ///
    /// Note that it is possible that the synchronization completed
    /// partially, but some recursive constituent is missing. The
    /// caller can use the [`missing_blocks`] function to check
    /// whether a retry is necessary.
    Complete(QueryId, anyhow::Result<()>),
    /// Event raised when we want to execute some logic with the `BitswapResponse`.
    /// This is only raised if we are tracking rate limits. The service has to
    /// do the forwarding between the two oneshot channels, and call this module
    /// back between doing so.
    #[allow(dead_code)]
    BitswapForward {
        peer_id: PeerId,
        /// Receive response from the [`Bitswap`] behaviour.
        /// Normally this goes straight to the handler.
        response_rx: oneshot::Receiver<BitswapResponse>,
        /// Forward the response to the handler.
        response_tx: oneshot::Sender<BitswapResponse>,
    },
}
/// Configuration for [`content::Behaviour`].
#[derive(Debug, Clone)]
pub struct Config {
    /// Number of bytes that can be consumed by remote peers in a time period.
    ///
    /// 0 means no limit.
    pub rate_limit_bytes: u32,
    /// Length of the time period at which the consumption limit fills.
    ///
    /// 0 means no limit.
    pub rate_limit_period: Duration,
}
/// Behaviour built on [`Bitswap`] to resolve IPLD content from [`Cid`] to raw bytes.
pub struct Behaviour<P: StoreParams> {
    inner: Bitswap<P>,
    /// Remember which address peers connected from, so we can apply the rate limit
    /// on the address, and not on the peer ID which they can change easily.
    peer_addresses: HashMap<PeerId, Multiaddr>,
    /// Limit the amount of data served by remote address.
    rate_limiter: RateLimiter<Multiaddr>,
    /// Refill period for the rate limit; kept so the limit can be resized later.
    rate_limit_period: Duration,
    /// Current limit; `None` means rate limiting is disabled.
    rate_limit: Option<RateLimit>,
    /// Events to return when polled.
    outbox: VecDeque<Event>,
}
impl<P: StoreParams> Behaviour<P> {
    /// Create a content behaviour over the given [`BitswapStore`].
    ///
    /// A zero `rate_limit_bytes` or a zero `rate_limit_period` disables rate limiting.
    pub fn new<S>(config: Config, store: S) -> Self
    where
        S: BitswapStore<Params = P>,
    {
        let bitswap = Bitswap::new(BitswapConfig::default(), store);
        let rate_limit = if config.rate_limit_bytes == 0 || config.rate_limit_period.is_zero() {
            None
        } else {
            Some(RateLimit::new(
                config.rate_limit_bytes,
                config.rate_limit_period,
            ))
        };
        Self {
            inner: bitswap,
            peer_addresses: Default::default(),
            rate_limiter: RateLimiter::new(config.rate_limit_period),
            rate_limit_period: config.rate_limit_period,
            rate_limit,
            outbox: Default::default(),
        }
    }

    /// Register Prometheus metrics.
    pub fn register_metrics(&self, registry: &Registry) -> anyhow::Result<()> {
        self.inner.register_metrics(registry)
    }

    /// Recursively resolve a [`Cid`] and all underlying CIDs into blocks.
    ///
    /// The [`Bitswap`] behaviour will call the [`BitswapStore`] to ask for
    /// blocks which are missing, ie. find CIDs which aren't available locally.
    /// It is up to the store implementation to decide which links need to be
    /// followed.
    ///
    /// It is also up to the store implementation to decide which CIDs requests
    /// to responds to, e.g. if we only want to resolve certain type of content,
    /// then the store can look up in a restricted collection, rather than the
    /// full IPLD store.
    ///
    /// Resolution will be attempted from the peers passed to the method,
    /// starting with the first one with `WANT-BLOCK`, then whoever responds
    /// positively to `WANT-HAVE` requests. The caller should talk to the
    /// `membership::Behaviour` first to find suitable peers, and then
    /// prioritise peers which are connected.
    ///
    /// The underlying [`libp2p_request_response::RequestResponse`] behaviour
    /// will initiate connections to the peers which aren't connected at the moment.
    pub fn resolve(&mut self, cid: Cid, peers: Vec<PeerId>) -> QueryId {
        debug!("resolving {cid} from {peers:?}");
        stats::CONTENT_RESOLVE_RUNNING.inc();
        // Not passing any missing items, which will result in a call to `BitswapStore::missing_blocks`.
        self.inner.sync(cid, peers, [].into_iter())
    }

    /// Check whether the peer has already exhausted their rate limit.
    #[allow(dead_code)]
    fn check_rate_limit(&mut self, peer_id: &PeerId, cid: &Cid) -> bool {
        if let Some(ref rate_limit) = self.rate_limit {
            if let Some(addr) = self.peer_addresses.get(peer_id).cloned() {
                // Cost is approximated by the length of the CID; saturate on overflow.
                let bytes = cid.to_bytes().len().try_into().unwrap_or(u32::MAX);
                if !self.rate_limiter.add(rate_limit, addr, bytes) {
                    return false;
                }
            }
        }
        true
    }

    /// Callback by the service after [`Event::BitswapForward`].
    pub fn rate_limit_used(&mut self, peer_id: PeerId, bytes: usize) {
        if let Some(ref rate_limit) = self.rate_limit {
            if let Some(addr) = self.peer_addresses.get(&peer_id).cloned() {
                let bytes = bytes.try_into().unwrap_or(u32::MAX);
                // Record the consumption; whether the limit was hit is not acted on here.
                let _ = self.rate_limiter.add(rate_limit, addr, bytes);
            }
        }
    }

    /// Update the rate limit to a new value, keeping the period as-is.
    pub fn update_rate_limit(&mut self, bytes: u32) {
        if bytes == 0 || self.rate_limit_period.is_zero() {
            self.rate_limit = None;
        } else {
            self.rate_limit = Some(RateLimit::new(bytes, self.rate_limit_period))
        }
    }
}
impl<P: StoreParams> NetworkBehaviour for Behaviour<P> {
    type ConnectionHandler = <Bitswap<P> as NetworkBehaviour>::ConnectionHandler;
    type ToSwarm = Event;

    fn on_swarm_event(&mut self, event: FromSwarm) {
        // Store the remote address.
        match &event {
            FromSwarm::ConnectionEstablished(c) => {
                // Only the first connection to a peer sets the address used for rate limiting.
                if c.other_established == 0 {
                    let peer_addr = match c.endpoint {
                        ConnectedPoint::Dialer {
                            address: listen_addr,
                            ..
                        } => listen_addr.clone(),
                        ConnectedPoint::Listener {
                            send_back_addr: ephemeral_addr,
                            ..
                        } => select_non_ephemeral(ephemeral_addr.clone()),
                    };
                    self.peer_addresses.insert(c.peer_id, peer_addr);
                }
            }
            FromSwarm::ConnectionClosed(c) => {
                if c.remaining_established == 0 {
                    self.peer_addresses.remove(&c.peer_id);
                }
            }
            // Note: Ignoring FromSwarm::AddressChange - as long as the same peer connects,
            // not updating the address provides continuity of resource consumption.
            _ => {}
        }
        self.inner.on_swarm_event(event)
    }

    fn on_connection_handler_event(
        &mut self,
        peer_id: PeerId,
        connection_id: ConnectionId,
        event: THandlerOutEvent<Self>,
    ) {
        // TODO: `request_response::handler` is now private, so we cannot pattern match on the handler event.
        // By the looks of the only way to access the request event is to let it go right into the RR protocol
        // wrapped by the Bitswap behaviour and let it raise an event, however we will not see that event here.
        // I'm not sure what we can do without moving rate limiting into the bitswap library itself, because
        // what we did here relied on the ability to redirect the channels inside the request, but if the event
        // itself is private to the `request_response` protocol there's nothing I can do.
        // match event {
        //     request_response::handler::Event::Request {
        //         request_id,
        //         request,
        //         sender,
        //     } if self.rate_limit.is_some() => {
        //         if !self.check_rate_limit(&peer_id, &request.cid) {
        //             warn!("rate limiting {peer_id}");
        //             stats::CONTENT_RATE_LIMITED.inc();
        //             return;
        //         }
        //         // We need to hijack the response channel to record the size, otherwise it goes straight to the handler.
        //         let (tx, rx) = libp2p::futures::channel::oneshot::channel();
        //         let event = request_response::Event::Request {
        //             request_id,
        //             request,
        //             sender: tx,
        //         };
        //         self.inner
        //             .on_connection_handler_event(peer_id, connection_id, event);
        //         let forward = Event::BitswapForward {
        //             peer_id,
        //             response_rx: rx,
        //             response_tx: sender,
        //         };
        //         self.outbox.push_back(forward);
        //     }
        //     _ => self
        //         .inner
        //         .on_connection_handler_event(peer_id, connection_id, event),
        // }
        // debug!("BITSWAP CONNECTION HANDLER EVENT: {event:?}");
        self.inner
            .on_connection_handler_event(peer_id, connection_id, event)
    }

    /// Delegated to the inner [`Bitswap`] behaviour.
    fn handle_pending_inbound_connection(
        &mut self,
        connection_id: ConnectionId,
        local_addr: &Multiaddr,
        remote_addr: &Multiaddr,
    ) -> Result<(), ConnectionDenied> {
        self.inner
            .handle_pending_inbound_connection(connection_id, local_addr, remote_addr)
    }

    /// Delegated to the inner [`Bitswap`] behaviour.
    fn handle_established_inbound_connection(
        &mut self,
        connection_id: ConnectionId,
        peer: PeerId,
        local_addr: &Multiaddr,
        remote_addr: &Multiaddr,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        self.inner.handle_established_inbound_connection(
            connection_id,
            peer,
            local_addr,
            remote_addr,
        )
    }

    /// Delegated to the inner [`Bitswap`] behaviour.
    fn handle_pending_outbound_connection(
        &mut self,
        connection_id: ConnectionId,
        maybe_peer: Option<PeerId>,
        addresses: &[Multiaddr],
        effective_role: Endpoint,
    ) -> Result<Vec<Multiaddr>, ConnectionDenied> {
        self.inner.handle_pending_outbound_connection(
            connection_id,
            maybe_peer,
            addresses,
            effective_role,
        )
    }

    /// Delegated to the inner [`Bitswap`] behaviour.
    fn handle_established_outbound_connection(
        &mut self,
        connection_id: ConnectionId,
        peer: PeerId,
        addr: &Multiaddr,
        role_override: Endpoint,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        self.inner
            .handle_established_outbound_connection(connection_id, peer, addr, role_override)
    }

    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
        // Emit own events first.
        if let Some(ev) = self.outbox.pop_front() {
            return Poll::Ready(ToSwarm::GenerateEvent(ev));
        }
        // Poll Bitswap.
        while let Poll::Ready(ev) = self.inner.poll(cx) {
            // debug!("BITSWAP POLL: {ev:?}");
            match ev {
                ToSwarm::GenerateEvent(ev) => match ev {
                    // Progress updates are intentionally swallowed; see module comment.
                    BitswapEvent::Progress(_, _) => {}
                    BitswapEvent::Complete(id, result) => {
                        stats::CONTENT_RESOLVE_RUNNING.dec();
                        let out = Event::Complete(id, result);
                        return Poll::Ready(ToSwarm::GenerateEvent(out));
                    }
                },
                other => {
                    return Poll::Ready(other.map_out(|_| unreachable!("already handled")));
                }
            }
        }
        Poll::Pending
    }
}
/// Strip the components of an address that are considered ephemeral,
/// retaining only the parts that would stay the same if, for example,
/// the same peer opened another connection from a different random port.
fn select_non_ephemeral(mut addr: Multiaddr) -> Multiaddr {
    let mut stable = Vec::new();
    // `pop` walks the address from its end towards the front.
    while let Some(proto) = addr.pop() {
        match proto {
            // An IP address is sufficient on its own right; discard anything
            // collected so far and stop looking further.
            Protocol::Ip4(_) | Protocol::Ip6(_) => {
                stable = vec![proto];
                break;
            }
            // Peer IDs may belong to a different identity next time,
            // and TCP/UDP ports are ephemeral — drop all of these.
            Protocol::P2p(_) | Protocol::Tcp(_) | Protocol::Udp(_) => {}
            // Everything else is kept until a better option shows up.
            other => stable.push(other),
        }
    }
    // Popping reversed the order; restore front-to-back.
    stable.reverse();
    Multiaddr::from_iter(stable)
}
#[cfg(test)]
mod tests {
    use libp2p::Multiaddr;

    use super::select_non_ephemeral;

    #[test]
    fn non_ephemeral_addr() {
        // (input address, expected stable remainder)
        let cases = [
            ("/ip4/127.0.0.1/udt/sctp/5678", "/ip4/127.0.0.1"),
            ("/ip4/95.217.194.97/tcp/8008/p2p/12D3KooWC1EaEEpghwnPdd89LaPTKEweD1PRLz4aRBkJEA9UiUuS", "/ip4/95.217.194.97"),
            ("/udt/memory/10/p2p/12D3KooWC1EaEEpghwnPdd89LaPTKEweD1PRLz4aRBkJEA9UiUuS", "/udt/memory/10"),
        ];
        for (input, expected) in cases {
            let input: Multiaddr = input.parse().unwrap();
            let expected: Multiaddr = expected.parse().unwrap();
            assert_eq!(select_non_ephemeral(input), expected);
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/behaviour/mod.rs | ipld/resolver/src/behaviour/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use libipld::store::StoreParams;
use libp2p::{
identify,
identity::{Keypair, PublicKey},
ping,
swarm::NetworkBehaviour,
PeerId,
};
use libp2p_bitswap::BitswapStore;
pub mod content;
pub mod discovery;
pub mod membership;
pub use content::Config as ContentConfig;
pub use discovery::Config as DiscoveryConfig;
pub use membership::Config as MembershipConfig;
use serde::{de::DeserializeOwned, Serialize};
/// Static configuration identifying the local peer and the peer group it belongs to.
#[derive(Clone, Debug)]
pub struct NetworkConfig {
/// Cryptographic key used to sign messages.
pub local_key: Keypair,
/// Network name used to differentiate this peer group.
pub network_name: String,
}
impl NetworkConfig {
    /// The public half of the local signing key.
    pub fn local_public_key(&self) -> PublicKey {
        self.local_key.public()
    }

    /// The peer ID derived from the local public key.
    pub fn local_peer_id(&self) -> PeerId {
        let public_key = self.local_public_key();
        public_key.to_peer_id()
    }
}
/// Errors raised while validating the component configurations in [`Behaviour::new`].
#[derive(thiserror::Error, Debug)]
pub enum ConfigError {
#[error("Error in the discovery configuration")]
Discovery(#[from] discovery::ConfigError),
#[error("Error in the membership configuration")]
Membership(#[from] membership::ConfigError),
}
/// Libp2p behaviour bundle to manage content resolution from other subnets, using:
///
/// * Kademlia for peer discovery
/// * Gossipsub to advertise subnet membership
/// * Bitswap to resolve CIDs
#[derive(NetworkBehaviour)]
pub struct Behaviour<P, V>
where
P: StoreParams,
{
/// Liveness checks between connected peers.
ping: ping::Behaviour,
/// Exchange of peer metadata (protocol version, public key).
identify: identify::Behaviour,
/// Kademlia-based peer discovery.
discovery: discovery::Behaviour,
/// Gossipsub-based subnet membership tracking.
membership: membership::Behaviour<V>,
/// Bitswap-based CID resolution.
content: content::Behaviour<P>,
}
// Unfortunately by using `#[derive(NetworkBehaviour)]` we cannot easily inspects events
// from the inner behaviours, e.g. we cannot poll a behaviour and if it returns something
// of interest then call a method on another behaviour. We can do this in yet another wrapper
// where we manually implement `NetworkBehaviour`, or the outer service where we drive the
// Swarm; there we are free to call any of the behaviours as well as the Swarm.
impl<P, V> Behaviour<P, V>
where
P: StoreParams,
V: Serialize + DeserializeOwned,
{
/// Assemble the behaviour bundle from the individual component configurations.
///
/// Returns a [`ConfigError`] if the discovery or membership configuration is invalid.
pub fn new<S>(
nc: NetworkConfig,
dc: DiscoveryConfig,
mc: MembershipConfig,
cc: ContentConfig,
store: S,
) -> Result<Self, ConfigError>
where
S: BitswapStore<Params = P>,
{
Ok(Self {
ping: Default::default(),
identify: identify::Behaviour::new(identify::Config::new(
"ipfs/1.0.0".into(),
nc.local_public_key(),
)),
// `nc` is cloned here because the membership behaviour consumes it below.
discovery: discovery::Behaviour::new(nc.clone(), dc)?,
membership: membership::Behaviour::new(nc, mc)?,
content: content::Behaviour::new(cc, store),
})
}
/// Mutable access to the discovery behaviour.
pub fn discovery_mut(&mut self) -> &mut discovery::Behaviour {
&mut self.discovery
}
/// Mutable access to the membership behaviour.
pub fn membership_mut(&mut self) -> &mut membership::Behaviour<V> {
&mut self.membership
}
/// Mutable access to the content resolution behaviour.
pub fn content_mut(&mut self) -> &mut content::Behaviour<P> {
&mut self.content
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/src/behaviour/membership.rs | ipld/resolver/src/behaviour/membership.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use std::collections::{HashMap, HashSet, VecDeque};
use std::marker::PhantomData;
use std::task::{Context, Poll};
use std::time::Duration;
use anyhow::anyhow;
use ipc_api::subnet_id::SubnetID;
use libp2p::core::Endpoint;
use libp2p::gossipsub::{
self, IdentTopic, MessageAuthenticity, MessageId, PublishError, Sha256Topic, SubscriptionError,
Topic, TopicHash,
};
use libp2p::identity::Keypair;
use libp2p::swarm::derive_prelude::FromSwarm;
use libp2p::swarm::{
ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent,
ToSwarm,
};
use libp2p::{Multiaddr, PeerId};
use log::{debug, error, info, warn};
use serde::de::DeserializeOwned;
use serde::Serialize;
use tokio::time::{Instant, Interval};
use crate::hash::blake2b_256;
use crate::provider_cache::{ProviderDelta, SubnetProviderCache};
use crate::provider_record::{ProviderRecord, SignedProviderRecord};
use crate::vote_record::{SignedVoteRecord, VoteRecord};
use crate::{stats, Timestamp};
use super::NetworkConfig;
/// `Gossipsub` topic identifier for subnet membership.
const PUBSUB_MEMBERSHIP: &str = "/ipc/membership";
/// `Gossipsub` topic identifier for voting about content.
const PUBSUB_VOTES: &str = "/ipc/ipld/votes";
/// `Gossipsub` topic identifier for pre-emptively published blocks of data.
const PUBSUB_PREEMPTIVE: &str = "/ipc/ipld/pre-emptive";
/// Events emitted by the [`membership::Behaviour`] behaviour.
#[derive(Debug)]
pub enum Event<V> {
/// Indicate a change in the subnets a peer is known to support.
Updated(PeerId, ProviderDelta),
/// Indicate that we no longer treat a peer as routable and removed all their supported subnet associations.
Removed(PeerId),
/// We could not add a provider record to the cache because the cache hasn't
/// been told yet that the provider peer is routable. This event can be used
/// to trigger a lookup by the discovery module to learn the address.
Skipped(PeerId),
/// We received a [`VoteRecord`] in one of the subnets we are providing data for.
ReceivedVote(Box<VoteRecord<V>>),
/// We received preemptive data published in a subnet we were interested in.
ReceivedPreemptive(SubnetID, Vec<u8>),
}
/// Configuration for [`membership::Behaviour`].
#[derive(Clone, Debug)]
pub struct Config {
/// User defined list of subnets which will never be pruned from the cache.
pub static_subnets: Vec<SubnetID>,
/// Maximum number of subnets to track in the cache.
pub max_subnets: usize,
/// Publish interval for supported subnets.
pub publish_interval: Duration,
/// Minimum time between publishing own provider record in reaction to new joiners.
pub min_time_between_publish: Duration,
/// Maximum age of provider records before the peer is removed without an update.
pub max_provider_age: Duration,
}
#[derive(thiserror::Error, Debug)]
pub enum ConfigError {
#[error("invalid network: {0}")]
InvalidNetwork(String),
#[error("invalid gossipsub config: {0}")]
InvalidGossipsubConfig(String),
#[error("error subscribing to topic")]
Subscription(#[from] SubscriptionError),
}
/// A [`NetworkBehaviour`] internally using [`Gossipsub`] to learn which
/// peer is able to resolve CIDs in different subnets.
pub struct Behaviour<V> {
/// [`gossipsub::Behaviour`] to spread the information about subnet membership.
inner: gossipsub::Behaviour,
/// Events to return when polled.
outbox: VecDeque<Event<V>>,
/// [`Keypair`] used to sign [`SignedProviderRecord`] instances.
local_key: Keypair,
/// Name of the P2P network, used to separate `Gossipsub` topics.
network_name: String,
/// Name of the [`Gossipsub`] topic where subnet memberships are published.
membership_topic: IdentTopic,
/// List of subnet IDs this agent is providing data for.
subnet_ids: Vec<SubnetID>,
/// Voting topics we are currently subscribed to.
voting_topics: HashSet<TopicHash>,
/// Remember which subnet a topic was about.
preemptive_topics: HashMap<TopicHash, SubnetID>,
/// Caching the latest state of subnet providers.
provider_cache: SubnetProviderCache,
/// Interval between publishing the currently supported subnets.
///
/// This acts like a heartbeat; if a peer doesn't publish its snapshot for a long time,
/// other agents can prune it from their cache and not try to contact for resolution.
publish_interval: Interval,
/// Minimum time between publishing own provider record in reaction to new joiners.
min_time_between_publish: Duration,
/// Last time we gossiped our own provider record.
last_publish_timestamp: Timestamp,
/// Next time we will gossip our own provider record.
next_publish_timestamp: Timestamp,
/// Maximum time a provider can be without an update before it's pruned from the cache.
max_provider_age: Duration,
_phantom_vote: PhantomData<V>,
}
impl<V> Behaviour<V>
where
V: Serialize + DeserializeOwned,
{
pub fn new(nc: NetworkConfig, mc: Config) -> Result<Self, ConfigError> {
if nc.network_name.is_empty() {
return Err(ConfigError::InvalidNetwork(nc.network_name));
}
let membership_topic = Topic::new(format!("{}/{}", PUBSUB_MEMBERSHIP, nc.network_name));
let mut gossipsub_config = gossipsub::ConfigBuilder::default();
// Set the maximum message size to 2MB.
gossipsub_config.max_transmit_size(2 << 20);
gossipsub_config.message_id_fn(|msg: &gossipsub::Message| {
let s = blake2b_256(&msg.data);
MessageId::from(s)
});
let gossipsub_config = gossipsub_config
.build()
.map_err(|s| ConfigError::InvalidGossipsubConfig(s.to_string()))?;
let mut gossipsub = gossipsub::Behaviour::new(
MessageAuthenticity::Signed(nc.local_key.clone()),
gossipsub_config,
)
.map_err(|s| ConfigError::InvalidGossipsubConfig(s.into()))?;
gossipsub
.with_peer_score(
scoring::build_peer_score_params(membership_topic.clone()),
scoring::build_peer_score_thresholds(),
)
.map_err(ConfigError::InvalidGossipsubConfig)?;
// Subscribe to the topic.
gossipsub.subscribe(&membership_topic)?;
// Don't publish immediately, it's empty. Let the creator call `set_subnet_ids` to trigger initially.
let mut interval = tokio::time::interval(mc.publish_interval);
interval.reset();
// Not passing static subnets here; using pinning below instead so it subscribes as well
let provider_cache = SubnetProviderCache::new(mc.max_subnets, vec![]);
let mut membership = Self {
inner: gossipsub,
outbox: Default::default(),
local_key: nc.local_key,
network_name: nc.network_name,
membership_topic,
subnet_ids: Default::default(),
voting_topics: Default::default(),
preemptive_topics: Default::default(),
provider_cache,
publish_interval: interval,
min_time_between_publish: mc.min_time_between_publish,
last_publish_timestamp: Timestamp::default(),
next_publish_timestamp: Timestamp::now() + mc.publish_interval,
max_provider_age: mc.max_provider_age,
_phantom_vote: PhantomData,
};
for subnet_id in mc.static_subnets {
membership.pin_subnet(subnet_id)?;
}
Ok(membership)
}
/// Subscribe the inner Gossipsub behaviour to a topic, logging the action.
fn subscribe(&mut self, topic: &Sha256Topic) -> Result<bool, SubscriptionError> {
// Fixed: the log used `${topic}`, which printed a literal `$` before the
// interpolated topic; Rust inline format capture is just `{topic}`.
info!("subscribing to {topic}");
self.inner.subscribe(topic)
}
/// Unsubscribe the inner Gossipsub behaviour from a topic, logging the action.
fn unsubscribe(&mut self, topic: &Sha256Topic) -> Result<bool, PublishError> {
// Fixed: the log used `${topic}`, which printed a literal `$` before the
// interpolated topic; Rust inline format capture is just `{topic}`.
info!("unsubscribing from {topic}");
self.inner.unsubscribe(topic)
}
/// Construct the topic used to gossip about pre-emptively published data.
///
/// Replaces "/" with "_" to avoid clashes from prefix/suffix overlap.
fn preemptive_topic(&self, subnet_id: &SubnetID) -> Sha256Topic {
Topic::new(format!(
"{}/{}/{}",
PUBSUB_PREEMPTIVE,
self.network_name.replace('/', "_"),
subnet_id.to_string().replace('/', "_")
))
}
/// Subscribe to a preemptive topic.
fn preemptive_subscribe(&mut self, subnet_id: SubnetID) -> Result<(), SubscriptionError> {
let topic = self.preemptive_topic(&subnet_id);
self.subscribe(&topic)?;
self.preemptive_topics.insert(topic.hash(), subnet_id);
Ok(())
}
/// Unsubscribe from a preemptive topic and forget its topic-hash mapping.
fn preemptive_unsubscribe(&mut self, subnet_id: &SubnetID) -> anyhow::Result<()> {
let topic = self.preemptive_topic(subnet_id);
self.unsubscribe(&topic)?;
self.preemptive_topics.remove(&topic.hash());
Ok(())
}
/// Construct the topic used to gossip about votes.
///
/// Replaces "/" with "_" to avoid clashes from prefix/suffix overlap.
fn voting_topic(&self, subnet_id: &SubnetID) -> Sha256Topic {
Topic::new(format!(
"{}/{}/{}",
PUBSUB_VOTES,
self.network_name.replace('/', "_"),
subnet_id.to_string().replace('/', "_")
))
}
/// Subscribe to a voting topic.
fn voting_subscribe(&mut self, subnet_id: &SubnetID) -> Result<(), SubscriptionError> {
let topic = self.voting_topic(subnet_id);
self.subscribe(&topic)?;
self.voting_topics.insert(topic.hash());
Ok(())
}
/// Unsubscribe from a voting topic.
fn voting_unsubscribe(&mut self, subnet_id: &SubnetID) -> anyhow::Result<()> {
let topic = self.voting_topic(subnet_id);
self.unsubscribe(&topic)?;
self.voting_topics.remove(&topic.hash());
Ok(())
}
/// Set all the currently supported subnet IDs, then publish the updated list.
pub fn set_provided_subnets(&mut self, subnet_ids: Vec<SubnetID>) -> anyhow::Result<()> {
let old_subnet_ids = std::mem::take(&mut self.subnet_ids);
// Unsubscribe from removed.
for subnet_id in old_subnet_ids.iter() {
if !subnet_ids.contains(subnet_id) {
self.voting_unsubscribe(subnet_id)?;
}
}
// Subscribe to added.
for subnet_id in subnet_ids.iter() {
if !old_subnet_ids.contains(subnet_id) {
self.voting_subscribe(subnet_id)?;
}
}
self.subnet_ids = subnet_ids;
self.publish_membership()
}
/// Add a subnet to the list of supported subnets, then publish the updated list.
pub fn add_provided_subnet(&mut self, subnet_id: SubnetID) -> anyhow::Result<()> {
if self.subnet_ids.contains(&subnet_id) {
return Ok(());
}
self.voting_subscribe(&subnet_id)?;
self.subnet_ids.push(subnet_id);
self.publish_membership()
}
/// Remove a subnet from the list of supported subnets, then publish the updated list.
pub fn remove_provided_subnet(&mut self, subnet_id: SubnetID) -> anyhow::Result<()> {
if !self.subnet_ids.contains(&subnet_id) {
return Ok(());
}
self.voting_unsubscribe(&subnet_id)?;
self.subnet_ids.retain(|id| id != &subnet_id);
self.publish_membership()
}
/// Make sure a subnet is not pruned, so we always track its providers.
/// Also subscribe to pre-emptively published blocks of data.
///
/// This method could be called in a parent subnet when the ledger indicates
/// there is a known child subnet, so we make sure this subnet cannot be
/// crowded out during the initial phase of bootstrapping the network.
pub fn pin_subnet(&mut self, subnet_id: SubnetID) -> Result<(), SubscriptionError> {
self.preemptive_subscribe(subnet_id.clone())?;
self.provider_cache.pin_subnet(subnet_id);
Ok(())
}
/// Make a subnet pruneable and unsubscribe from pre-emptive data.
pub fn unpin_subnet(&mut self, subnet_id: &SubnetID) -> anyhow::Result<()> {
self.preemptive_unsubscribe(subnet_id)?;
self.provider_cache.unpin_subnet(subnet_id);
Ok(())
}
/// Send a message through Gossipsub to let everyone know about the current configuration.
///
/// On success, records the publish time and reschedules the periodic publish
/// so the heartbeat interval restarts from now.
fn publish_membership(&mut self) -> anyhow::Result<()> {
let record = ProviderRecord::signed(&self.local_key, self.subnet_ids.clone())?;
let data = record.into_envelope().into_protobuf_encoding();
debug!(
"publishing membership in {:?} to {}",
self.subnet_ids, self.membership_topic
);
match self.inner.publish(self.membership_topic.clone(), data) {
Err(e) => {
stats::MEMBERSHIP_PUBLISH_FAILURE.inc();
Err(anyhow!(e))
}
Ok(_msg_id) => {
stats::MEMBERSHIP_PUBLISH_SUCCESS.inc();
self.last_publish_timestamp = Timestamp::now();
self.next_publish_timestamp =
self.last_publish_timestamp + self.publish_interval.period();
self.publish_interval.reset(); // In case the change wasn't triggered by the schedule.
Ok(())
}
}
}
/// Publish the vote of the validator running the agent about a CID to a subnet.
pub fn publish_vote(&mut self, vote: SignedVoteRecord<V>) -> anyhow::Result<()> {
let topic = self.voting_topic(&vote.record().subnet_id);
let data = vote.into_envelope().into_protobuf_encoding();
match self.inner.publish(topic, data) {
Err(e) => {
stats::MEMBERSHIP_PUBLISH_FAILURE.inc();
Err(anyhow!(e))
}
Ok(_msg_id) => {
stats::MEMBERSHIP_PUBLISH_SUCCESS.inc();
Ok(())
}
}
}
/// Publish arbitrary data to the pre-emptive topic of a subnet.
///
/// We are not expected to be subscribed to this topic, only agents on the parent subnet are.
pub fn publish_preemptive(&mut self, subnet_id: SubnetID, data: Vec<u8>) -> anyhow::Result<()> {
let topic = self.preemptive_topic(&subnet_id);
match self.inner.publish(topic, data) {
Err(e) => {
stats::MEMBERSHIP_PUBLISH_FAILURE.inc();
Err(anyhow!(e))
}
Ok(_msg_id) => {
stats::MEMBERSHIP_PUBLISH_SUCCESS.inc();
Ok(())
}
}
}
/// Mark a peer as routable in the cache.
///
/// Call this method when the discovery service learns the address of a peer.
pub fn set_routable(&mut self, peer_id: PeerId) {
self.provider_cache.set_routable(peer_id);
stats::MEMBERSHIP_ROUTABLE_PEERS
.set(self.provider_cache.num_routable().try_into().unwrap());
self.publish_for_new_peer(peer_id);
}
/// Mark a peer as unroutable in the cache.
///
/// Call this method when the discovery service forgets the address of a peer.
pub fn set_unroutable(&mut self, peer_id: PeerId) {
self.provider_cache.set_unroutable(peer_id);
self.outbox.push_back(Event::Removed(peer_id))
}
/// List the current providers of a subnet.
///
/// Call this method when looking for a peer to resolve content from.
pub fn providers_of_subnet(&self, subnet_id: &SubnetID) -> Vec<PeerId> {
self.provider_cache.providers_of_subnet(subnet_id)
}
/// Parse and handle a [`gossipsub::Message`]. If it's from the expected topic,
/// then raise domain event to let the rest of the application know about a
/// provider. Also update all the book keeping in the behaviour that we use
/// to answer future queries about the topic.
fn handle_message(&mut self, msg: gossipsub::Message) {
if msg.topic == self.membership_topic.hash() {
match SignedProviderRecord::from_bytes(&msg.data).map(|r| r.into_record()) {
Ok(record) => self.handle_provider_record(record),
Err(e) => {
stats::MEMBERSHIP_INVALID_MESSAGE.inc();
warn!(
"Gossip message from peer {:?} could not be deserialized as ProviderRecord: {e}",
msg.source
);
}
}
} else if self.voting_topics.contains(&msg.topic) {
match SignedVoteRecord::from_bytes(&msg.data).map(|r| r.into_record()) {
Ok(record) => self.handle_vote_record(record),
Err(e) => {
stats::MEMBERSHIP_INVALID_MESSAGE.inc();
warn!(
"Gossip message from peer {:?} could not be deserialized as VoteRecord: {e}",
msg.source
);
}
}
} else if let Some(subnet_id) = self.preemptive_topics.get(&msg.topic) {
self.handle_preemptive_data(subnet_id.clone(), msg.data)
} else {
stats::MEMBERSHIP_UNKNOWN_TOPIC.inc();
warn!(
"unknown gossipsub topic in message from {:?}: {}",
msg.source, msg.topic
);
}
}
/// Try to add a provider record to the cache.
///
/// If this is the first time we receive a record from the peer,
/// reciprocate by publishing our own.
fn handle_provider_record(&mut self, record: ProviderRecord) {
debug!("received provider record: {record:?}");
let (event, publish) = match self.provider_cache.add_provider(&record) {
None => {
stats::MEMBERSHIP_SKIPPED_PEERS.inc();
(Some(Event::Skipped(record.peer_id)), false)
}
Some(d) if d.is_empty() && !d.is_new => (None, false),
Some(d) => {
let publish = d.is_new;
(Some(Event::Updated(record.peer_id, d)), publish)
}
};
if let Some(event) = event {
self.outbox.push_back(event);
}
if publish {
stats::MEMBERSHIP_PROVIDER_PEERS.inc();
self.publish_for_new_peer(record.peer_id)
}
}
/// Raise an event to tell we received a new vote.
fn handle_vote_record(&mut self, record: VoteRecord<V>) {
self.outbox.push_back(Event::ReceivedVote(Box::new(record)))
}
fn handle_preemptive_data(&mut self, subnet_id: SubnetID, data: Vec<u8>) {
self.outbox
.push_back(Event::ReceivedPreemptive(subnet_id, data))
}
/// Handle new subscribers to the membership topic.
fn handle_subscriber(&mut self, peer_id: PeerId, topic: TopicHash) {
if topic == self.membership_topic.hash() {
self.publish_for_new_peer(peer_id)
}
}
/// Publish our provider record when we encounter a new peer, unless we have recently done so.
///
/// Scheduling rules:
/// * nothing to provide -> skip entirely (periodic empty publishes still happen);
/// * published recently -> skip, the schedule will cover the newcomer;
/// * next scheduled publish is already imminent -> leave the schedule alone;
/// * otherwise -> bring the next publish forward by re-creating the interval.
fn publish_for_new_peer(&mut self, peer_id: PeerId) {
if self.subnet_ids.is_empty() {
// We have nothing, so there's no need for them to know this ASAP.
// The reason we shouldn't disable periodic publishing of empty
// records completely is because it would also remove one of
// triggers for non-connected peers to eagerly publish their
// subnets when they see our empty records. Plus they could
// be good to show on metrics, to have a single source of
// the cluster size available on any node.
return;
}
let now = Timestamp::now();
if self.last_publish_timestamp > now - self.min_time_between_publish {
debug!("recently published, not publishing again for peer {peer_id}");
} else if self.next_publish_timestamp <= now + self.min_time_between_publish {
debug!("publishing soon for new peer {peer_id}"); // don't let new joiners delay it forever by hitting the next block
} else {
debug!("publishing for new peer {peer_id}");
// Create a new timer, rather than publish and reset. This way we don't repeat error handling.
// Give some time for Kademlia and Identify to do their bit on both sides. Works better in tests.
let delayed = Instant::now() + self.min_time_between_publish;
self.next_publish_timestamp = now + self.min_time_between_publish;
self.publish_interval =
tokio::time::interval_at(delayed, self.publish_interval.period())
}
}
/// Remove any membership record that hasn't been updated for a long time.
fn prune_membership(&mut self) {
let cutoff_timestamp = Timestamp::now() - self.max_provider_age;
let pruned = self.provider_cache.prune_providers(cutoff_timestamp);
for peer_id in pruned {
stats::MEMBERSHIP_PROVIDER_PEERS.dec();
self.outbox.push_back(Event::Removed(peer_id))
}
}
}
impl<V> NetworkBehaviour for Behaviour<V>
where
V: Serialize + DeserializeOwned + Send + 'static,
{
type ConnectionHandler = <gossipsub::Behaviour as NetworkBehaviour>::ConnectionHandler;
type ToSwarm = Event<V>;
fn on_swarm_event(&mut self, event: FromSwarm) {
self.inner.on_swarm_event(event)
}
fn on_connection_handler_event(
&mut self,
peer_id: PeerId,
connection_id: ConnectionId,
event: THandlerOutEvent<Self>,
) {
self.inner
.on_connection_handler_event(peer_id, connection_id, event)
}
fn handle_pending_inbound_connection(
&mut self,
connection_id: ConnectionId,
local_addr: &Multiaddr,
remote_addr: &Multiaddr,
) -> Result<(), ConnectionDenied> {
self.inner
.handle_pending_inbound_connection(connection_id, local_addr, remote_addr)
}
fn handle_established_inbound_connection(
&mut self,
connection_id: ConnectionId,
peer: PeerId,
local_addr: &Multiaddr,
remote_addr: &Multiaddr,
) -> Result<THandler<Self>, ConnectionDenied> {
self.inner.handle_established_inbound_connection(
connection_id,
peer,
local_addr,
remote_addr,
)
}
fn handle_pending_outbound_connection(
&mut self,
connection_id: ConnectionId,
maybe_peer: Option<PeerId>,
addresses: &[Multiaddr],
effective_role: Endpoint,
) -> Result<Vec<Multiaddr>, ConnectionDenied> {
self.inner.handle_pending_outbound_connection(
connection_id,
maybe_peer,
addresses,
effective_role,
)
}
fn handle_established_outbound_connection(
&mut self,
connection_id: ConnectionId,
peer: PeerId,
addr: &Multiaddr,
role_override: Endpoint,
) -> Result<THandler<Self>, ConnectionDenied> {
self.inner
.handle_established_outbound_connection(connection_id, peer, addr, role_override)
}
fn poll(
&mut self,
cx: &mut Context<'_>,
) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
// Emit own events first.
if let Some(ev) = self.outbox.pop_front() {
return Poll::Ready(ToSwarm::GenerateEvent(ev));
}
// Republish our current peer record snapshot and prune old records.
if self.publish_interval.poll_tick(cx).is_ready() {
if let Err(e) = self.publish_membership() {
warn!("failed to publish membership: {e}")
};
self.prune_membership();
}
// Poll Gossipsub for events; this is where we can handle Gossipsub messages and
// store the associations from peers to subnets.
while let Poll::Ready(ev) = self.inner.poll(cx) {
match ev {
ToSwarm::GenerateEvent(ev) => {
match ev {
// NOTE: We could (ab)use the Gossipsub mechanism itself to signal subnet membership,
// however I think the information would only spread to our nearest neighbours we are
// connected to. If we assume there are hundreds of agents in each subnet which may
// or may not overlap, and each agent is connected to ~50 other agents, then the chance
// that there are subnets from which there are no or just a few connections is not
// insignificant. For this reason I opted to use messages instead, and let the content
// carry the information, spreading through the Gossipsub network regardless of the
// number of connected peers.
gossipsub::Event::Subscribed { peer_id, topic } => {
self.handle_subscriber(peer_id, topic)
}
gossipsub::Event::Unsubscribed { .. } => {}
// Log potential misconfiguration.
gossipsub::Event::GossipsubNotSupported { peer_id } => {
debug!("peer {peer_id} doesn't support gossipsub");
}
gossipsub::Event::Message { message, .. } => {
self.handle_message(message);
}
}
}
other => {
return Poll::Ready(other.map_out(|_| unreachable!("already handled")));
}
}
}
Poll::Pending
}
}
// Forest has Filecoin specific values copied from Lotus. Not sure what values to use,
// so I'll leave everything on default for now. Or maybe they should be left empty?
mod scoring {
use libp2p::gossipsub::{IdentTopic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams};
pub fn build_peer_score_params(membership_topic: IdentTopic) -> PeerScoreParams {
let mut params = PeerScoreParams::default();
params
.topics
.insert(membership_topic.hash(), TopicScoreParams::default());
params
}
pub fn build_peer_score_thresholds() -> PeerScoreThresholds {
PeerScoreThresholds::default()
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/tests/smoke.rs | ipld/resolver/tests/smoke.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
//! Test that a cluster of IPLD resolver can be started in memory,
//! that they bootstrap from each other and are able to resolve CIDs.
//!
//! Run the tests as follows:
//! ```ignore
//! RUST_LOG=debug cargo test -p ipc_ipld_resolver --test smoke resolve
//! ```
// For inspiration on testing libp2p look at:
// * https://github.com/libp2p/rust-libp2p/blob/v0.50.0/misc/multistream-select/tests/transport.rs
// * https://github.com/libp2p/rust-libp2p/blob/v0.50.0/protocols/ping/tests/ping.rs
// * https://github.com/libp2p/rust-libp2p/blob/v0.50.0/protocols/gossipsub/tests/smoke.rs
// They all use a different combination of `MemoryTransport` and executors.
// These tests attempt to use `MemoryTransport` so it's quicker, with `Swarm::with_tokio_executor`
// so we can leave the polling to the `Service` running in a `Task`, rather than do it from the test
// (although these might be orthogonal).
use std::{
sync::atomic::{AtomicU64, Ordering},
time::Duration,
};
use anyhow::anyhow;
use cid::Cid;
use fvm_ipld_encoding::IPLD_RAW;
use fvm_ipld_hamt::Hamt;
use fvm_shared::{address::Address, ActorID};
use ipc_api::subnet_id::SubnetID;
use ipc_ipld_resolver::{
Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, MembershipConfig,
NetworkConfig, Resolver, Service, VoteRecord,
};
use libp2p::{
core::{
muxing::StreamMuxerBox,
transport::{Boxed, MemoryTransport},
},
identity::Keypair,
multiaddr::Protocol,
plaintext, yamux, Multiaddr, PeerId, Transport,
};
use multihash::{Code, MultihashDigest};
use rand::{rngs::StdRng, Rng, SeedableRng};
mod store;
use serde::{Deserialize, Serialize};
use store::*;
use tokio::{sync::broadcast, time::timeout};
const BIT_WIDTH: u32 = 8;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
struct TestVote(Cid);
struct Agent {
config: Config,
client: Client<TestVote>,
events: broadcast::Receiver<Event<TestVote>>,
store: TestBlockstore,
}
struct Cluster {
agents: Vec<Agent>,
}
impl Cluster {
/// Number of agents in the cluster.
pub fn size(&self) -> usize {
self.agents.len()
}
/// Wait until the cluster is formed,
/// ie. nodes discover each other through their bootstrap.
pub async fn await_connect(&self) {
// Wait a little for the cluster to connect.
// TODO: Wait on some condition instead of sleep.
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
struct ClusterBuilder {
size: u32,
rng: StdRng,
services: Vec<Service<TestStoreParams, TestVote>>,
agents: Vec<Agent>,
}
impl ClusterBuilder {
fn new(size: u32) -> Self {
// Each port has to be unique, so each test must use a different seed.
// This is shared between all instances.
static COUNTER: AtomicU64 = AtomicU64::new(0);
let seed = COUNTER.fetch_add(1, Ordering::Relaxed);
Self::new_with_seed(size, seed)
}
fn new_with_seed(size: u32, seed: u64) -> Self {
Self {
size,
rng: rand::rngs::StdRng::seed_from_u64(seed),
services: Default::default(),
agents: Default::default(),
}
}
/// Add a node with randomized address, optionally bootstrapping from an existing node.
fn add_node(&mut self, bootstrap: Option<usize>) {
let bootstrap_addr = bootstrap.map(|i| {
let config = &self.agents[i].config;
let peer_id = config.network.local_peer_id();
let mut addr = config.connection.listen_addr.clone();
addr.push(Protocol::P2p(peer_id));
addr
});
let config = make_config(&mut self.rng, self.size, bootstrap_addr);
let (service, store) = make_service(config.clone());
let client = service.client();
let events = service.subscribe();
self.services.push(service);
self.agents.push(Agent {
config,
client,
events,
store,
});
}
/// Start running all services
fn run(self) -> Cluster {
for service in self.services {
tokio::task::spawn(async move { service.run().await.expect("error running service") });
}
Cluster {
agents: self.agents,
}
}
}
/// Run the tests with `RUST_LOG=debug` to see the logs, for example:
///
/// ```text
/// RUST_LOG=debug cargo test -p ipc_ipld_resolver --test smoke resolve
/// ```
fn init_log() {
// This line means the test runner will buffer the logs (if `RUST_LOG` is on)
// and print them after any failure. With `-- --nocapture` we see them as we go.
let _ = env_logger::builder().is_test(true).try_init();
// Alternatively with this we see them printed to the console regardless of outcome:
//env_logger::init();
}
/// Start a cluster of agents from a single bootstrap node,
/// make available some content on one agent and resolve it from another.
#[tokio::test]
async fn single_bootstrap_single_provider_resolve_one() {
    init_log();
    // Choose agents.
    let cluster_size = 3;
    let bootstrap_idx = 0;
    let provider_idx = 1;
    let resolver_idx = 2;
    let mut cluster = make_cluster_with_bootstrap(cluster_size, bootstrap_idx).await;
    // Insert a CID of a complex recursive data structure.
    let cid = insert_test_data(&mut cluster.agents[provider_idx]).expect("failed to insert data");
    // Sanity check that we can read the data back.
    check_test_data(&mut cluster.agents[provider_idx], &cid).expect("failed to read back the data");
    // Announce the support of some subnet.
    let subnet_id = make_subnet_id(1001);
    cluster.agents[provider_idx]
        .client
        .add_provided_subnet(subnet_id.clone())
        .expect("failed to add provided subnet");
    // Wait a little for the gossip to spread and peer lookups to happen, then another round of gossip.
    // TODO: Wait on some condition instead of sleep.
    tokio::time::sleep(Duration::from_secs(3)).await;
    // Ask for the CID to be resolved from another peer.
    // The three `expect`s unwrap, in order: the timeout, the request send,
    // and the resolution result itself.
    tokio::time::timeout(
        Duration::from_secs(3),
        cluster.agents[resolver_idx]
            .client
            .resolve(cid, subnet_id.clone()),
    )
    .await
    .expect("timeout resolving content")
    .expect("failed to send request")
    .expect("failed to resolve content");
    // Check that the CID is deposited into the store of the requestor.
    check_test_data(&mut cluster.agents[resolver_idx], &cid).expect("failed to resolve from store");
}
/// Start two agents, subscribe to the same subnet, publish and receive a vote.
#[tokio::test]
async fn single_bootstrap_publish_receive_vote() {
    init_log();
    let mut cluster = make_cluster_with_bootstrap(2, 0).await;
    // Announce the support of some subnet.
    let subnet_id = make_subnet_id(1001);
    for i in 0..cluster.size() {
        cluster.agents[i]
            .client
            .add_provided_subnet(subnet_id.clone())
            .expect("failed to add provided subnet");
    }
    // Wait a little for the gossip to spread and peer lookups to happen, then another round of gossip.
    // TODO: Wait on some condition instead of sleep.
    tokio::time::sleep(Duration::from_secs(2)).await;
    // Vote on some random CID.
    let validator_key = Keypair::generate_secp256k1();
    let cid = Cid::new_v1(IPLD_RAW, Code::Sha2_256.digest(b"foo"));
    let vote =
        VoteRecord::signed(&validator_key, subnet_id, TestVote(cid)).expect("failed to sign vote");
    // Publish the vote from agent 0.
    cluster.agents[0]
        .client
        .publish_vote(vote.clone())
        .expect("failed to send vote");
    // Receive vote on agent 1.
    let event = timeout(Duration::from_secs(2), cluster.agents[1].events.recv())
        .await
        .expect("timeout receiving vote")
        .expect("error receiving vote");
    // The gossiped record should round-trip unchanged.
    if let Event::ReceivedVote(v) = event {
        assert_eq!(&*v, vote.record());
    } else {
        panic!("unexpected {event:?}")
    }
}
/// Start two agents, pin a subnet, publish preemptively and receive.
#[tokio::test]
async fn single_bootstrap_publish_receive_preemptive() {
    init_log();
    let mut cluster = make_cluster_with_bootstrap(2, 0).await;
    // Pin a subnet on the bootstrap node.
    let subnet_id = make_subnet_id(1001);
    cluster.agents[0]
        .client
        .pin_subnet(subnet_id.clone())
        .expect("failed to pin subnet");
    // TODO: Wait on some condition instead of sleep.
    tokio::time::sleep(Duration::from_secs(1)).await;
    // Publish some content from the other agent.
    let data = vec![1, 2, 3];
    cluster.agents[1]
        .client
        .publish_preemptive(subnet_id.clone(), data.clone())
        .expect("failed to send vote");
    // Receive pre-emptive data on the pinning agent.
    let event = timeout(Duration::from_secs(2), cluster.agents[0].events.recv())
        .await
        .expect("timeout receiving data")
        .expect("error receiving data");
    // Both the subnet and the payload should arrive unchanged.
    if let Event::ReceivedPreemptive(s, d) = event {
        assert_eq!(s, subnet_id);
        assert_eq!(d, data);
    } else {
        panic!("unexpected {event:?}")
    }
}
/// Service metrics can be registered in a fresh Prometheus registry without error.
#[tokio::test]
async fn can_register_metrics() {
    let mut rng = rand::rngs::StdRng::seed_from_u64(0);
    let (mut service, _store) = make_service(make_config(&mut rng, 1, None));
    let registry = prometheus::Registry::new();
    service.register_metrics(&registry).unwrap();
}
/// Build and start a cluster of `cluster_size` nodes where every node except
/// the first bootstraps from the node at `bootstrap_idx`, then wait until the
/// nodes are connected.
///
/// NOTE(review): node 0 is always added without a bootstrap, and subsequent
/// nodes index `agents[bootstrap_idx]` immediately, so this only works when
/// `bootstrap_idx == 0` (as all current callers pass) — confirm before using
/// any other index.
async fn make_cluster_with_bootstrap(cluster_size: u32, bootstrap_idx: usize) -> Cluster {
    // TODO: Get the seed from QuickCheck
    let mut builder = ClusterBuilder::new(cluster_size);
    // Build a cluster of nodes.
    for i in 0..builder.size {
        builder.add_node(if i == 0 { None } else { Some(bootstrap_idx) });
    }
    // Start the swarms.
    let cluster = builder.run();
    cluster.await_connect().await;
    cluster
}
/// Create a service over a fresh in-memory blockstore, returning both so
/// tests can inspect the store contents directly.
fn make_service(config: Config) -> (Service<TestStoreParams, TestVote>, TestBlockstore) {
    let store = TestBlockstore::default();
    let service = Service::new_with_transport(config, store.clone(), build_transport).unwrap();
    (service, store)
}
/// Assemble a `Config` for one test node: a random in-memory listen address,
/// connection limits scaled to the cluster size, and optionally a single
/// static bootstrap address.
///
/// Fixes: removed the `let config = …; config` indirection (clippy
/// `let_and_return`) and the needless `.iter().cloned()` on the owned
/// `Option` (its `into_iter` yields zero or one owned items).
fn make_config(rng: &mut StdRng, cluster_size: u32, bootstrap_addr: Option<Multiaddr>) -> Config {
    Config {
        connection: ConnectionConfig {
            // Unique in-memory transport address, e.g. `/memory/<random-u64>`.
            listen_addr: Multiaddr::from(Protocol::Memory(rng.gen::<u64>())),
            external_addresses: vec![],
            expected_peer_count: cluster_size,
            max_incoming: cluster_size,
            max_peers_per_query: cluster_size,
            event_buffer_capacity: cluster_size,
        },
        network: NetworkConfig {
            local_key: Keypair::generate_secp256k1(),
            network_name: "smoke-test".to_owned(),
        },
        discovery: DiscoveryConfig {
            // Zero or one static address, depending on whether we bootstrap.
            static_addresses: bootstrap_addr.into_iter().collect(),
            target_connections: cluster_size.try_into().unwrap(),
            enable_kademlia: true,
        },
        membership: MembershipConfig {
            static_subnets: vec![],
            max_subnets: 10,
            publish_interval: Duration::from_secs(5),
            min_time_between_publish: Duration::from_secs(1),
            max_provider_age: Duration::from_secs(60),
        },
        content: ContentConfig {
            rate_limit_bytes: 1 << 20,
            rate_limit_period: Duration::from_secs(60),
        },
    }
}
/// Builds an in-memory transport for libp2p to communicate over, using
/// plaintext authentication and yamux/mplex stream multiplexing.
fn build_transport(local_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox)> {
    let auth = plaintext::Config::new(&local_key);

    // Multiplexer negotiation: offer yamux first, mplex as fallback.
    let muxer = {
        let mut mplex = libp2p_mplex::MplexConfig::new();
        mplex.set_max_buffer_size(usize::MAX);
        let yamux = yamux::Config::default();
        // yamux_config.set_receive_window_size(16 * 1024 * 1024);
        // yamux_config.set_max_buffer_size(16 * 1024 * 1024);
        // yamux_config.set_window_update_mode(WindowUpdateMode::OnRead);
        libp2p::core::upgrade::SelectUpgrade::new(yamux, mplex)
    };

    MemoryTransport::default()
        .upgrade(libp2p::core::upgrade::Version::V1)
        .authenticate(auth)
        .multiplex(muxer)
        .boxed()
}
/// Make a subnet directly under the rootnet (root 0), addressed by an actor id.
fn make_subnet_id(actor_id: ActorID) -> SubnetID {
    let parent = SubnetID::new_root(0);
    SubnetID::new_from_parent(&parent, Address::new_id(actor_id))
}
/// Number of keys to insert into the test HAMT.
/// By default it's 8 bit wise, which means 2**8 = 256 values fit into a node before it grows.
///
/// XXX: At 1000 keys this doesn't work at the moment, the bitswap messages go for a while,
/// but don't reach completion. Setting it to a lower number now to unblock other tasks and
/// will investigate further as a separate issue.
const KEY_COUNT: u32 = 500;

/// Insert a HAMT with `KEY_COUNT` entries into the block store of an agent
/// and return its root CID.
fn insert_test_data(agent: &mut Agent) -> anyhow::Result<Cid> {
    let mut hamt: Hamt<_, String, u32> = Hamt::new_with_bit_width(&agent.store, BIT_WIDTH);
    // Enough entries to force the HAMT to grow beyond a single `Node`.
    for key in 0..KEY_COUNT {
        hamt.set(key, format!("value {key}"))?;
    }
    Ok(hamt.flush()?)
}
/// Verify that every entry written by `insert_test_data` can be read back
/// from the agent's store via the given root CID.
fn check_test_data(agent: &mut Agent, cid: &Cid) -> anyhow::Result<()> {
    let hamt: Hamt<_, String, u32> = Hamt::load_with_bit_width(cid, &agent.store, BIT_WIDTH)?;
    // Check all the data inserted by `insert_test_data`.
    for key in 0..KEY_COUNT {
        match hamt.get(&key)? {
            Some(value) if *value != format!("value {key}") => {
                return Err(anyhow!("unexpected value: {value}"))
            }
            None => return Err(anyhow!("key {key} is missing")),
            Some(_) => {}
        }
    }
    Ok(())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/ipld/resolver/tests/store/mod.rs | ipld/resolver/tests/store/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: MIT
use std::{
collections::HashMap,
sync::{Arc, RwLock},
};
use anyhow::Result;
use fvm_ipld_blockstore::Blockstore;
use ipc_ipld_resolver::missing_blocks::missing_blocks;
use libipld::Cid;
use libp2p_bitswap::BitswapStore;
/// Thread-safe in-memory blockstore for the tests: a map from CID to raw
/// block bytes behind `Arc<RwLock<…>>`, so clones share the same contents.
#[derive(Debug, Clone, Default)]
pub struct TestBlockstore {
    // CID -> raw block bytes; shared between all clones of this store.
    blocks: Arc<RwLock<HashMap<Cid, Vec<u8>>>>,
}
impl Blockstore for TestBlockstore {
    /// Whether a block with the given CID is present.
    fn has(&self, k: &Cid) -> Result<bool> {
        let map = self.blocks.read().unwrap();
        Ok(map.contains_key(k))
    }

    /// Fetch a copy of the block bytes, if present.
    fn get(&self, k: &Cid) -> Result<Option<Vec<u8>>> {
        let map = self.blocks.read().unwrap();
        Ok(map.get(k).cloned())
    }

    /// Store (or overwrite) the block bytes under the given CID.
    fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()> {
        let mut map = self.blocks.write().unwrap();
        map.insert(*k, block.to_vec());
        Ok(())
    }
}
/// Store parameters (codec/hash configuration) used by the tests.
pub type TestStoreParams = libipld::DefaultParams;
/// Bitswap integration: every operation delegates to the `Blockstore` impl
/// above, except `missing_blocks`, which defers to the shared helper from
/// `ipc_ipld_resolver`.
impl BitswapStore for TestBlockstore {
    type Params = TestStoreParams;
    fn contains(&mut self, cid: &Cid) -> Result<bool> {
        Blockstore::has(self, cid)
    }
    fn get(&mut self, cid: &Cid) -> Result<Option<Vec<u8>>> {
        Blockstore::get(self, cid)
    }
    fn insert(&mut self, block: &libipld::Block<Self::Params>) -> Result<()> {
        Blockstore::put_keyed(self, block.cid(), block.data())
    }
    fn missing_blocks(&mut self, cid: &Cid) -> Result<Vec<Cid>> {
        missing_blocks::<Self, Self::Params>(self, cid)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/storage/src/lib.rs | fendermint/storage/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::borrow::Cow;
use std::error::Error;
use std::hash::Hash;
use std::marker::PhantomData;
/// In-memory KV store backend.
#[cfg(feature = "inmem")]
pub mod im;
/// Common test utilities.
#[cfg(feature = "testing")]
#[allow(dead_code)]
pub mod testing;
/// Possible errors during key-value operations.
#[derive(Debug, thiserror::Error)]
pub enum KVError {
    /// Another transaction committed overlapping changes first.
    #[error("operation failed because there was a write conflict")]
    Conflict,
    /// The caller aborted the transaction; carries the business-rule error.
    #[error("transaction was aborted due to some business rule violation: {0}")]
    Abort(Box<dyn Error + Send + Sync>),
    /// Encoding or decoding of a key or value failed.
    #[error("data serialization error: {0}")]
    Codec(Box<dyn Error + Send + Sync>),
    /// Catch-all for backend-specific failures.
    #[error("unexpected error: {0}")]
    Unexpected(Box<dyn Error + Send + Sync>),
}
/// Result alias used by all key-value operations.
pub type KVResult<T> = Result<T, KVError>;
/// Helper trait to reduce the number of generic parameters that infect anything
/// that has to use a KV store. It's a type family of all customizable types
/// that can vary by KV store implementation.
pub trait KVStore {
    /// Type specifying in which collection to store some homogenous data set.
    type Namespace: Clone + Hash + Eq;
    /// The type used for storing data at rest, e.g. in binary format or JSON.
    type Repr: Clone;
}
/// Encode data as binary with a serialization scheme.
pub trait Encode<T>
where
    Self: KVStore,
{
    /// Encode `value` into the at-rest representation. `Cow` lets
    /// implementations avoid a copy when the input already is the repr.
    fn to_repr(value: &T) -> KVResult<Cow<Self::Repr>>;
}
/// Decode data from binary with a serialization scheme.
pub trait Decode<T>
where
    Self: KVStore,
{
    /// Decode a value from its at-rest representation.
    fn from_repr(repr: &Self::Repr) -> KVResult<T>;
}
/// Encode and decode data.
///
/// Ideally this would be just a trait alias, but that's an unstable feature.
pub trait Codec<T>: Encode<T> + Decode<T> {}
/// Operations available on a read transaction.
pub trait KVRead<S: KVStore> {
    /// Look up and decode the value under `k` in namespace `ns`.
    fn get<K, V>(&self, ns: &S::Namespace, k: &K) -> KVResult<Option<V>>
    where
        S: Encode<K> + Decode<V>;
    /// Iterate items in the namespace ordered by their representation.
    ///
    /// TODO: Add parameters for iteration direction and bounds.
    fn iterate<K, V>(&self, ns: &S::Namespace) -> impl Iterator<Item = KVResult<(K, V)>>
    where
        K: 'static,
        V: 'static,
        S: Decode<K> + Decode<V>,
        <S as KVStore>::Repr: Ord + 'static;
}
/// Operations available on a write transaction.
pub trait KVWrite<S: KVStore>: KVRead<S> {
    /// Insert or overwrite the value under `k` in namespace `ns`.
    fn put<K, V>(&mut self, ns: &S::Namespace, k: &K, v: &V) -> KVResult<()>
    where
        S: Encode<K> + Encode<V>;
    /// Remove the value under `k` in namespace `ns`, if present.
    fn delete<K>(&mut self, ns: &S::Namespace, k: &K) -> KVResult<()>
    where
        S: Encode<K>;
}
/// Transaction running on a KV store, ending with a commit or a rollback.
/// This mimics the `Aux` interface in the STM module.
pub trait KVTransaction {
    /// Abandon the changes of the transaction.
    fn rollback(self) -> KVResult<()>;
    /// Check for write conflicts, then commit the changes.
    ///
    /// Returns `KVError::Conflict` if the commit failed due to some keys
    /// having changed during the transaction.
    fn commit(self) -> KVResult<()>;
}
/// Interface for stores that support read-only transactions.
///
/// Any resources held by the read transaction should be released when it's dropped.
pub trait KVReadable<S: KVStore> {
    /// Concrete read-transaction type, borrowing from the store.
    type Tx<'a>: KVRead<S>
    where
        Self: 'a;
    /// Start a read-only transaction.
    fn read(&self) -> Self::Tx<'_>;
}
/// Interface for stores that support read-write transactions.
pub trait KVWritable<S: KVStore> {
    /// Concrete write-transaction type, borrowing from the store.
    type Tx<'a>: KVWrite<S> + KVTransaction
    where
        Self: 'a;
    /// Start a read-write transaction.
    fn write(&self) -> Self::Tx<'_>;
    /// Start a read-write transaction, use it, then commit.
    fn with_write<F, T>(&self, f: F) -> KVResult<T>
    where
        F: FnOnce(&mut Self::Tx<'_>) -> KVResult<T>,
    {
        // NOTE(review): if `f` errs, the transaction is dropped without an
        // explicit rollback — backends may treat that as implicit rollback
        // or panic (the in-memory backend panics); confirm per backend.
        let mut tx = self.write();
        let res = f(&mut tx)?;
        tx.commit()?;
        Ok(res)
    }
}
/// A collection of homogenous objects under the same namespace.
#[derive(Clone)]
pub struct KVCollection<S: KVStore, K, V> {
    // Namespace all keys of this collection live under.
    ns: S::Namespace,
    // Zero-sized markers pinning the key and value types.
    phantom_k: PhantomData<K>,
    phantom_v: PhantomData<V>,
}
impl<S: KVStore, K, V> KVCollection<S, K, V>
where
S: Encode<K> + Encode<V> + Decode<V>,
{
pub fn new(ns: S::Namespace) -> Self {
Self {
ns,
phantom_k: PhantomData,
phantom_v: PhantomData,
}
}
pub fn get(&self, kv: &impl KVRead<S>, k: &K) -> KVResult<Option<V>> {
kv.get(&self.ns, k)
}
pub fn put(&self, kv: &mut impl KVWrite<S>, k: &K, v: &V) -> KVResult<()> {
kv.put(&self.ns, k, v)
}
pub fn delete(&self, kv: &mut impl KVWrite<S>, k: &K) -> KVResult<()> {
kv.delete(&self.ns, k)
}
pub fn iterate<'a, 'b>(
&'a self,
kv: &'b impl KVRead<S>,
) -> impl Iterator<Item = KVResult<(K, V)>> + 'b
where
S::Repr: Ord + 'static,
S: Decode<K>,
K: 'static,
V: 'static,
'a: 'b,
{
kv.iterate::<K, V>(&self.ns)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/storage/src/im.rs | fendermint/storage/src/im.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
hash::Hash,
marker::PhantomData,
mem,
sync::{Arc, Mutex, MutexGuard},
thread,
};
use crate::{
Decode, Encode, KVError, KVRead, KVReadable, KVResult, KVStore, KVTransaction, KVWritable,
KVWrite,
};
/// Read-only mode marker for `Transaction`.
pub struct Read;
/// Read-write mode marker for `Transaction`.
pub struct Write;
/// Immutable nested map: namespace -> (key repr -> shared value repr).
type IDataMap<S> = im::HashMap<
    <S as KVStore>::Namespace,
    im::HashMap<<S as KVStore>::Repr, Arc<<S as KVStore>::Repr>>,
>;
/// Given some `KVStore` type, the `InMemoryBackend` can be used to
/// emulate the same interface, but keep all the data in memory.
/// This can facilitate unit tests, but can be used for transient
/// storage as well, although STM has more granular access, and
/// the performance of this thing is likely to be terrible.
///
/// By default it serializes write transactions, which is required
/// for its correctness, but it can be disabled for the sake of
/// testing, or if writes only happen from the same task and
/// never concurrently.
///
/// Alternatively we could change the transaction implementation
/// to track individual puts/deletes and apply them in batch
/// at commit time. In that case if puts are commutative then
/// we could do multiple writes at the same time.
#[derive(Clone)]
pub struct InMemoryBackend<S: KVStore> {
    // Committed state; transactions clone it as their snapshot.
    data: Arc<Mutex<IDataMap<S>>>,
    // Global token serializing write transactions when `lock_writes` is on.
    write_token: Arc<Mutex<()>>,
    // Whether `write()` holds the token for the transaction's lifetime.
    lock_writes: bool,
}
impl<S: KVStore> InMemoryBackend<S> {
pub fn new(lock_writes: bool) -> Self {
Self {
data: Arc::new(Mutex::new(Default::default())),
write_token: Arc::new(Mutex::new(())),
lock_writes,
}
}
}
impl<S: KVStore> Default for InMemoryBackend<S> {
    fn default() -> Self {
        // Locking is the only safe way to use writes from multiple threads.
        Self::new(true)
    }
}
impl<S: KVStore> KVReadable<S> for InMemoryBackend<S>
where
    S::Repr: Hash + Eq,
{
    type Tx<'a> = Transaction<'a, S, Read> where Self: 'a;
    /// Take a fresh snapshot, to isolate the effects of any further writes
    /// to the datastore from this read transaction.
    fn read(&self) -> Transaction<S, Read> {
        Transaction {
            backend: self,
            // Cloning the persistent map is the snapshot.
            data: self.data.lock().unwrap().clone(),
            // Read transactions never hold the write token.
            token: None,
            _mode: Read,
        }
    }
}
impl<S: KVStore> KVWritable<S> for InMemoryBackend<S>
where
    S::Repr: Hash + Eq,
{
    type Tx<'a>
        = Transaction<'a, S, Write>
    where
        Self: 'a;
    /// Take a snapshot to accumulate writes until they are committed.
    /// Take a write-lock on the data if necessary, but beware it doesn't work well with STM.
    fn write(&self) -> Transaction<S, Write> {
        // Take this lock first, otherwise we might be blocking `data` from anyone being able to commit.
        let token = if self.lock_writes {
            Some(self.write_token.lock().unwrap())
        } else {
            None
        };
        Transaction {
            backend: self,
            data: self.data.lock().unwrap().clone(),
            token,
            _mode: Write,
        }
    }
}
/// A transaction that can be read-only with no write lock taken,
/// or read-write, releasing the lock when it goes out of scope.
pub struct Transaction<'a, S: KVStore, M> {
    // Backend the snapshot was taken from (and committed back to).
    backend: &'a InMemoryBackend<S>,
    // Private snapshot of the backend data at transaction start.
    data: IDataMap<S>,
    // Write token; `Some` only for locked write transactions. It must be
    // released via `commit`/`rollback`, otherwise `Drop` panics.
    token: Option<MutexGuard<'a, ()>>,
    // Marker selecting the `Read`/`Write` API surface.
    _mode: M,
}
impl<'a, S: KVStore> KVTransaction for Transaction<'a, S, Write> {
    /// Publish this transaction's snapshot as the new backend state.
    /// An exclusive lock has already been taken (when `lock_writes` is on),
    /// so this is a straight swap without a conflict check.
    fn commit(mut self) -> KVResult<()> {
        {
            let mut shared = self.backend.data.lock().unwrap();
            *shared = mem::take(&mut self.data);
        }
        // Release the write token (if held) so `Drop` doesn't panic.
        self.token.take();
        Ok(())
    }

    /// Discard the snapshot; just release the write token.
    fn rollback(mut self) -> KVResult<()> {
        self.token.take();
        Ok(())
    }
}
impl<'a, S: KVStore, M> Drop for Transaction<'a, S, M> {
    /// Guard against leaking the global write token: dropping a write
    /// transaction without `commit()`/`rollback()` is a programming error.
    /// The check is skipped while panicking to avoid a double panic.
    fn drop(&mut self) {
        if self.token.is_some() && !thread::panicking() {
            panic!("Transaction prematurely dropped. Must call `.commit()` or `.rollback()`.");
        }
    }
}
impl<'a, S: KVStore, M> KVRead<S> for Transaction<'a, S, M>
where
    S::Repr: Hash + Eq,
{
    /// Decode the value under `k` in `ns` from this transaction's snapshot.
    fn get<K, V>(&self, ns: &S::Namespace, k: &K) -> KVResult<Option<V>>
    where
        S: Encode<K> + Decode<V>,
    {
        let Some(m) = self.data.get(ns) else {
            return Ok(None);
        };
        let kr = S::to_repr(k)?;
        m.get(&kr).map(|v| S::from_repr(v)).transpose()
    }

    /// Iterate the namespace contents ordered by key representation.
    fn iterate<K, V>(&self, ns: &S::Namespace) -> impl Iterator<Item = KVResult<(K, V)>>
    where
        S: Decode<K> + Decode<V>,
        <S as KVStore>::Repr: Ord + 'static,
        K: 'static,
        V: 'static,
    {
        match self.data.get(ns) {
            None => KVIter::empty(),
            Some(m) => {
                // Materialize and sort the borrowed pairs before decoding lazily.
                let mut items: Vec<_> = m.iter().map(|(k, v)| (k, v.as_ref())).collect();
                items.sort_by(|a, b| a.0.cmp(b.0));
                KVIter::<S, K, V>::new(items)
            }
        }
    }
}
impl<'a, S: KVStore> KVWrite<S> for Transaction<'a, S, Write>
where
    S::Repr: Hash + Eq,
{
    /// Insert or overwrite `k -> v` in `ns` within the snapshot: clone the
    /// persistent namespace map, update it, and swap it back in.
    fn put<K, V>(&mut self, ns: &S::Namespace, k: &K, v: &V) -> KVResult<()>
    where
        S: Encode<K> + Encode<V>,
    {
        let mut updated = self.data.get(ns).cloned().unwrap_or_default();
        let kr = S::to_repr(k)?;
        let vr = S::to_repr(v)?;
        updated.insert(kr.into_owned(), Arc::new(vr.into_owned()));
        self.data.insert(ns.clone(), updated);
        Ok(())
    }

    /// Remove `k` from `ns` within the snapshot; a no-op if the namespace
    /// does not exist.
    fn delete<K>(&mut self, ns: &S::Namespace, k: &K) -> KVResult<()>
    where
        S: Encode<K>,
    {
        let Some(existing) = self.data.get(ns) else {
            return Ok(());
        };
        let mut updated = existing.clone();
        let kr = S::to_repr(k)?;
        updated.remove(&kr);
        self.data.insert(ns.clone(), updated);
        Ok(())
    }
}
/// Iterator over decoded (key, value) pairs backed by a pre-sorted vector
/// of borrowed representations.
struct KVIter<'a, S: KVStore, K, V> {
    // Sorted (key repr, value repr) pairs borrowed from the snapshot.
    items: Vec<(&'a S::Repr, &'a S::Repr)>,
    // Index of the next pair to yield.
    next: usize,
    phantom_v: PhantomData<V>,
    phantom_k: PhantomData<K>,
}
impl<'a, S, K, V> KVIter<'a, S, K, V>
where
    S: KVStore,
{
    /// Wrap a pre-sorted list of (key repr, value repr) pairs.
    pub fn new(items: Vec<(&'a S::Repr, &'a S::Repr)>) -> Self {
        KVIter {
            items,
            next: 0,
            phantom_v: PhantomData,
            phantom_k: PhantomData,
        }
    }

    /// An iterator that yields nothing.
    pub fn empty() -> Self {
        Self::new(Vec::new())
    }
}
impl<'a, S, K, V> Iterator for KVIter<'a, S, K, V>
where
    S: KVStore + Decode<K> + Decode<V>,
{
    type Item = Result<(K, V), KVError>;

    /// Decode and yield the next pair; decoding failures are yielded as
    /// `Err` items rather than terminating the iterator.
    fn next(&mut self) -> Option<Self::Item> {
        let &(k, v) = self.items.get(self.next)?;
        self.next += 1;
        Some(S::from_repr(k).and_then(|key| S::from_repr(v).map(|value| (key, value))))
    }
}
#[cfg(all(feature = "inmem", test))]
mod tests {
    use std::borrow::Cow;
    use crate::{im::InMemoryBackend, Codec, Decode, Encode, KVError, KVResult, KVStore};
    use quickcheck_macros::quickcheck;
    use serde::{de::DeserializeOwned, Serialize};
    use crate::testing::*;
    /// Minimal `KVStore` instantiation: string namespaces, bytes at rest.
    #[derive(Clone)]
    struct TestKVStore;
    impl KVStore for TestKVStore {
        type Namespace = TestNamespace;
        type Repr = Vec<u8>;
    }
    // Any serializable type can be encoded via `fvm_ipld_encoding`.
    impl<T: Serialize> Encode<T> for TestKVStore {
        fn to_repr(value: &T) -> KVResult<Cow<Self::Repr>> {
            fvm_ipld_encoding::to_vec(value)
                .map_err(|e| KVError::Codec(Box::new(e)))
                .map(Cow::Owned)
        }
    }
    impl<T: DeserializeOwned> Decode<T> for TestKVStore {
        fn from_repr(repr: &Self::Repr) -> KVResult<T> {
            fvm_ipld_encoding::from_slice(repr).map_err(|e| KVError::Codec(Box::new(e)))
        }
    }
    impl<T> Codec<T> for TestKVStore where TestKVStore: Encode<T> + Decode<T> {}
    // The properties below delegate to the shared checks in `crate::testing`.
    #[quickcheck]
    fn writable(data: TestData) -> bool {
        let backend = InMemoryBackend::<TestKVStore>::default();
        check_writable(&backend, data)
    }
    #[quickcheck]
    fn write_isolation(data: TestDataMulti<2>) -> bool {
        // XXX: It isn't safe to use this backend without locking writes if writes are concurrent.
        // It's just here to try the test on something.
        let backend = InMemoryBackend::<TestKVStore>::new(false);
        check_write_isolation(&backend, data)
    }
    #[quickcheck]
    fn write_isolation_concurrent(data1: TestData, data2: TestData) -> bool {
        let backend = InMemoryBackend::<TestKVStore>::default();
        check_write_isolation_concurrent(&backend, data1, data2)
    }
    #[quickcheck]
    fn write_serialization_concurrent(data1: TestData, data2: TestData) -> bool {
        let backend = InMemoryBackend::<TestKVStore>::default();
        check_write_serialization_concurrent(&backend, data1, data2)
    }
    #[quickcheck]
    fn read_isolation(data: TestData) -> bool {
        let backend = InMemoryBackend::<TestKVStore>::default();
        check_read_isolation(&backend, data)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/storage/src/testing.rs | fendermint/storage/src/testing.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::{
Codec, KVCollection, KVError, KVRead, KVReadable, KVStore, KVTransaction, KVWritable, KVWrite,
};
use quickcheck::{Arbitrary, Gen};
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::thread;
/// We'll see how this works out. We would have to wrap any KVStore
/// with something that can handle strings as namespaces.
pub type TestNamespace = &'static str;

/// Return all namespaces used by the tests, so they can be pre-allocated, if necessary.
pub fn test_namespaces() -> &'static [&'static str] {
    // The literal array of constants is promoted to a `'static` borrow.
    &["fizz", "buzz", "spam", "eggs"]
}
/// Test operations on some collections with known types,
/// so we can have the simplest possible model implementation.
#[derive(Clone, Debug)]
pub enum TestOpKV<K, V> {
    /// Read the value under a key.
    Get(K),
    /// Insert or overwrite a key.
    Put(K, V),
    /// Delete a key.
    Del(K),
    /// Iterate the whole namespace.
    Iter,
}
/// An operation aimed at one of the typed namespaces, or a rollback.
#[derive(Clone, Debug)]
pub enum TestOpNs {
    /// String-to-Int
    S2I(TestNamespace, TestOpKV<String, u8>),
    /// Int-to-String
    I2S(TestNamespace, TestOpKV<u8, String>),
    /// Abandon the current transaction and start a new one.
    Rollback,
}
/// A randomly generated sequence of operations to run in one transaction.
#[derive(Clone, Debug)]
pub struct TestData {
    ops: Vec<TestOpNs>,
}
/// Generate commands from a limited set of keys so there's a
/// high probability that we get/delete what we put earlier.
impl Arbitrary for TestOpNs {
    fn arbitrary(g: &mut Gen) -> Self {
        use TestOpKV::*;
        use TestOpNs::*;
        // Weights: ~47% string->int ops, ~47% int->string ops, ~6% rollback.
        match u8::arbitrary(g) % 100 {
            i if i < 47 => {
                let ns = g.choose(&["spam", "eggs"]).unwrap();
                let k = *g.choose(&["foo", "bar", "baz"]).unwrap();
                // Within the namespace: 30% get, 10% iter, 50% put, 10% del.
                match u8::arbitrary(g) % 10 {
                    i if i < 3 => S2I(ns, Get(k.to_owned())),
                    i if i < 4 => S2I(ns, Iter),
                    i if i < 9 => S2I(ns, Put(k.to_owned(), Arbitrary::arbitrary(g))),
                    _ => S2I(ns, Del(k.to_owned())),
                }
            }
            i if i < 94 => {
                let ns = g.choose(&["fizz", "buzz"]).unwrap();
                let k = u8::arbitrary(g) % 3;
                // Same op distribution as above, with short random string values.
                match u8::arbitrary(g) % 10 {
                    i if i < 3 => I2S(ns, Get(k)),
                    i if i < 4 => I2S(ns, Iter),
                    i if i < 9 => {
                        // Random string of 0..=4 characters.
                        let sz = u8::arbitrary(g) % 5;
                        let s = (0..sz).map(|_| char::arbitrary(g)).collect();
                        I2S(ns, Put(k, s))
                    }
                    _ => I2S(ns, Del(k)),
                }
            }
            _ => Rollback,
        }
    }
}
impl Arbitrary for TestData {
    /// Generate a random sequence of namespace operations.
    fn arbitrary(g: &mut Gen) -> Self {
        let ops = Vec::arbitrary(g);
        TestData { ops }
    }
}
/// Test data for multiple transactions, interspersed.
#[derive(Clone, Debug)]
pub struct TestDataMulti<const N: usize> {
    // (transaction index, operation) pairs; the index is in `0..N`.
    ops: Vec<(usize, TestOpNs)>,
}
impl<const N: usize> Arbitrary for TestDataMulti<N> {
    /// Generate `N` independent op sequences and intersperse them by
    /// tagging each op with a random `i32` rank and sorting on it.
    fn arbitrary(g: &mut Gen) -> Self {
        let mut ops = Vec::new();
        for i in 0..N {
            let data = TestData::arbitrary(g);
            ops.extend(data.ops.into_iter().map(|op| (i32::arbitrary(g), i, op)));
        }
        // Stable sort on the random rank shuffles the transactions together
        // while preserving the relative order within each transaction.
        ops.sort_by_key(|(r, _, _)| *r);
        TestDataMulti {
            ops: ops.into_iter().map(|(_, i, op)| (i, op)).collect(),
        }
    }
}
/// Concrete `KVStore` type family used by these tests.
pub struct TestDataStore;
impl KVStore for TestDataStore {
    type Namespace = TestNamespace;
    type Repr = Vec<u8>;
}
/// Pure in-memory reference model the real store is checked against.
#[derive(Default)]
struct Model {
    s2i: HashMap<TestNamespace, HashMap<String, u8>>,
    i2s: HashMap<TestNamespace, HashMap<u8, String>>,
}
/// Lazily-created `KVCollection` handles, keyed by namespace.
struct Collections<S: KVStore> {
    s2i: HashMap<TestNamespace, KVCollection<S, String, u8>>,
    i2s: HashMap<TestNamespace, KVCollection<S, u8, String>>,
}
impl<S: KVStore> Default for Collections<S> {
    // Manual impl: `derive(Default)` would needlessly require `S: Default`.
    fn default() -> Self {
        Self {
            s2i: HashMap::new(),
            i2s: HashMap::new(),
        }
    }
}
impl<S> Collections<S>
where
    S: KVStore<Namespace = TestNamespace> + Clone + Codec<String> + Codec<u8>,
{
    /// Get or create the string->int collection for `ns`.
    fn s2i(&mut self, ns: TestNamespace) -> &KVCollection<S, String, u8> {
        self.s2i.entry(ns).or_insert_with(|| KVCollection::new(ns))
    }
    /// Get or create the int->string collection for `ns`.
    fn i2s(&mut self, ns: TestNamespace) -> &KVCollection<S, u8, String> {
        self.i2s.entry(ns).or_insert_with(|| KVCollection::new(ns))
    }
}
/// State machine test for an implementation of a `KVWritable` using a sequence of random ops.
pub fn check_writable<S>(sut: &impl KVWritable<S>, data: TestData) -> bool
where
    S: KVStore<Namespace = TestNamespace> + Clone + Codec<String> + Codec<u8>,
    S::Repr: Ord + 'static,
{
    let mut model = Model::default();
    // Creating a collection doesn't add much to the test but at least we exercise this path.
    let mut colls = Collections::<S>::default();
    // Start the transaction.
    let mut tx = sut.write();
    let mut ok = true;
    for d in data.ops {
        match d {
            TestOpNs::S2I(ns, op) => {
                let coll = colls.s2i(ns);
                if !apply_both(&mut tx, &mut model.s2i, coll, ns, op) {
                    ok = false;
                }
            }
            TestOpNs::I2S(ns, op) => {
                let coll = colls.i2s(ns);
                if !apply_both(&mut tx, &mut model.i2s, coll, ns, op) {
                    ok = false;
                }
            }
            TestOpNs::Rollback => {
                //println!("ROLLBACK");
                // A rollback resets both the store transaction and the model.
                model = Model::default();
                tx.rollback().unwrap();
                tx = sut.write();
            }
        }
    }
    // Nothing is committed: the whole run is discarded at the end.
    tx.rollback().unwrap();
    ok
}
/// Check that two write transactions don't see each others' changes.
///
/// This test assumes that write transactions can be executed concurrently, that
/// they don't block each other. If that's not the case don't call this test.
pub fn check_write_isolation<S>(sut: &impl KVWritable<S>, data: TestDataMulti<2>) -> bool
where
    S: KVStore<Namespace = TestNamespace> + Clone + Codec<String> + Codec<u8>,
    S::Repr: Ord + 'static,
{
    let mut colls = Collections::<S>::default();
    // One independent model per transaction: each should only see its own writes.
    let mut model1 = Model::default();
    let mut model2 = Model::default();
    let mut tx1 = sut.write();
    let mut tx2 = sut.write();
    let mut ok = true;
    for (i, op) in data.ops {
        // Route each op to the transaction/model pair it was generated for.
        let tx = if i == 0 { &mut tx1 } else { &mut tx2 };
        let model = if i == 0 { &mut model1 } else { &mut model2 };
        match op {
            TestOpNs::S2I(ns, op) => {
                let coll = colls.s2i(ns);
                if !apply_both(tx, &mut model.s2i, coll, ns, op) {
                    ok = false;
                }
            }
            TestOpNs::I2S(ns, op) => {
                let coll = colls.i2s(ns);
                if !apply_both(tx, &mut model.i2s, coll, ns, op) {
                    ok = false;
                }
            }
            // Rollbacks are not exercised in this test.
            TestOpNs::Rollback => {}
        }
    }
    tx1.rollback().unwrap();
    tx2.rollback().unwrap();
    ok
}
/// Check that two write transactions don't see each others' changes when executed on different threads.
pub fn check_write_isolation_concurrent<S, B>(sut: &B, data1: TestData, data2: TestData) -> bool
where
    S: KVStore<Namespace = TestNamespace> + Clone + Codec<String> + Codec<u8>,
    S::Repr: Ord + 'static,
    B: KVWritable<S> + Clone + Send + 'static,
{
    // Run one transaction on a spawned thread, the other on this thread;
    // each verifies its own writes against an independent model.
    let remote = sut.clone();
    let handle = thread::spawn(move || check_writable(&remote, data2));
    let here_ok = check_writable(sut, data1);
    let there_ok = handle.join().unwrap();
    here_ok && there_ok
}
/// Check that two write transactions are serializable, their effects don't get lost and aren't interspersed.
pub fn check_write_serialization_concurrent<S, B>(sut: &B, data1: TestData, data2: TestData) -> bool
where
    S: KVStore<Namespace = TestNamespace> + Clone + Codec<String> + Codec<u8>,
    B: KVWritable<S> + KVReadable<S> + Clone + Send + 'static,
{
    // Tests can now fail during writes because they realise some other transaction has already committed.
    let try_apply_sut = |sut: &B, data: &TestData| -> Result<(), KVError> {
        let mut tx = sut.write();
        // Only the mutating ops matter here; reads/iterations are skipped.
        for op in data.ops.iter() {
            match op {
                TestOpNs::S2I(ns, TestOpKV::Put(k, v)) => tx.put(ns, k, v)?,
                TestOpNs::S2I(ns, TestOpKV::Del(k)) => tx.delete(ns, k)?,
                TestOpNs::I2S(ns, TestOpKV::Put(k, v)) => tx.put(ns, k, v)?,
                TestOpNs::I2S(ns, TestOpKV::Del(k)) => tx.delete(ns, k)?,
                _ => (),
            }
        }
        tx.commit()
    };
    // Try to apply once, if it fails due to conflict, retry, otherwise panic.
    let apply_sut = move |sut: &B, data: &TestData| match try_apply_sut(sut, data) {
        Err(KVError::Conflict) => try_apply_sut(sut, data).unwrap(),
        Err(other) => panic!("error applying test data: {other:?}"),
        Ok(()) => (),
    };
    // Apply both datasets concurrently, then inspect the committed state.
    let sutc = sut.clone();
    let data2c = data2.clone();
    let t = thread::spawn(move || apply_sut(&sutc, &data2c));
    apply_sut(sut, &data1);
    t.join().unwrap();
    // The changes were applied in one order or the other.
    let tx = sut.read();
    // Replay the writes as `a` followed by `b` on the model, then check that
    // every Get op agrees with the committed store state.
    let apply_model = |a: &TestData, b: &TestData| -> bool {
        let mut model = Model::default();
        // First apply all the writes
        for op in a.ops.iter().chain(b.ops.iter()).cloned() {
            match op {
                TestOpNs::S2I(ns, TestOpKV::Put(k, v)) => {
                    model.s2i.entry(ns).or_default().insert(k, v);
                }
                TestOpNs::S2I(ns, TestOpKV::Del(k)) => {
                    model.s2i.entry(ns).or_default().remove(&k);
                }
                TestOpNs::I2S(ns, TestOpKV::Put(k, v)) => {
                    model.i2s.entry(ns).or_default().insert(k, v);
                }
                TestOpNs::I2S(ns, TestOpKV::Del(k)) => {
                    model.i2s.entry(ns).or_default().remove(&k);
                }
                _ => (),
            }
        }
        // Then just the reads on the final state.
        for op in a.ops.iter().chain(b.ops.iter()) {
            match op {
                TestOpNs::S2I(ns, TestOpKV::Get(k)) => {
                    let expected = tx.get::<String, u8>(ns, k).unwrap();
                    let found = model.s2i.get(ns).and_then(|m| m.get(k)).cloned();
                    if found != expected {
                        return false;
                    }
                }
                TestOpNs::I2S(ns, TestOpKV::Get(k)) => {
                    let expected = tx.get::<u8, String>(ns, k).unwrap();
                    let found = model.i2s.get(ns).and_then(|m| m.get(k)).cloned();
                    if found != expected {
                        return false;
                    }
                }
                _ => (),
            }
        }
        true
    };
    // Serializable means the state matches one of the two possible orders.
    let ok = apply_model(&data1, &data2) || apply_model(&data2, &data1);
    drop(tx);
    ok
}
/// Check that read transactions don't see changes from write transactions.
///
/// This test assumes that read and write transactions can be executed concurrently,
/// that they don't block each other. If that's not the case don't call this test.
pub fn check_read_isolation<S, B>(sut: &B, data: TestData) -> bool
where
    S: KVStore<Namespace = TestNamespace> + Clone + Codec<String> + Codec<u8>,
    S::Repr: Ord + 'static,
    B: KVWritable<S> + KVReadable<S>,
{
    let mut model = Model::default();
    let mut colls = Collections::<S>::default();
    let mut txw = sut.write();
    let mut txr = sut.read();
    let mut gets = Vec::new();
    let mut ok = true;
    // Only the string->int ops are exercised; the read transaction started
    // before any writes, so it must never observe a value.
    for op in data.ops {
        if let TestOpNs::S2I(ns, op) = op {
            let coll = colls.s2i(ns);
            apply_both(&mut txw, &mut model.s2i, coll, ns, op.clone());
            if let TestOpKV::Get(k) = &op {
                if coll.get(&txr, k).unwrap().is_some() {
                    ok = false;
                }
                // Remember the reads to replay them later.
                gets.push((ns, op));
            }
        }
    }
    // Commit the writes, but they should still not be visible to the reads that started earlier.
    txw.commit().unwrap();
    for (ns, op) in &gets {
        let coll = colls.s2i(ns);
        if let TestOpKV::Get(k) = op {
            let found = coll.get(&txr, k).unwrap();
            if found.is_some() {
                ok = false;
            }
        }
    }
    // Finish the reads and start another read transaction. Now the writes should be visible.
    drop(txr);
    txr = sut.read();
    for (ns, op) in &gets {
        let coll = colls.s2i(ns);
        if let TestOpKV::Get(k) = op {
            let found = coll.get(&txr, k).unwrap();
            let expected = model.s2i.get(ns).and_then(|m| m.get(k)).cloned();
            if found != expected {
                ok = false;
            }
        }
    }
    ok
}
/// Apply an operation to both the in-memory model and the KV store.
///
/// Mutating operations are performed on both; read operations (`Get`, `Iter`)
/// compare the store against the model. Returns `false` on the first mismatch.
fn apply_both<S, K, V>(
    tx: &mut impl KVWrite<S>,
    model: &mut HashMap<TestNamespace, HashMap<K, V>>,
    coll: &KVCollection<S, K, V>,
    ns: TestNamespace,
    op: TestOpKV<K, V>,
) -> bool
where
    S: KVStore<Namespace = TestNamespace> + Clone + Codec<K> + Codec<V>,
    K: Clone + Hash + Eq + 'static,
    V: Clone + PartialEq + 'static,
    S::Repr: Ord + 'static,
{
    match op {
        TestOpKV::Get(k) => {
            // A point read must agree between the store and the model.
            let actual = coll.get(tx, &k).unwrap();
            let modeled = model.get(ns).and_then(|m| m.get(&k)).cloned();
            actual == modeled
        }
        TestOpKV::Put(k, v) => {
            coll.put(tx, &k, &v).unwrap();
            model.entry(ns).or_default().insert(k, v);
            true
        }
        TestOpKV::Del(k) => {
            coll.delete(tx, &k).unwrap();
            model.entry(ns).or_default().remove(&k);
            true
        }
        TestOpKV::Iter => {
            let actual = coll.iterate(tx).collect::<Result<Vec<_>, _>>().unwrap();
            // The store iterates in key-representation order, so sort the model
            // contents by `S::to_repr` before comparing.
            let mut modeled = model
                .get(ns)
                .map(|m| {
                    m.iter()
                        .map(|(k, v)| (k.clone(), v.clone()))
                        .collect::<Vec<_>>()
                })
                .unwrap_or_default();
            modeled.sort_by(|a, b| {
                let ka = S::to_repr(&a.0).unwrap();
                let kb = S::to_repr(&b.0).unwrap();
                ka.cmp(&kb)
            });
            actual == modeled
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/config.rs | fendermint/app/options/src/config.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use clap::Args;
// Marker type: the `config` subcommand carries no arguments of its own.
#[derive(Args, Debug)]
pub struct ConfigArgs;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/genesis.rs | fendermint/app/options/src/genesis.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::PathBuf;
use clap::{Args, Subcommand, ValueEnum};
use ipc_api::subnet_id::SubnetID;
use super::parse::{
parse_eth_address, parse_full_fil, parse_network_version, parse_percentage, parse_signer_addr,
parse_token_amount,
};
use fendermint_vm_genesis::SignerAddr;
use fvm_shared::{address::Address, econ::TokenAmount, version::NetworkVersion};
// Kind of account to create from a public key when adding accounts to genesis.
#[derive(Debug, Clone, ValueEnum)]
pub enum AccountKind {
    Regular,
    Ethereum,
}
// Subcommands of `genesis`; each operates on the genesis file selected by
// `GenesisArgs::genesis_file`.
#[derive(Subcommand, Debug)]
pub enum GenesisCommands {
    /// Create a new Genesis file, with accounts and validators to be added later.
    New(GenesisNewArgs),
    /// Add an account to the genesis file.
    AddAccount(GenesisAddAccountArgs),
    /// Add a multi-sig account to the genesis file.
    AddMultisig(GenesisAddMultisigArgs),
    /// Add a validator to the genesis file.
    AddValidator(GenesisAddValidatorArgs),
    /// Set the EAM actor permission mode.
    SetEamPermissions(GenesisSetEAMPermissionsArgs),
    /// IPC commands.
    Ipc {
        #[command(subcommand)]
        command: GenesisIpcCommands,
    },
    /// Convert the genesis file into the format expected by Tendermint.
    IntoTendermint(GenesisIntoTendermintArgs),
}
// Arguments for `genesis set-eam-permissions`.
#[derive(Args, Debug)]
pub struct GenesisSetEAMPermissionsArgs {
    #[arg(
        long,
        short,
        default_value = "unrestricted",
        help = "Permission mode (unrestricted/allowlist) that controls who can deploy contracts in the subnet"
    )]
    pub mode: String,
    #[arg(
        long,
        short,
        value_delimiter = ',',
        value_parser = parse_signer_addr,
        help = "List of addresses that can deploy contract. Field is ignored if mode is unrestricted"
    )]
    pub addresses: Vec<SignerAddr>,
}
// Top-level arguments of the `genesis` command: the file to operate on plus a subcommand.
#[derive(Args, Debug)]
pub struct GenesisArgs {
    /// Path to the genesis JSON file.
    #[arg(long, short)]
    pub genesis_file: PathBuf,
    #[command(subcommand)]
    pub command: GenesisCommands,
}
// Arguments for `genesis new`: parameters of a fresh, empty genesis file.
#[derive(Args, Debug)]
pub struct GenesisNewArgs {
    /// Genesis timestamp as seconds since Unix epoch.
    #[arg(long, short)]
    pub timestamp: u64,
    /// Name of the network and chain.
    #[arg(long, short = 'n')]
    pub chain_name: String,
    /// Network version, governs which set of built-in actors to use.
    #[arg(long, short = 'v', default_value = "21", value_parser = parse_network_version)]
    pub network_version: NetworkVersion,
    /// Base fee for running transactions in atto.
    #[arg(long, short = 'f', value_parser = parse_token_amount)]
    pub base_fee: TokenAmount,
    /// Number of decimals to use during converting FIL to Power.
    #[arg(long, short)]
    pub power_scale: i8,
}
// Arguments for `genesis add-account`.
#[derive(Args, Debug)]
pub struct GenesisAddAccountArgs {
    /// Path to the Secp256k1 public key exported in base64 format.
    #[arg(long, short)]
    pub public_key: PathBuf,
    /// Initial balance in full FIL units.
    #[arg(long, short, value_parser = parse_full_fil)]
    pub balance: TokenAmount,
    /// Indicate whether the account is a regular or ethereum account.
    #[arg(long, short, default_value = "regular")]
    pub kind: AccountKind,
}
// Arguments for `genesis add-multisig`.
#[derive(Args, Debug)]
pub struct GenesisAddMultisigArgs {
    /// Path to the Secp256k1 public key exported in base64 format, one for each signatory.
    #[arg(long, short)]
    pub public_key: Vec<PathBuf>,
    /// Initial balance in full FIL units.
    #[arg(long, short, value_parser = parse_full_fil)]
    pub balance: TokenAmount,
    /// Number of signatures required.
    #[arg(long, short)]
    pub threshold: u64,
    /// Linear unlock duration in block heights.
    #[arg(long, short = 'd')]
    pub vesting_duration: u64,
    /// Linear unlock start block height.
    #[arg(long, short = 's')]
    pub vesting_start: u64,
}
// Arguments for `genesis add-validator`.
#[derive(Args, Debug)]
pub struct GenesisAddValidatorArgs {
    /// Path to the Secp256k1 public key exported in base64 format.
    #[arg(long, short)]
    pub public_key: PathBuf,
    /// The collateral staked by the validator, lending it its voting power.
    #[arg(long, short = 'v', value_parser = parse_full_fil)]
    pub power: TokenAmount,
}
// Arguments for `genesis into-tendermint`.
#[derive(Args, Debug)]
pub struct GenesisIntoTendermintArgs {
    /// Output file name for the Tendermint genesis JSON file.
    #[arg(long, short)]
    pub out: PathBuf,
    /// Maximum block size in bytes.
    #[arg(long, default_value_t = 22020096)]
    pub block_max_bytes: u64,
}
// IPC-related `genesis` subcommands.
#[derive(Subcommand, Debug, Clone)]
pub enum GenesisIpcCommands {
    /// Set all gateway parameters.
    Gateway(GenesisIpcGatewayArgs),
    /// Fetch the genesis parameters of a subnet from the parent.
    // Boxed to keep the enum small (clippy::large_enum_variant).
    FromParent(Box<GenesisFromParentArgs>),
}
// Arguments for `genesis ipc gateway`.
#[derive(Args, Debug, Clone)]
pub struct GenesisIpcGatewayArgs {
    /// Set the current subnet ID, which is the path from the root to the subnet actor in the parent.
    #[arg(long, short)]
    pub subnet_id: SubnetID,
    // NOTE(review): no help text; presumably the bottom-up checkpoint period in
    // block heights — confirm against the gateway actor.
    #[arg(long, short)]
    pub bottom_up_check_period: u64,
    /// Message fee in atto.
    #[arg(long, short = 'f', value_parser = parse_token_amount)]
    pub msg_fee: TokenAmount,
    /// Quorum majority percentage [51 - 100]
    #[arg(long, short, value_parser = parse_percentage::<u8>)]
    pub majority_percentage: u8,
    /// Maximum number of active validators.
    #[arg(long, short = 'v', default_value = "100")]
    pub active_validators_limit: u16,
}
// Arguments for `genesis ipc from-parent`: derive a child genesis from the parent chain.
#[derive(Args, Debug, Clone)]
pub struct GenesisFromParentArgs {
    /// Child subnet for which the genesis file is being created
    #[arg(long, short)]
    pub subnet_id: SubnetID,
    /// Endpoint to the RPC of the child subnet's parent
    #[arg(long, short)]
    pub parent_endpoint: url::Url,
    /// HTTP basic authentication token.
    #[arg(long)]
    pub parent_auth_token: Option<String>,
    /// IPC gateway of the parent; 20 byte Ethereum address in 0x prefixed hex format
    #[arg(long, value_parser = parse_eth_address, default_value = "0xff00000000000000000000000000000000000064")]
    pub parent_gateway: Address,
    /// IPC registry of the parent; 20 byte Ethereum address in 0x prefixed hex format
    #[arg(long, value_parser = parse_eth_address, default_value = "0xff00000000000000000000000000000000000065")]
    pub parent_registry: Address,
    /// Network version, governs which set of built-in actors to use.
    #[arg(long, short = 'v', default_value = "21", value_parser = parse_network_version)]
    pub network_version: NetworkVersion,
    /// Base fee for running transactions in atto.
    #[arg(long, short = 'f', value_parser = parse_token_amount, default_value = "1000")]
    pub base_fee: TokenAmount,
    /// Number of decimals to use during converting FIL to Power.
    #[arg(long, default_value = "3")]
    pub power_scale: i8,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/key.rs | fendermint/app/options/src/key.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::PathBuf;
use clap::{Args, Subcommand};
// Subcommands of `key`: generation and format conversions for Secp256k1 keys.
#[derive(Subcommand, Debug)]
pub enum KeyCommands {
    /// Generate a new Secp256k1 key pair and export them to files in base64 format.
    Gen(KeyGenArgs),
    /// Convert a secret key file from base64 into the format expected by Tendermint.
    IntoTendermint(KeyIntoTendermintArgs),
    /// Convert a public key file from base64 into an f1 Address format and print it to STDOUT.
    Address(KeyAddressArgs),
    /// Get the peer ID corresponding to a node ID and its network address and print it to a local file.
    AddPeer(AddPeer),
    /// Converts a hex encoded Ethereum private key into a Base64 encoded Fendermint keypair.
    #[clap(alias = "eth-to-fendermint")]
    FromEth(KeyFromEthArgs),
    /// Converts a Base64 encoded Fendermint private key into a hex encoded Ethereum secret key, public key and address (20 bytes).
    IntoEth(KeyIntoEthArgs),
    /// Show the libp2p peer ID derived from a Secp256k1 public key.
    ShowPeerId(KeyShowPeerIdArgs),
}
// Top-level arguments of the `key` command.
#[derive(Args, Debug)]
pub struct KeyArgs {
    #[command(subcommand)]
    pub command: KeyCommands,
}
// Arguments for `key add-peer`.
#[derive(Args, Debug)]
pub struct AddPeer {
    /// The path to a CometBFT node key file.
    #[arg(long, short = 'n')]
    pub node_key_file: PathBuf,
    /// The path to a temporary local file where the peer IDs will be added.
    /// The file will be created if it doesn't exist.
    #[arg(long, short)]
    pub local_peers_file: PathBuf,
    /// The target CometBFT node network interface in the following format `IP:Port`.
    /// For example: `192.168.10.7:26656`.
    #[arg(long, short)]
    pub network_addr: String,
}
// Arguments for `key gen`.
#[derive(Args, Debug)]
pub struct KeyGenArgs {
    /// Name used to distinguish the files from other exported keys.
    #[arg(long, short)]
    pub name: String,
    /// Directory to export the key files to; it must exist.
    #[arg(long, short, default_value = ".")]
    pub out_dir: PathBuf,
}
// Arguments for `key from-eth`.
#[derive(Args, Debug)]
pub struct KeyFromEthArgs {
    /// Path to the file that stores the private key (hex format)
    #[arg(long, short)]
    pub secret_key: PathBuf,
    /// Name used to distinguish the files from other exported keys.
    #[arg(long, short)]
    pub name: String,
    /// Directory to export the key files to; it must exist.
    #[arg(long, short, default_value = ".")]
    pub out_dir: PathBuf,
}
// Arguments for `key into-eth`.
#[derive(Args, Debug)]
pub struct KeyIntoEthArgs {
    /// Path to the file that stores the private key (base64 format)
    #[arg(long, short)]
    pub secret_key: PathBuf,
    /// Name used to distinguish the files from other exported keys.
    #[arg(long, short)]
    pub name: String,
    /// Directory to export the key files to; it must exist.
    #[arg(long, short, default_value = ".")]
    pub out_dir: PathBuf,
}
// Arguments for `key into-tendermint`.
#[derive(Args, Debug)]
pub struct KeyIntoTendermintArgs {
    /// Path to the secret key we want to convert to Tendermint format.
    #[arg(long, short)]
    pub secret_key: PathBuf,
    /// Output file name for the Tendermint private validator key JSON file.
    #[arg(long, short)]
    pub out: PathBuf,
}
// Arguments for `key address`.
#[derive(Args, Debug)]
pub struct KeyAddressArgs {
    /// Path to the public key we want to convert to f1 format.
    #[arg(long, short)]
    pub public_key: PathBuf,
}
// Arguments for `key show-peer-id`.
#[derive(Args, Debug)]
pub struct KeyShowPeerIdArgs {
    /// Path to the public key we want to convert to a libp2p peer ID.
    #[arg(long, short)]
    pub public_key: PathBuf,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/lib.rs | fendermint/app/options/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::PathBuf;
use clap::{Args, Parser, Subcommand};
use config::ConfigArgs;
use debug::DebugArgs;
use fvm_shared::address::Network;
use lazy_static::lazy_static;
use tracing_subscriber::EnvFilter;
use self::{
eth::EthArgs, genesis::GenesisArgs, key::KeyArgs, materializer::MaterializerArgs, rpc::RpcArgs,
run::RunArgs,
};
pub mod config;
pub mod debug;
pub mod eth;
pub mod genesis;
pub mod key;
pub mod materializer;
pub mod rpc;
pub mod run;
mod log;
mod parse;
use log::{parse_log_level, LogLevel};
use parse::parse_network;
lazy_static! {
    // Canonical env var -> accepted aliases; `set_env_from_aliases` copies the
    // value of the first set alias into the canonical variable if it is unset.
    static ref ENV_ALIASES: Vec<(&'static str, Vec<&'static str>)> = vec![
        ("FM_NETWORK", vec!["IPC_NETWORK", "NETWORK"]),
        ("FM_LOG_LEVEL", vec!["LOG_LEVEL", "RUST_LOG"])
    ];
}
/// Parse the main arguments by:
/// 0. Detecting aliased env vars
/// 1. Parsing the [GlobalOptions]
/// 2. Setting any system wide parameters based on the globals
/// 3. Parsing and returning the final [Options]
pub fn parse() -> Options {
    set_env_from_aliases();
    // First pass: only the global flags, so the address network is in effect
    // before the full options (which may contain addresses) are parsed.
    let globals = GlobalOptions::parse();
    fvm_shared::address::set_current_network(globals.global.network);
    Options::parse()
}
/// Assign value to env vars from aliases, if the canonic key doesn't exist but the alias does.
fn set_env_from_aliases() {
    for (key, aliases) in ENV_ALIASES.iter() {
        // An already-populated canonical key always wins over its aliases.
        if std::env::var(key).is_ok() {
            continue;
        }
        // Otherwise take the value of the first alias that is set, if any.
        if let Some(value) = aliases.iter().find_map(|alias| std::env::var(alias).ok()) {
            std::env::set_var(key, value);
        }
    }
}
#[derive(Args, Debug)]
pub struct GlobalArgs {
    /// Set the FVM Address Network. Its value affects whether `f` (main) or `t` (test) prefixed addresses are accepted.
    #[arg(short, long, default_value = "mainnet", env = "FM_NETWORK", value_parser = parse_network)]
    pub network: Network,
}
/// A version of options that does partial matching on the arguments, with its only interest
/// being the capture of global parameters that need to take effect first, before we parse [Options],
/// because their value affects how others are parsed.
///
/// This one doesn't handle `--help` or `help` so that it is passed on to the next parser,
/// where the full set of commands and arguments can be printed properly.
#[derive(Parser, Debug)]
#[command(version, disable_help_flag = true)]
pub struct GlobalOptions {
    #[command(flatten)]
    pub global: GlobalArgs,
    /// Capture all the normal commands, basically to ignore them.
    #[arg(allow_hyphen_values = true, trailing_var_arg = true)]
    pub cmd: Vec<String>,
}
// Full command-line options, parsed after `GlobalOptions` has taken effect.
#[derive(Parser, Debug)]
#[command(version)]
pub struct Options {
    /// Set a custom directory for data and configuration files.
    #[arg(
        short = 'd',
        long,
        default_value = "~/.fendermint",
        env = "FM_HOME_DIR"
    )]
    pub home_dir: PathBuf,
    /// Set a custom directory for configuration files
    // Private on purpose: read through `Options::config_dir`, which falls back
    // to `<home_dir>/config` when unset.
    #[arg(long, env = "FM_CONFIG_DIR")]
    config_dir: Option<PathBuf>,
    /// Set a custom directory for ipc log files.
    #[arg(long, env = "FM_LOG_DIR")]
    pub log_dir: Option<PathBuf>,
    /// Set a custom prefix for ipc log files.
    #[arg(long, env = "FM_LOG_FILE_PREFIX")]
    pub log_file_prefix: Option<String>,
    /// Optionally override the default configuration.
    #[arg(short, long, default_value = "dev")]
    pub mode: String,
    /// Set the logging level of the console.
    // Private on purpose: read through `Options::log_console_filter`.
    #[arg(
        short = 'l',
        long,
        default_value = "info",
        value_enum,
        env = "FM_LOG_LEVEL",
        help = "Standard log levels, or a comma separated list of filters, e.g. 'debug,tower_abci=warn,libp2p::gossipsub=info'",
        value_parser = parse_log_level,
    )]
    log_level: LogLevel,
    /// Set the logging level of the log file. If missing, it defaults to the same level as the console.
    // Private on purpose: read through `Options::log_file_filter`.
    #[arg(
        long,
        value_enum,
        env = "FM_LOG_FILE_LEVEL",
        value_parser = parse_log_level,
    )]
    log_file_level: Option<LogLevel>,
    /// Global options repeated here for discoverability, so they show up in `--help` among the others.
    #[command(flatten)]
    pub global: GlobalArgs,
    #[command(subcommand)]
    pub command: Commands,
}
impl Options {
    /// Tracing filter for the console.
    ///
    /// Coalescing everything into a filter instead of either a level or a filter
    /// because the `tracing_subscriber` setup methods like `with_filter` and `with_level`
    /// produce different static types and it's not obvious how to use them as alternatives.
    pub fn log_console_filter(&self) -> anyhow::Result<EnvFilter> {
        self.log_level.to_filter()
    }
    /// Tracing filter for the log file; falls back to the console level when unset.
    pub fn log_file_filter(&self) -> anyhow::Result<EnvFilter> {
        match self.log_file_level {
            Some(ref level) => level.to_filter(),
            None => self.log_console_filter(),
        }
    }
    /// Path to the configuration directories.
    ///
    /// If not specified then returns the default under the home directory.
    pub fn config_dir(&self) -> PathBuf {
        // `unwrap_or_else` so the default path is only built when there is no
        // explicit directory (the previous `unwrap_or` allocated it eagerly),
        // and plain `.clone()` instead of the redundant `.as_ref().cloned()`.
        self.config_dir
            .clone()
            .unwrap_or_else(|| self.home_dir.join("config"))
    }
    /// Check if metrics are supposed to be collected.
    pub fn metrics_enabled(&self) -> bool {
        matches!(self.command, Commands::Run(_) | Commands::Eth(_))
    }
}
// Top-level subcommands of the `fendermint` binary.
#[allow(clippy::large_enum_variant)]
#[derive(Subcommand, Debug)]
pub enum Commands {
    /// Parse the configuration file and print it to the console.
    Config(ConfigArgs),
    /// Arbitrary commands that aid in debugging.
    Debug(DebugArgs),
    /// Run the `App`, listening to ABCI requests from Tendermint.
    Run(RunArgs),
    /// Subcommands related to the construction of signing keys.
    Key(KeyArgs),
    /// Subcommands related to the construction of Genesis files.
    Genesis(GenesisArgs),
    /// Subcommands related to sending JSON-RPC commands/queries to Tendermint.
    Rpc(RpcArgs),
    /// Subcommands related to the Ethereum API facade.
    Eth(EthArgs),
    /// Subcommands related to the Testnet Materializer.
    #[clap(aliases = &["mat", "matr", "mate"])]
    Materializer(MaterializerArgs),
}
#[cfg(test)]
mod tests {
    use crate::*;
    use clap::Parser;
    use fvm_shared::address::Network;
    use tracing::level_filters::LevelFilter;
    /// Set some env vars, run a fallible piece of code, then unset the variables otherwise they would affect the next test.
    pub fn with_env_vars<F, T>(vars: &[(&str, &str)], f: F) -> T
    where
        F: FnOnce() -> T,
    {
        for (k, v) in vars.iter() {
            std::env::set_var(k, v);
        }
        let result = f();
        for (k, _) in vars {
            std::env::remove_var(k);
        }
        result
    }
    #[test]
    fn parse_global() {
        // Only the global `--network` flag matters here; the rest of the
        // command line is captured into `cmd` and ignored.
        let cmd = "fendermint --network testnet genesis --genesis-file ./genesis.json ipc gateway --subnet-id /r123/t0456 -b 10 -t 10 -f 10 -m 65";
        let opts: GlobalOptions = GlobalOptions::parse_from(cmd.split_ascii_whitespace());
        assert_eq!(opts.global.network, Network::Testnet);
    }
    #[test]
    fn global_options_ignore_help() {
        // `disable_help_flag` means this must parse rather than trigger help and exit.
        let cmd = "fendermint --help";
        let _opts: GlobalOptions = GlobalOptions::parse_from(cmd.split_ascii_whitespace());
    }
    #[test]
    fn network_from_env() {
        // Make sure variables leaked from the environment don't affect the examples.
        for (key, _) in ENV_ALIASES.iter() {
            std::env::remove_var(key);
        }
        let examples = [
            (vec![], Network::Mainnet),
            (vec![("IPC_NETWORK", "testnet")], Network::Testnet),
            (vec![("NETWORK", "testnet")], Network::Testnet),
            (vec![("FM_NETWORK", "testnet")], Network::Testnet),
            (
                // The canonical variable wins over its alias.
                vec![("IPC_NETWORK", "testnet"), ("FM_NETWORK", "mainnet")],
                Network::Mainnet,
            ),
        ];
        for (i, (vars, network)) in examples.iter().enumerate() {
            let opts = with_env_vars(vars, || {
                set_env_from_aliases();
                let opts: GlobalOptions = GlobalOptions::parse_from(["fendermint", "run"]);
                opts
            });
            assert_eq!(opts.global.network, *network, "example {i}");
        }
    }
    #[test]
    fn options_handle_help() {
        let cmd = "fendermint --help";
        // This test would fail with a panic if we have a misconfiguration in our options.
        // On successfully parsing `--help` with `parse_from` the library would `.exit()` the test framework itself,
        // which is why we must use `try_parse_from`. An error results in a panic from `parse_from` and an `Err`
        // from this, but `--help` is not an `Ok`, since we aren't getting `Options`; it's an `Err` with a help message.
        let e = Options::try_parse_from(cmd.split_ascii_whitespace())
            .expect_err("--help is not Options");
        assert!(e.to_string().contains("Usage:"), "unexpected help: {e}");
    }
    #[test]
    fn parse_log_level() {
        let parse_filter = |cmd: &str| {
            let opts: Options = Options::parse_from(cmd.split_ascii_whitespace());
            opts.log_console_filter().expect("filter should parse")
        };
        let assert_level = |cmd: &str, level: LevelFilter| {
            let filter = parse_filter(cmd);
            assert_eq!(filter.max_level_hint(), Some(level))
        };
        assert_level("fendermint --log-level debug run", LevelFilter::DEBUG);
        assert_level("fendermint --log-level off run", LevelFilter::OFF);
        assert_level(
            "fendermint --log-level libp2p=warn,error run",
            LevelFilter::WARN,
        );
        assert_level("fendermint --log-level info run", LevelFilter::INFO);
    }
    #[test]
    fn parse_invalid_log_level() {
        // NOTE: `nonsense` in itself is interpreted as a target. Maybe we should mandate at least `=` in it?
        let cmd = "fendermint --log-level nonsense/123 run";
        Options::try_parse_from(cmd.split_ascii_whitespace()).expect_err("should not parse");
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/parse.rs | fendermint/app/options/src/parse.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::str::FromStr;
use bytes::Bytes;
use cid::Cid;
use num_traits::{FromPrimitive, Num};
use fendermint_vm_genesis::SignerAddr;
use fvm_shared::{
address::{set_current_network, Address, Network},
bigint::BigInt,
econ::TokenAmount,
version::NetworkVersion,
};
/// Decimals for filecoin in nano
const FIL_AMOUNT_NANO_DIGITS: u32 = 9;

/// Parse a network version number; anything below 21 is rejected.
pub fn parse_network_version(s: &str) -> Result<NetworkVersion, String> {
    match s.parse::<u32>() {
        Ok(nv) if nv >= 21 => Ok(NetworkVersion::from(nv)),
        Ok(_) => Err("the minimum network version is 21".to_owned()),
        Err(_) => Err(format!("`{s}` isn't a network version")),
    }
}
/// Parse a token amount given as an integer number of atto (10^-18 FIL).
pub fn parse_token_amount(s: &str) -> Result<TokenAmount, String> {
    BigInt::from_str_radix(s, 10)
        .map_err(|e| format!("not a token amount: {e}"))
        .map(TokenAmount::from_atto)
}

/// Parse a token amount given in full FIL units, with an optional fractional part.
///
/// Precision beyond nano-FIL is truncated.
pub fn parse_full_fil(s: &str) -> Result<TokenAmount, String> {
    let f: f64 = s
        .parse()
        .map_err(|_| "input not a token amount".to_owned())?;
    // Reject NaN, infinities and negative amounts explicitly: the `as u128`
    // cast below would silently saturate them to 0, so e.g. "-5" used to be
    // accepted as a zero amount.
    if !f.is_finite() || f < 0.0 {
        return Err("input not a token amount".to_owned());
    }
    let nano = f64::trunc(f * (10u64.pow(FIL_AMOUNT_NANO_DIGITS) as f64));
    Ok(TokenAmount::from_nano(nano as u128))
}
/// Parse a CID from its string representation.
pub fn parse_cid(s: &str) -> Result<Cid, String> {
    match Cid::from_str(s) {
        Ok(cid) => Ok(cid),
        Err(e) => Err(format!("error parsing CID: {e}")),
    }
}

/// Parse an FVM address, first switching the global address network to match
/// the `f` (mainnet) or `t` (testnet) prefix so the parser accepts it.
pub fn parse_address(s: &str) -> Result<Address, String> {
    if let Some(prefix) = s.chars().next() {
        if prefix == 'f' {
            set_current_network(Network::Mainnet);
        } else if prefix == 't' {
            set_current_network(Network::Testnet);
        }
    }
    Address::from_str(s).map_err(|e| format!("error parsing address: {e}"))
}
pub fn parse_signer_addr(s: &str) -> Result<SignerAddr, String> {
Address::from_str(s)
.map(SignerAddr)
.map_err(|e| format!("error parsing addresses: {e}"))
}
pub fn parse_bytes(s: &str) -> Result<Bytes, String> {
match hex::decode(s) {
Ok(bz) => Ok(Bytes::from(bz)),
Err(e) => Err(format!("error parsing raw bytes as hex: {e}")),
}
}
/// Parse a percentage value in the range (0-100]; note that 0 itself is rejected.
pub fn parse_percentage<T>(s: &str) -> Result<T, String>
where
    T: Num + FromStr + PartialOrd + TryFrom<u8>,
    <T as FromStr>::Err: std::fmt::Display,
    <T as TryFrom<u8>>::Error: std::fmt::Debug,
{
    match T::from_str(s) {
        // `p > zero` excludes 0; the doc used to claim [0-100] but 0 was never accepted.
        Ok(p) if p > T::zero() && p <= T::try_from(100u8).unwrap() => Ok(p),
        Ok(_) => Err("percentage out of range".to_owned()),
        Err(e) => Err(format!("error parsing as percentage: {e}")),
    }
}
/// Parse the FVM network from a name ("main(net)"/"f", "test(net)"/"t") or a numeric code.
///
/// Note: this only parses the value; the caller applies it globally
/// (see the handling of `GlobalArgs` in the options parsing).
pub fn parse_network(s: &str) -> Result<Network, String> {
    let lower = s.to_lowercase();
    match lower.as_str() {
        "main" | "mainnet" | "f" => return Ok(Network::Mainnet),
        "test" | "testnet" | "t" => return Ok(Network::Testnet),
        _ => {}
    }
    let n: u8 = lower
        .parse()
        .map_err(|e| format!("expected 0 or 1 for network: {e}"))?;
    Network::from_u8(n).ok_or_else(|| format!("unexpected network: {s}"))
}

/// Parse a 20 byte, 0x prefixed hex Ethereum address into an FVM [Address].
pub fn parse_eth_address(s: &str) -> Result<Address, String> {
    ipc_types::EthAddress::from_str(s)
        .map(|a| a.into())
        .map_err(|e| format!("not a valid ethereum address: {e}"))
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/eth.rs | fendermint/app/options/src/eth.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use clap::{Args, Subcommand};
use tendermint_rpc::{Url, WebSocketClientUrl};
// Top-level arguments of the `eth` command.
#[derive(Args, Debug)]
pub struct EthArgs {
    #[command(subcommand)]
    pub command: EthCommands,
}
// Subcommands of `eth`.
#[derive(Subcommand, Debug, Clone)]
pub enum EthCommands {
    /// Run the Ethereum JSON-RPC facade.
    Run {
        /// The URL of the Tendermint node's RPC endpoint.
        #[arg(
            long,
            short,
            default_value = "http://127.0.0.1:26657",
            env = "TENDERMINT_RPC_URL"
        )]
        http_url: Url,
        /// The URL of the Tendermint node's WebSocket endpoint.
        #[arg(
            long,
            short,
            default_value = "ws://127.0.0.1:26657/websocket",
            env = "TENDERMINT_WS_URL"
        )]
        ws_url: WebSocketClientUrl,
        /// Seconds to wait between trying to connect to the websocket.
        #[arg(long, short = 'd', default_value = "5")]
        connect_retry_delay: u64,
    },
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/log.rs | fendermint/app/options/src/log.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use clap::{builder::PossibleValue, ValueEnum};
use lazy_static::lazy_static;
use tracing_subscriber::EnvFilter;
/// Standard log levels, or something we can pass to <https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html>
///
/// To be fair all of these could be handled by the `EnvFilter`, even `off`,
/// however I also wanted to leave it here as an example of implementing `ValueEnum` manually,
/// and perhaps we have simpler use cases where we only want to simply match levels.
#[derive(Debug, Clone)]
pub enum LogLevel {
    Off,
    Error,
    Warn,
    Info,
    Debug,
    Trace,
    // Free-form `EnvFilter` directive string, e.g. "debug,libp2p=warn";
    // validated by `parse_log_level` before being stored.
    Filter(String),
}
impl LogLevel {
    /// The string form accepted by `EnvFilter::try_new`.
    pub fn as_str(&self) -> &str {
        match self {
            LogLevel::Off => "off",
            LogLevel::Error => "error",
            LogLevel::Warn => "warn",
            LogLevel::Info => "info",
            LogLevel::Debug => "debug",
            LogLevel::Trace => "trace",
            LogLevel::Filter(s) => s.as_str(),
        }
    }
    /// Build the `tracing_subscriber` filter corresponding to this level.
    pub fn to_filter(&self) -> anyhow::Result<EnvFilter> {
        // At this point the filter should have been parsed before,
        // but if we created a log level directly, it can fail.
        // We fail if it doesn't parse because presumably we _want_ to see those things.
        Ok(EnvFilter::try_new(self.as_str())?)
    }
}
impl ValueEnum for LogLevel {
    fn value_variants<'a>() -> &'a [Self] {
        // `LogLevel` has a `String`-holding variant, so the slice can't be a
        // `const`; a lazily initialised static is used instead.
        lazy_static! {
            static ref VARIANTS: Vec<LogLevel> = vec![
                LogLevel::Off,
                LogLevel::Error,
                LogLevel::Warn,
                LogLevel::Info,
                LogLevel::Debug,
                LogLevel::Trace,
            ];
        }
        &VARIANTS
    }
    fn to_possible_value(&self) -> Option<PossibleValue> {
        // `Filter` is not offered as a named choice; it is only reachable
        // through the fallback in `parse_log_level`.
        if let LogLevel::Filter(_) = self {
            None
        } else {
            Some(PossibleValue::new(self.as_str().to_string()))
        }
    }
}
/// Parse a CLI value into a [LogLevel]: either one of the named levels
/// (case-insensitive) or an arbitrary `EnvFilter` directive string.
pub fn parse_log_level(s: &str) -> Result<LogLevel, String> {
    // Try the plain named levels first.
    if let Ok(level) = ValueEnum::from_str(s, true) {
        return Ok(level);
    }
    // Fall back to an `EnvFilter` directive; `EnvFilter` is not `Clone`, so
    // only validate it here and store the raw string.
    match EnvFilter::try_new(s) {
        Ok(_) => Ok(LogLevel::Filter(s.to_string())),
        Err(e) => Err(e.to_string()),
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/materializer.rs | fendermint/app/options/src/materializer.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::PathBuf;
use clap::{Args, Subcommand};
use fendermint_materializer::{AccountId, TestnetId};
// Top-level arguments of the `materializer` command.
#[derive(Args, Debug)]
pub struct MaterializerArgs {
    /// Path to the directory where the materializer can store its artifacts.
    ///
    /// This must be the same between materializer invocations.
    #[arg(
        long,
        short,
        env = "FM_MATERIALIZER__DATA_DIR",
        default_value = "~/.ipc/materializer"
    )]
    pub data_dir: PathBuf,
    /// Seed for random values in the materialized testnet.
    #[arg(long, short, env = "FM_MATERIALIZER__SEED", default_value = "0")]
    pub seed: u64,
    #[command(subcommand)]
    pub command: MaterializerCommands,
}
// Subcommands of `materializer`.
#[derive(Subcommand, Debug)]
pub enum MaterializerCommands {
    /// Validate a testnet manifest.
    Validate(MaterializerValidateArgs),
    /// Setup a testnet.
    Setup(MaterializerSetupArgs),
    /// Tear down a testnet.
    #[clap(aliases = &["teardown", "rm"])]
    Remove(MaterializerRemoveArgs),
    /// Import an existing secret key into a testnet; for example to use an already funded account on Calibration net.
    ImportKey(MaterializerImportKeyArgs),
}
// Arguments for `materializer validate`.
#[derive(Args, Debug)]
pub struct MaterializerValidateArgs {
    /// Path to the manifest file.
    ///
    /// The format of the manifest (e.g. JSON or YAML) will be determined based on the file extension.
    #[arg(long, short)]
    pub manifest_file: PathBuf,
}
// Arguments for `materializer setup`.
#[derive(Args, Debug)]
pub struct MaterializerSetupArgs {
    /// Path to the manifest file.
    ///
    /// The format of the manifest (e.g. JSON or YAML) will be determined based on the file extension.
    ///
    /// The name of the manifest (without the extension) will act as the testnet ID.
    #[arg(long, short)]
    pub manifest_file: PathBuf,
    /// Run validation before attempting to set up the testnet.
    #[arg(long, short, default_value = "false")]
    pub validate: bool,
}
// Arguments for `materializer remove`.
#[derive(Args, Debug)]
pub struct MaterializerRemoveArgs {
    /// ID of the testnet to remove.
    #[arg(long, short)]
    pub testnet_id: TestnetId,
}
// Arguments for `materializer import-key`.
#[derive(Args, Debug)]
pub struct MaterializerImportKeyArgs {
    /// Path to the manifest file.
    ///
    /// This is used to determine the testnet ID as well as to check that the account exists.
    #[arg(long, short)]
    pub manifest_file: PathBuf,
    /// Path to the Secp256k1 private key exported in base64 or hexadecimal format.
    #[arg(long, short)]
    pub secret_key: PathBuf,
    /// ID of the account to import the key for.
    // NOTE(review): the original help text here ("Run validation before attempting
    // to set up the testnet.") was copy-pasted from the setup command and did not
    // describe this field.
    #[arg(long, short)]
    pub account_id: AccountId,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/debug.rs | fendermint/app/options/src/debug.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::PathBuf;
use crate::parse::parse_eth_address;
use clap::{Args, Subcommand};
use fvm_shared::address::Address;
use ipc_api::subnet_id::SubnetID;
// Top-level arguments of the `debug` command.
#[derive(Args, Debug)]
pub struct DebugArgs {
    #[command(subcommand)]
    pub command: DebugCommands,
}
// Subcommands of `debug`.
#[derive(Subcommand, Debug)]
pub enum DebugCommands {
    /// IPC commands.
    Ipc {
        #[command(subcommand)]
        command: DebugIpcCommands,
    },
}
// IPC-related `debug` subcommands.
#[derive(Subcommand, Debug, Clone)]
pub enum DebugIpcCommands {
    /// Fetch topdown events from the parent and export them to JSON.
    ///
    /// This can be used to construct an upgrade to impute missing events.
    // Boxed to keep the enum small (clippy::large_enum_variant).
    ExportTopDownEvents(Box<DebugExportTopDownEventsArgs>),
}
// Arguments for `debug ipc export-top-down-events`.
#[derive(Args, Debug, Clone)]
pub struct DebugExportTopDownEventsArgs {
    /// Child subnet for which the events will be fetched
    #[arg(long, short)]
    pub subnet_id: SubnetID,
    /// Endpoint to the RPC of the child subnet's parent
    #[arg(long, short)]
    pub parent_endpoint: url::Url,
    /// HTTP basic authentication token.
    #[arg(long)]
    pub parent_auth_token: Option<String>,
    /// IPC gateway of the parent; 20 byte Ethereum address in 0x prefixed hex format
    #[arg(long, value_parser = parse_eth_address)]
    pub parent_gateway: Address,
    /// IPC registry of the parent; 20 byte Ethereum address in 0x prefixed hex format
    #[arg(long, value_parser = parse_eth_address)]
    pub parent_registry: Address,
    /// The first block to query for events.
    #[arg(long)]
    pub start_block_height: u64,
    /// The last block to query for events.
    #[arg(long)]
    pub end_block_height: u64,
    /// Location of the JSON file to write events to.
    #[arg(long)]
    pub events_file: PathBuf,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/run.rs | fendermint/app/options/src/run.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use clap::Args;
#[derive(Args, Debug)]
pub struct RunArgs;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/src/rpc.rs | fendermint/app/options/src/rpc.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::PathBuf;
use bytes::Bytes;
use cid::Cid;
use clap::{Args, Subcommand, ValueEnum};
use fvm_ipld_encoding::RawBytes;
use fvm_shared::{address::Address, econ::TokenAmount, MethodNum};
use tendermint_rpc::Url;
use crate::{
genesis::AccountKind,
parse::{parse_address, parse_bytes, parse_cid, parse_full_fil, parse_token_amount},
};
#[derive(Args, Debug)]
pub struct RpcArgs {
/// The URL of the Tendermint node's RPC endpoint.
#[arg(
long,
short,
default_value = "http://127.0.0.1:26657",
env = "TENDERMINT_RPC_URL"
)]
pub url: Url,
/// An optional HTTP/S proxy through which to submit requests to the
/// Tendermint node's RPC endpoint.
#[arg(long)]
pub proxy_url: Option<Url>,
#[command(subcommand)]
pub command: RpcCommands,
}
#[derive(Subcommand, Debug, Clone)]
pub enum RpcCommands {
/// Send an ABCI query.
Query {
/// Block height to query; 0 means latest.
#[arg(long, short = 'b', default_value_t = 0)]
height: u64,
#[command(subcommand)]
command: RpcQueryCommands,
},
/// Transfer tokens between accounts.
Transfer {
/// Address of the actor to send the message to.
#[arg(long, short, value_parser = parse_address)]
to: Address,
#[command(flatten)]
args: TransArgs,
},
/// Send a message (a.k.a. transaction) to an actor.
Transaction {
/// Address of the actor to send the message to.
#[arg(long, short, value_parser = parse_address)]
to: Address,
/// Method number to invoke on the actor.
#[arg(long, short)]
method_number: MethodNum,
/// Raw IPLD byte parameters to pass to the method, in hexadecimal format.
#[arg(long, short, value_parser = parse_bytes)]
params: RawBytes,
#[command(flatten)]
args: TransArgs,
},
/// Subcommands related to FEVM.
Fevm {
#[command(subcommand)]
command: RpcFevmCommands,
#[command(flatten)]
args: TransArgs,
},
}
#[derive(Subcommand, Debug, Clone)]
pub enum RpcQueryCommands {
/// Get raw IPLD content; print it as base64 string.
Ipld {
/// CID key of the IPLD content to retrieve.
#[arg(long, short, value_parser = parse_cid)]
cid: Cid,
},
/// Get the state of an actor; print it as JSON.
ActorState {
/// Address of the actor to query.
#[arg(long, short, value_parser = parse_address)]
address: Address,
},
/// Get the slowly changing state parameters.
StateParams,
}
#[derive(Subcommand, Debug, Clone)]
pub enum RpcFevmCommands {
/// Deploy an EVM contract from source; print the results as JSON.
Create {
/// Path to a compiled Solidity contract, expected to be in hexadecimal format.
#[arg(long, short)]
contract: PathBuf,
/// ABI encoded constructor arguments passed to the EVM, expected to be in hexadecimal format.
#[arg(long, short, value_parser = parse_bytes, default_value = "")]
constructor_args: Bytes,
},
/// Invoke an EVM contract; print the results as JSON with the return data rendered in hexadecimal format.
Invoke {
#[command(flatten)]
args: FevmArgs,
},
/// Call an EVM contract without a transaction; print the results as JSON with the return data rendered in hexadecimal format.
Call {
#[command(flatten)]
args: FevmArgs,
/// Block height to query; 0 means latest.
#[arg(long, short = 'b', default_value_t = 0)]
height: u64,
},
/// Estimate the gas required to execute a FEVM invocation.
EstimateGas {
#[command(flatten)]
args: FevmArgs,
/// Block height to query; 0 means latest.
#[arg(long, short = 'b', default_value_t = 0)]
height: u64,
},
}
/// Arguments common to FEVM method calls.
#[derive(Args, Debug, Clone)]
pub struct FevmArgs {
/// Either the actor ID based or the EAM delegated address of the contract to call.
#[arg(long, short)]
pub contract: Address,
/// ABI encoded method hash, expected to be in hexadecimal format.
#[arg(long, short, value_parser = parse_bytes)]
pub method: Bytes,
/// ABI encoded call arguments passed to the EVM, expected to be in hexadecimal format.
#[arg(long, short, value_parser = parse_bytes, default_value = "")]
pub method_args: Bytes,
}
/// Arguments common to transactions and transfers.
#[derive(Args, Debug, Clone)]
pub struct TransArgs {
/// Name of chain the for which the message will be signed.
#[arg(long, short, env = "FM_CHAIN_NAME")]
pub chain_name: String,
/// Amount of tokens to send, in full FIL, not atto.
#[arg(long, short, value_parser = parse_full_fil, default_value = "0")]
pub value: TokenAmount,
/// Path to the secret key of the sender to sign the transaction.
#[arg(long, short)]
pub secret_key: PathBuf,
/// Indicate whether its a regular or ethereum account.
#[arg(long, short, default_value = "regular")]
pub account_kind: AccountKind,
/// Sender account nonce.
#[arg(long, short = 'n')]
pub sequence: u64,
/// Maximum amount of gas that can be charged.
#[arg(long, default_value_t = 10_000_000_000)] // Default from ref-fvm testkit.
pub gas_limit: u64,
/// Price of gas.
///
/// Any discrepancy between this and the base fee is paid for
/// by the validator who puts the transaction into the block.
#[arg(long, value_parser = parse_token_amount, default_value = "0")]
pub gas_fee_cap: TokenAmount,
/// Gas premium.
#[arg(long, value_parser = parse_token_amount, default_value = "0")]
pub gas_premium: TokenAmount,
/// Whether to wait for the results from Tendermint or not.
#[arg(long, short, default_value = "commit")]
pub broadcast_mode: BroadcastMode,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
pub enum BroadcastMode {
/// Do no wait for the results.
Async,
/// Wait for the result of `check_tx`.
Sync,
/// Wait for the result of `deliver_tx`.
Commit,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/options/examples/network.rs | fendermint/app/options/examples/network.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Examples of passing CLI options. Some are tricky and require some values to be parsed first.
//! These examples are here so we have an easier way to test them than having to compile the app.
//!
//! ```text
//! cargo run --example network -- --help
//! cargo run --example network -- --network 1 genesis --genesis-file ./genesis.json ipc gateway --subnet-id /r123/t0456 -b 10 -t 10 -c 1.5 -f 10 -m 65
//! FVM_NETWORK=testnet cargo run --example network -- genesis --genesis-file ./genesis.json ipc gateway --subnet-id /r123/t0456 -b 10 -t 10 -c 1.5 -f 10 -m 65
//! ```
use clap::Parser;
use fendermint_app_options::{GlobalOptions, Options};
pub fn main() {
let opts: GlobalOptions = GlobalOptions::parse();
println!("command: {:?}", opts.cmd);
let n = opts.global.network;
println!("setting current network: {n:?}");
fvm_shared::address::set_current_network(n);
let opts: Options = Options::parse();
println!("{opts:?}");
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/settings/src/fvm.rs | fendermint/app/settings/src/fvm.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_shared::econ::TokenAmount;
use serde::Deserialize;
use serde_with::serde_as;
use crate::IsHumanReadable;
#[serde_as]
#[derive(Debug, Deserialize, Clone)]
pub struct FvmSettings {
/// Overestimation rate applied to gas estimations to ensure that the
/// message goes through
pub gas_overestimation_rate: f64,
/// Gas search step increase used to find the optimal gas limit.
/// It determines how fine-grained we want the gas estimation to be.
pub gas_search_step: f64,
/// Indicate whether transactions should be fully executed during the checks performed
/// when they are added to the mempool, or just the most basic ones are performed.
///
/// Enabling this option is required to fully support "pending" queries in the Ethereum API,
/// otherwise only the nonces and balances are projected into a partial state.
pub exec_in_check: bool,
/// Gas fee used when broadcasting transactions.
#[serde_as(as = "IsHumanReadable")]
pub gas_fee_cap: TokenAmount,
/// Gas premium used when broadcasting transactions.
#[serde_as(as = "IsHumanReadable")]
pub gas_premium: TokenAmount,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/settings/src/lib.rs | fendermint/app/settings/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use config::{Config, ConfigError, Environment, File};
use fvm_shared::address::Address;
use fvm_shared::econ::TokenAmount;
use ipc_api::subnet_id::SubnetID;
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DurationSeconds};
use std::fmt::{Display, Formatter};
use std::net::{SocketAddr, ToSocketAddrs};
use std::path::{Path, PathBuf};
use std::time::Duration;
use tendermint_rpc::Url;
use testing::TestingSettings;
use utils::EnvInterpol;
use fendermint_vm_encoding::{human_readable_delegate, human_readable_str};
use fendermint_vm_topdown::BlockHeight;
use self::eth::EthSettings;
use self::fvm::FvmSettings;
use self::resolver::ResolverSettings;
use ipc_provider::config::deserialize::deserialize_eth_address_from_str;
pub mod eth;
pub mod fvm;
pub mod resolver;
pub mod testing;
pub mod utils;
/// Marker to be used with the `#[serde_as(as = "IsHumanReadable")]` annotations.
///
/// We can't just import `fendermint_vm_encoding::IsHumanReadable` because we can't implement traits for it here,
/// however we can use the `human_readable_delegate!` macro to delegate from this to that for the types we need
/// and it will look the same.
struct IsHumanReadable;
human_readable_str!(SubnetID);
human_readable_delegate!(TokenAmount);
#[derive(Debug, Deserialize, Clone)]
pub struct SocketAddress {
pub host: String,
pub port: u32,
}
impl Display for SocketAddress {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}:{}", self.host, self.port)
}
}
impl std::net::ToSocketAddrs for SocketAddress {
type Iter = <String as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
self.to_string().to_socket_addrs()
}
}
impl TryInto<std::net::SocketAddr> for SocketAddress {
type Error = std::io::Error;
fn try_into(self) -> Result<SocketAddr, Self::Error> {
self.to_socket_addrs()?
.next()
.ok_or_else(|| std::io::Error::from(std::io::ErrorKind::AddrNotAvailable))
}
}
#[derive(Debug, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
/// Indicate the FVM account kind for generating addresses from a key.
pub enum AccountKind {
/// Has an f1 address.
Regular,
/// Has an f410 address.
Ethereum,
}
/// A Secp256k1 key used to sign transactions,
/// with the account kind showing if it's a regular or an ethereum key.
#[derive(Debug, Deserialize, Clone)]
pub struct SigningKey {
path: PathBuf,
pub kind: AccountKind,
}
home_relative!(SigningKey { path });
/// A BLS key used to sign transactions,
#[derive(Debug, Deserialize, Clone)]
pub struct BlsSigningKey {
path: PathBuf,
}
home_relative!(BlsSigningKey { path });
#[derive(Debug, Deserialize, Clone)]
pub struct AbciSettings {
pub listen: SocketAddress,
/// Queue size for each ABCI component.
pub bound: usize,
/// Maximum number of messages allowed in a block.
pub block_max_msgs: usize,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
/// Indicate the FVM account kind for generating addresses from a key.
///
/// See https://github.com/facebook/rocksdb/wiki/Compaction
pub enum DbCompaction {
/// Good when most keys don't change.
Level,
Universal,
Fifo,
/// Auto-compaction disabled, has to be called manually.
None,
}
impl Display for DbCompaction {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
serde_json::to_value(self)
.map_err(|e| {
tracing::error!("cannot format DB compaction to json: {e}");
std::fmt::Error
})?
.as_str()
.ok_or(std::fmt::Error)?
)
}
}
#[derive(Debug, Deserialize, Clone)]
pub struct DbSettings {
/// Length of the app state history to keep in the database before pruning; 0 means unlimited.
///
/// This affects how long we can go back in state queries.
pub state_hist_size: u64,
/// How to compact the datastore.
pub compaction_style: DbCompaction,
}
/// Settings affecting how we deal with failures in trying to send transactions to the local CometBFT node.
/// It is not expected to be unavailable, however we might get into race conditions about the nonce which
/// would need us to try creating a completely new transaction and try again.
#[serde_as]
#[derive(Debug, Deserialize, Clone)]
pub struct BroadcastSettings {
/// Number of times to retry broadcasting a transaction.
pub max_retries: u8,
/// Time to wait between retries. This should roughly correspond to the block interval.
#[serde_as(as = "DurationSeconds<u64>")]
pub retry_delay: Duration,
/// Any over-estimation to apply on top of the estimate returned by the API.
pub gas_overestimation_rate: f64,
}
#[serde_as]
#[derive(Debug, Deserialize, Clone)]
pub struct TopDownSettings {
/// The number of blocks to delay before reporting a height as final on the parent chain.
/// To propose a certain number of epochs delayed from the latest height, we see to be
/// conservative and avoid other from rejecting the proposal because they don't see the
/// height as final yet.
pub chain_head_delay: BlockHeight,
/// The number of blocks on top of `chain_head_delay` to wait before proposing a height
/// as final on the parent chain, to avoid slight disagreements between validators whether
/// a block is final, or not just yet.
pub proposal_delay: BlockHeight,
/// The max number of blocks one should make the topdown proposal
pub max_proposal_range: BlockHeight,
/// The max number of blocks to hold in memory for parent syncer
pub max_cache_blocks: Option<BlockHeight>,
/// Parent syncing cron period, in seconds
#[serde_as(as = "DurationSeconds<u64>")]
pub polling_interval: Duration,
/// Top down exponential back off retry base
#[serde_as(as = "DurationSeconds<u64>")]
pub exponential_back_off: Duration,
/// The max number of retries for exponential backoff before giving up
pub exponential_retry_limit: usize,
/// The parent rpc http endpoint
pub parent_http_endpoint: Url,
/// Timeout for calls to the parent Ethereum API.
#[serde_as(as = "Option<DurationSeconds<u64>>")]
pub parent_http_timeout: Option<Duration>,
/// Bearer token for any Authorization header.
pub parent_http_auth_token: Option<String>,
/// The parent registry address
#[serde(deserialize_with = "deserialize_eth_address_from_str")]
pub parent_registry: Address,
/// The parent gateway address
#[serde(deserialize_with = "deserialize_eth_address_from_str")]
pub parent_gateway: Address,
}
#[serde_as]
#[derive(Debug, Deserialize, Clone)]
pub struct IpcSettings {
#[serde_as(as = "IsHumanReadable")]
pub subnet_id: SubnetID,
/// Interval with which votes can be gossiped.
#[serde_as(as = "DurationSeconds<u64>")]
pub vote_interval: Duration,
/// Timeout after which the last vote is re-published.
#[serde_as(as = "DurationSeconds<u64>")]
pub vote_timeout: Duration,
/// The config for top down checkpoint. It's None if subnet id is root or not activating
/// any top down checkpoint related operations
pub topdown: Option<TopDownSettings>,
}
impl IpcSettings {
pub fn topdown_config(&self) -> anyhow::Result<&TopDownSettings> {
self.topdown
.as_ref()
.ok_or_else(|| anyhow!("top down config missing"))
}
}
#[serde_as]
#[derive(Debug, Deserialize, Clone)]
pub struct SnapshotSettings {
/// Enable the export and import of snapshots.
pub enabled: bool,
/// How often to attempt to export snapshots in terms of block height.
pub block_interval: BlockHeight,
/// Number of snapshots to keep before purging old ones.
pub hist_size: usize,
/// Target chunk size, in bytes.
pub chunk_size_bytes: usize,
/// How long to keep a snapshot from being purged after it has been requested by a peer.
#[serde_as(as = "DurationSeconds<u64>")]
pub last_access_hold: Duration,
/// How often to poll CometBFT to see whether it has caught up with the chain.
#[serde_as(as = "DurationSeconds<u64>")]
pub sync_poll_interval: Duration,
/// Temporary directory for downloads.
download_dir: Option<PathBuf>,
}
impl SnapshotSettings {
pub fn download_dir(&self) -> PathBuf {
self.download_dir.clone().unwrap_or(std::env::temp_dir())
}
}
#[derive(Debug, Deserialize, Clone)]
pub struct MetricsSettings {
/// Enable the export of metrics over HTTP.
pub enabled: bool,
/// HTTP listen address where Prometheus metrics are hosted.
pub listen: SocketAddress,
}
#[derive(Debug, Deserialize, Clone)]
pub struct Settings {
/// Home directory configured on the CLI, to which all paths in settings can be set relative.
home_dir: PathBuf,
/// Database files.
data_dir: PathBuf,
/// State snapshots.
snapshots_dir: PathBuf,
/// Solidity contracts.
contracts_dir: PathBuf,
/// Builtin-actors CAR file.
builtin_actors_bundle: PathBuf,
/// Custom actors CAR file.
custom_actors_bundle: PathBuf,
/// Where to reach CometBFT for queries or broadcasting transactions.
tendermint_rpc_url: Url,
/// Block height where we should gracefully stop the node
pub halt_height: i64,
/// Secp256k1 private key used for signing transactions sent in the validator's name. Leave empty if not validating.
pub validator_key: Option<SigningKey>,
/// Path to bls private key used for signing tags.
pub bls_signing_key: Option<BlsSigningKey>,
pub abci: AbciSettings,
pub db: DbSettings,
pub metrics: MetricsSettings,
pub snapshots: SnapshotSettings,
pub eth: EthSettings,
pub fvm: FvmSettings,
pub resolver: ResolverSettings,
pub broadcast: BroadcastSettings,
pub ipc: IpcSettings,
pub testing: Option<TestingSettings>,
}
impl Settings {
home_relative!(
data_dir,
snapshots_dir,
contracts_dir,
builtin_actors_bundle,
custom_actors_bundle
);
/// Load the default configuration from a directory,
/// then potential overrides specific to the run mode,
/// then overrides from the local environment,
/// finally parse it into the [Settings] type.
pub fn new(config_dir: &Path, home_dir: &Path, run_mode: &str) -> Result<Self, ConfigError> {
Self::config(config_dir, home_dir, run_mode).and_then(Self::parse)
}
/// Load the configuration into a generic data structure.
fn config(config_dir: &Path, home_dir: &Path, run_mode: &str) -> Result<Config, ConfigError> {
Config::builder()
.add_source(EnvInterpol(File::from(config_dir.join("default"))))
// Optional mode specific overrides, checked into git.
.add_source(EnvInterpol(
File::from(config_dir.join(run_mode)).required(false),
))
// Optional local overrides, not checked into git.
.add_source(EnvInterpol(
File::from(config_dir.join("local")).required(false),
))
// Add in settings from the environment (with a prefix of FM)
// e.g. `FM_DB__DATA_DIR=./foo/bar ./target/app` would set the database location.
.add_source(EnvInterpol(
Environment::with_prefix("fm")
.prefix_separator("_")
.separator("__")
.ignore_empty(true) // otherwise "" will be parsed as a list item
.try_parsing(true) // required for list separator
.list_separator(",") // need to list keys explicitly below otherwise it can't pase simple `String` type
.with_list_parse_key("resolver.connection.external_addresses")
.with_list_parse_key("resolver.discovery.static_addresses")
.with_list_parse_key("resolver.membership.static_subnets"),
))
// Set the home directory based on what was passed to the CLI,
// so everything in the config can be relative to it.
// The `home_dir` key is not added to `default.toml` so there is no confusion
// about where it will be coming from.
.set_override("home_dir", home_dir.to_string_lossy().as_ref())?
.build()
}
/// Try to parse the config into [Settings].
fn parse(config: Config) -> Result<Self, ConfigError> {
// Deserialize (and thus freeze) the entire configuration.
config.try_deserialize()
}
/// The configured home directory.
pub fn home_dir(&self) -> &Path {
&self.home_dir
}
/// Tendermint RPC URL from the environment or the config file.
pub fn tendermint_rpc_url(&self) -> anyhow::Result<Url> {
// Prefer the "standard" env var used in the CLI.
match std::env::var("TENDERMINT_RPC_URL").ok() {
Some(url) => url.parse::<Url>().context("invalid Tendermint URL"),
None => Ok(self.tendermint_rpc_url.clone()),
}
}
/// Indicate whether we have configured the top-down syncer to run.
pub fn topdown_enabled(&self) -> bool {
!self.ipc.subnet_id.is_root() && self.ipc.topdown.is_some()
}
/// Indicate whether we have configured the IPLD Resolver to run.
pub fn resolver_enabled(&self) -> bool {
!self.resolver.connection.listen_addr.is_empty()
&& self.ipc.subnet_id != *ipc_api::subnet_id::UNDEF
}
}
// Run these tests serially because some of them modify the environment.
#[serial_test::serial]
#[cfg(test)]
mod tests {
use multiaddr::multiaddr;
use std::path::PathBuf;
use crate::utils::tests::with_env_vars;
use crate::DbCompaction;
use super::Settings;
fn try_parse_config(run_mode: &str) -> Result<Settings, config::ConfigError> {
let current_dir = PathBuf::from(".");
let default_dir = PathBuf::from("../config");
let c = Settings::config(&default_dir, ¤t_dir, run_mode)?;
// Trying to debug the following sporadic error on CI:
// thread 'tests::parse_test_config' panicked at fendermint/app/settings/src/lib.rs:315:36:
// failed to parse Settings: failed to parse: invalid digit found in string
// This turned out to be due to the environment variable manipulation below mixing with another test,
// which is why `#[serial]` was moved to the top.
eprintln!("CONFIG = {:?}", c.cache);
Settings::parse(c)
}
fn parse_config(run_mode: &str) -> Settings {
try_parse_config(run_mode).expect("failed to parse Settings")
}
#[test]
fn parse_default_config() {
let settings = parse_config("");
assert!(!settings.resolver_enabled());
}
#[test]
fn parse_test_config() {
let settings = parse_config("test");
assert!(settings.resolver_enabled());
}
#[test]
fn compaction_to_string() {
assert_eq!(DbCompaction::Level.to_string(), "level");
}
#[test]
fn parse_comma_separated() {
let settings = with_env_vars(vec![
("FM_RESOLVER__CONNECTION__EXTERNAL_ADDRESSES", "/ip4/198.51.100.0/tcp/4242/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N,/ip6/2604:1380:2000:7a00::1/udp/4001/quic/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb"),
("FM_RESOLVER__DISCOVERY__STATIC_ADDRESSES", "/ip4/198.51.100.1/tcp/4242/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N,/ip6/2604:1380:2000:7a00::2/udp/4001/quic/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb"),
// Set a normal string key as well to make sure we have configured the library correctly and it doesn't try to parse everything as a list.
("FM_RESOLVER__NETWORK__NETWORK_NAME", "test"),
], || try_parse_config("")).unwrap();
assert_eq!(settings.resolver.discovery.static_addresses.len(), 2);
assert_eq!(settings.resolver.connection.external_addresses.len(), 2);
}
#[test]
fn parse_empty_comma_separated() {
let settings = with_env_vars(
vec![
("FM_RESOLVER__DISCOVERY__STATIC_ADDRESSES", ""),
("FM_RESOLVER__CONNECTION__EXTERNAL_ADDRESSES", ""),
("FM_RESOLVER__MEMBERSHIP__STATIC_SUBNETS", ""),
],
|| try_parse_config(""),
)
.unwrap();
assert_eq!(settings.resolver.connection.external_addresses.len(), 0);
assert_eq!(settings.resolver.discovery.static_addresses.len(), 0);
assert_eq!(settings.resolver.membership.static_subnets.len(), 0);
}
#[test]
fn parse_with_interpolation() {
let settings = with_env_vars(
vec![
("FM_RESOLVER__DISCOVERY__STATIC_ADDRESSES", "/dns4/${SEED_1_HOST}/tcp/${SEED_1_PORT},/dns4/${SEED_2_HOST}/tcp/${SEED_2_PORT}"),
("SEED_1_HOST", "foo.io"),
("SEED_1_PORT", "1234"),
("SEED_2_HOST", "bar.ai"),
("SEED_2_PORT", "5678"),
],
|| try_parse_config(""),
)
.unwrap();
assert_eq!(settings.resolver.discovery.static_addresses.len(), 2);
assert_eq!(
settings.resolver.discovery.static_addresses[0],
multiaddr!(Dns4("foo.io"), Tcp(1234u16))
);
assert_eq!(
settings.resolver.discovery.static_addresses[1],
multiaddr!(Dns4("bar.ai"), Tcp(5678u16))
);
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/settings/src/eth.rs | fendermint/app/settings/src/eth.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_shared::econ::TokenAmount;
use serde::Deserialize;
use serde_with::{serde_as, DurationSeconds};
use std::time::Duration;
use crate::{IsHumanReadable, SocketAddress};
/// Ethereum API facade settings.
#[serde_as]
#[derive(Debug, Deserialize, Clone)]
pub struct EthSettings {
pub listen: SocketAddress,
#[serde_as(as = "DurationSeconds<u64>")]
pub filter_timeout: Duration,
pub cache_capacity: usize,
pub gas: GasOpt,
pub max_nonce_gap: u64,
}
#[serde_as]
#[derive(Debug, Clone, Deserialize)]
pub struct GasOpt {
/// Minimum gas fee in atto.
#[serde_as(as = "IsHumanReadable")]
pub min_gas_premium: TokenAmount,
pub num_blocks_max_prio_fee: u64,
pub max_fee_hist_size: u64,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/settings/src/utils.rs | fendermint/app/settings/src/utils.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use config::{ConfigError, Source, Value, ValueKind};
use lazy_static::lazy_static;
use regex::Regex;
use std::path::{Path, PathBuf};
#[macro_export]
macro_rules! home_relative {
// Using this inside something that has a `.home_dir()` function.
($($name:ident),+) => {
$(
pub fn $name(&self) -> std::path::PathBuf {
$crate::utils::expand_path(&self.home_dir(), &self.$name)
}
)+
};
// Using this outside something that requires a `home_dir` parameter to be passed to it.
($settings:ty { $($name:ident),+ } ) => {
impl $settings {
$(
pub fn $name(&self, home_dir: &std::path::Path) -> std::path::PathBuf {
$crate::utils::expand_path(home_dir, &self.$name)
}
)+
}
};
}
/// Expand a path which can either be :
/// * absolute, e.g. "/foo/bar"
/// * relative to the system `$HOME` directory, e.g. "~/foo/bar"
/// * relative to the configured `--home-dir` directory, e.g. "foo/bar"
pub fn expand_path(home_dir: &Path, path: &Path) -> PathBuf {
if path.starts_with("/") {
PathBuf::from(path)
} else if path.starts_with("~") {
expand_tilde(path)
} else {
expand_tilde(home_dir.join(path))
}
}
/// Expand paths that begin with "~" to `$HOME`.
pub fn expand_tilde<P: AsRef<Path>>(path: P) -> PathBuf {
let p = path.as_ref().to_path_buf();
if !p.starts_with("~") {
return p;
}
if p == Path::new("~") {
return dirs::home_dir().unwrap_or(p);
}
dirs::home_dir()
.map(|mut h| {
if h == Path::new("/") {
// `~/foo` becomes just `/foo` instead of `//foo` if `/` is home.
p.strip_prefix("~").unwrap().to_path_buf()
} else {
h.push(p.strip_prefix("~/").unwrap());
h
}
})
.unwrap_or(p)
}
#[derive(Clone, Debug)]
pub struct EnvInterpol<T>(pub T);
impl<T: Source + Clone + Send + Sync + 'static> Source for EnvInterpol<T> {
fn clone_into_box(&self) -> Box<dyn Source + Send + Sync> {
Box::new(self.clone())
}
fn collect(&self) -> Result<config::Map<String, config::Value>, ConfigError> {
let mut values = self.0.collect()?;
for value in values.values_mut() {
interpolate_values(value);
}
Ok(values)
}
}
/// Find values in the string that can be interpolated, e.g. "${NOMAD_HOST_ADDRESS_cometbft_p2p}"
fn find_vars(value: &str) -> Vec<&str> {
lazy_static! {
/// Capture env variables like `${VARIABLE_NAME}`
static ref ENV_VAR_RE: Regex = Regex::new(r"\$\{([^}]+)\}").expect("env var regex parses");
}
ENV_VAR_RE
.captures_iter(value)
.map(|c| c.extract())
.map(|(_, [n])| n)
.collect()
}
/// Find variables and replace them from the environment.
///
/// Returns `None` if there are no env vars in the value.
fn interpolate_vars(value: &str) -> Option<String> {
let keys = find_vars(value);
if keys.is_empty() {
return None;
}
let mut value = value.to_string();
for k in keys {
if let Ok(v) = std::env::var(k) {
value = value.replace(&format!("${{{k}}}"), &v);
}
}
Some(value)
}
/// Find strings which have env vars in them and do the interpolation.
///
/// It does not change the kind of the values, ie. it doesn't try to parse
/// into primitives or arrays *after* the interpolation. It does recurse
/// into arrays, though, so if there are variables within array items,
/// they get replaced.
fn interpolate_values(value: &mut Value) {
match value.kind {
ValueKind::String(ref mut s) => {
if let Some(i) = interpolate_vars(s) {
// TODO: We could try to parse into primitive values,
// but the only reason we do it with `Environment` is to support list separators,
// otherwise it was fine with just strings, so I think we can skip this for now.
*s = i;
}
}
ValueKind::Array(ref mut vs) => {
for v in vs.iter_mut() {
interpolate_values(v);
}
}
// Leave anything else as it is.
_ => {}
}
}
#[cfg(test)]
pub(crate) mod tests {
    use std::path::PathBuf;

    use crate::utils::find_vars;

    use super::{expand_tilde, interpolate_vars};

    /// Set some env vars, run a fallible piece of code, then unset the variables
    /// otherwise they would affect the next test.
    ///
    /// Cleanup happens in a `Drop` guard so the variables are removed even if the
    /// closure panics (e.g. a failing `assert!`), not only when it returns normally.
    pub fn with_env_vars<F, T, E>(vars: Vec<(&str, &str)>, f: F) -> Result<T, E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        /// Removes the recorded variable names when dropped, panic or not.
        struct Cleanup(Vec<String>);

        impl Drop for Cleanup {
            fn drop(&mut self) {
                for k in &self.0 {
                    std::env::remove_var(k);
                }
            }
        }

        // Register the cleanup before setting anything, so a panic half-way
        // through `set_var` still unsets whatever was already set.
        let _cleanup = Cleanup(vars.iter().map(|(k, _)| k.to_string()).collect());

        for (k, v) in &vars {
            std::env::set_var(k, v);
        }

        f()
    }

    #[test]
    fn tilde_expands_to_home() {
        let home = std::env::var("HOME").expect("should work on Linux");
        let home_project = PathBuf::from(format!("{}/.project", home));
        assert_eq!(expand_tilde("~/.project"), home_project);
        assert_eq!(expand_tilde("/foo/bar"), PathBuf::from("/foo/bar"));
        // A tilde not followed by `/` is not a home reference; it is left alone.
        assert_eq!(expand_tilde("~foo/bar"), PathBuf::from("~foo/bar"));
    }

    #[test]
    fn test_find_vars() {
        assert_eq!(
            find_vars("FOO_${NAME}_${NUMBER}_BAR"),
            vec!["NAME", "NUMBER"]
        );
        // Unterminated or brace-less references are not recognized as variables.
        assert!(find_vars("FOO_${NAME").is_empty());
        assert!(find_vars("FOO_$NAME").is_empty());
    }

    #[test]
    fn test_interpolate_vars() {
        let s = "FOO_${NAME}_${NUMBER}_BAR";
        let i = with_env_vars::<_, _, ()>(vec![("NAME", "spam")], || Ok(interpolate_vars(s)))
            .unwrap()
            .expect("non empty vars");
        // Only variables present in the environment are replaced; the rest stay.
        assert_eq!(i, "FOO_spam_${NUMBER}_BAR");
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/settings/src/testing.rs | fendermint/app/settings/src/testing.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use serde::Deserialize;
#[derive(Debug, Deserialize, Clone)]
pub struct TestingSettings {
    /// Indicate whether the chain metadata should be pushed into the ledger.
    ///
    /// Doing so causes the ledger to change even on empty blocks, which will
    /// cause another empty block to be created by CometBFT, perpetuating
    /// it even if we don't want them.
    ///
    /// See <https://docs.cometbft.com/v0.37/core/configuration#empty-blocks-vs-no-empty-blocks>
    ///
    /// This is here for testing purposes only, it should be `true` by default to allow
    /// the `evm` actor to execute the `BLOCKHASH` function.
    // NOTE(review): the `true` default is presumably applied by the config files /
    // loader rather than in this struct (no `#[serde(default)]` here) — confirm.
    pub push_chain_meta: bool,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/settings/src/resolver.rs | fendermint/app/settings/src/resolver.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{path::PathBuf, time::Duration};
use serde::Deserialize;
use serde_with::{serde_as, DurationSeconds};
use ipc_api::subnet_id::SubnetID;
use multiaddr::Multiaddr;
use crate::{home_relative, IsHumanReadable};
#[serde_as]
#[derive(Debug, Clone, Deserialize)]
pub struct ResolverSettings {
    /// Time to wait between attempts to resolve a CID, in seconds.
    #[serde_as(as = "DurationSeconds<u64>")]
    pub retry_delay: Duration,
    /// Logical subnet-hierarchy settings (see [NetworkSettings]).
    pub network: NetworkSettings,
    /// Peer discovery settings (see [DiscoverySettings]).
    pub discovery: DiscoverySettings,
    /// Subnet membership gossip settings (see [MembershipSettings]).
    pub membership: MembershipSettings,
    /// Physical connection settings (see [ConnectionSettings]).
    pub connection: ConnectionSettings,
    /// Content resolution settings (see [ContentSettings]).
    pub content: ContentSettings,
}
/// Settings describing the subnet hierarchy, not the physical network.
///
/// For physical network settings see [ConnectionSettings].
#[derive(Clone, Debug, Deserialize)]
pub struct NetworkSettings {
    /// Cryptographic key used to sign messages.
    ///
    /// This is the name of a Secp256k1 private key file,
    /// relative to the `home_dir`.
    local_key: PathBuf,
    /// Network name to differentiate this peer group.
    pub network_name: String,
}

// Resolves `local_key` relative to the home directory
// (see the `home_relative!` macro in this crate).
home_relative!(NetworkSettings { local_key });
/// Configuration for [`discovery::Behaviour`].
#[derive(Clone, Debug, Deserialize)]
pub struct DiscoverySettings {
    /// Custom nodes which never expire, e.g. bootstrap or reserved nodes.
    ///
    /// The addresses must end with a `/p2p/<peer-id>` part.
    pub static_addresses: Vec<Multiaddr>,
    /// Number of connections at which point we pause further discovery lookups.
    pub target_connections: usize,
    /// Option to disable Kademlia, for example in a fixed static network.
    // NOTE(review): with Kademlia disabled, discovery presumably relies on
    // `static_addresses` alone — confirm against the behaviour implementation.
    pub enable_kademlia: bool,
}
/// Configuration for [`membership::Behaviour`].
///
/// All durations below are specified in seconds in the config file
/// (see the `DurationSeconds<u64>` adapters).
#[serde_as]
#[derive(Clone, Debug, Deserialize)]
pub struct MembershipSettings {
    /// User defined list of subnets which will never be pruned from the cache.
    #[serde_as(as = "Vec<IsHumanReadable>")]
    pub static_subnets: Vec<SubnetID>,
    /// Maximum number of subnets to track in the cache.
    pub max_subnets: usize,
    /// Publish interval for supported subnets.
    #[serde_as(as = "DurationSeconds<u64>")]
    pub publish_interval: Duration,
    /// Minimum time between publishing own provider record in reaction to new joiners.
    #[serde_as(as = "DurationSeconds<u64>")]
    pub min_time_between_publish: Duration,
    /// Maximum age of provider records before the peer is removed without an update.
    #[serde_as(as = "DurationSeconds<u64>")]
    pub max_provider_age: Duration,
}
/// Physical network connection settings.
///
/// For the logical subnet naming see [NetworkSettings].
#[derive(Debug, Clone, Deserialize)]
pub struct ConnectionSettings {
    /// The address where we will listen to incoming connections.
    pub listen_addr: Multiaddr,
    /// A list of known external addresses this node is reachable on.
    pub external_addresses: Vec<Multiaddr>,
    /// Maximum number of incoming connections.
    pub max_incoming: u32,
    /// Expected number of peers, for sizing the Bloom filter.
    pub expected_peer_count: u32,
    /// Maximum number of peers to send Bitswap requests to in a single attempt.
    pub max_peers_per_query: u32,
    /// Maximum number of events in the push-based broadcast channel before a slow
    /// consumer gets an error because it's falling behind.
    pub event_buffer_capacity: u32,
}
/// Configuration for [`content::Behaviour`].
#[serde_as]
#[derive(Debug, Clone, Deserialize)]
pub struct ContentSettings {
    /// Number of bytes that can be consumed by remote peers in a time period.
    ///
    /// 0 means no limit.
    pub rate_limit_bytes: u32,
    /// Length of the time period at which the consumption limit fills.
    ///
    /// 0 means no limit.
    ///
    /// Specified in seconds in the config file.
    #[serde_as(as = "DurationSeconds<u64>")]
    pub rate_limit_period: Duration,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/app.rs | fendermint/app/src/app.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::future::Future;
use std::path::PathBuf;
use std::sync::Arc;
use crate::events::{ExtendVote, NewBlock, ProposalProcessed};
use crate::AppExitCode;
use crate::BlockHeight;
use crate::{tmconv::*, VERSION};
use anyhow::{anyhow, Context, Result};
use async_stm::{atomically, atomically_or_err};
use async_trait::async_trait;
use bls_signatures::Serialize as _;
use cid::Cid;
use fendermint_abci::util::take_until_max_size;
use fendermint_abci::{AbciResult, Application};
use fendermint_storage::{
Codec, Encode, KVCollection, KVRead, KVReadable, KVStore, KVWritable, KVWrite,
};
use fendermint_tracing::emit;
use fendermint_vm_core::Timestamp;
use fendermint_vm_interpreter::bytes::{
BytesMessageApplyRes, BytesMessageCheckRes, BytesMessageQuery, BytesMessageQueryRes,
};
use fendermint_vm_interpreter::chain::{
cetf_tag_msg_to_chainmessage, ChainEnv,
ChainMessageApplyRet, IllegalMessage,
};
use fendermint_vm_interpreter::fvm::extend::{SignatureKind, SignedTags, TagKind, Tags};
use fendermint_vm_interpreter::fvm::state::cetf::get_tag_at_height;
use fendermint_vm_interpreter::fvm::state::{
empty_state_tree, CheckStateRef, FvmExecState, FvmGenesisState, FvmQueryState, FvmStateParams,
FvmUpdatableParams,
};
use fendermint_vm_interpreter::fvm::store::ReadOnlyBlockstore;
use fendermint_vm_interpreter::fvm::{FvmApplyRet, FvmGenesisOutput, PowerUpdates};
use fendermint_vm_interpreter::signed::InvalidSignature;
use fendermint_vm_interpreter::{
CheckInterpreter, ExecInterpreter, ExtendVoteInterpreter, GenesisInterpreter,
ProposalInterpreter, QueryInterpreter,
};
use fendermint_vm_message::query::FvmQueryHeight;
use fendermint_vm_snapshot::{SnapshotClient, SnapshotError};
use fvm::engine::MultiEngine;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::{from_slice, to_vec};
use fvm_shared::chainid::ChainID;
use fvm_shared::clock::ChainEpoch;
use fvm_shared::econ::TokenAmount;
use fvm_shared::version::NetworkVersion;
use num_traits::Zero;
use serde::{Deserialize, Serialize};
use tendermint::abci::request::CheckTxKind;
use tendermint::abci::{request, response};
use tracing::instrument;
/// Keys under which the application stores data in its own namespace.
///
/// Currently only the latest committed [`AppState`] is stored
/// (see `get_committed_state` / `set_committed_state`).
#[derive(Serialize)]
#[repr(u8)]
pub enum AppStoreKey {
    State,
}
// TODO: What range should we use for our own error codes? Should we shift FVM errors?
/// Application-level ABCI error codes.
///
/// Values start at 51 and are reported via `Code::Err` by the
/// `invalid_*` response helpers; ABCI reserves 0 for success.
#[derive(Debug)]
#[repr(u32)]
pub enum AppError {
    /// Failed to deserialize the transaction.
    InvalidEncoding = 51,
    /// Failed to validate the user signature.
    InvalidSignature = 52,
    /// User sent a message they should not construct.
    IllegalMessage = 53,
    /// The genesis block hasn't been initialized yet.
    NotInitialized = 54,
}
/// The application state record we keep a history of in the database.
///
/// The latest record is stored under [`AppStoreKey::State`]; past
/// `state_params` are kept in the history collection keyed by height.
#[derive(Serialize, Deserialize)]
pub struct AppState {
    /// Last committed block height.
    block_height: BlockHeight,
    /// Oldest state hash height.
    oldest_state_height: BlockHeight,
    /// Last committed version of the evolving state of the FVM.
    state_params: FvmStateParams,
}
impl AppState {
    /// CID of the committed state root.
    pub fn state_root(&self) -> Cid {
        self.state_params.state_root
    }

    /// Chain ID derived from the state parameters.
    pub fn chain_id(&self) -> ChainID {
        ChainID::from(self.state_params.chain_id)
    }

    /// Application hash presented to CometBFT, derived from the state parameters.
    pub fn app_hash(&self) -> tendermint::hash::AppHash {
        to_app_hash(&self.state_params)
    }

    /// The state is effective at the *next* block, that is, the effects of block N are visible in the header of block N+1,
    /// so the height of the state itself as a "post-state" is one higher than the block which we executed to create it.
    pub fn state_height(&self) -> BlockHeight {
        self.block_height + 1
    }
}
/// Static configuration handed to [`App::new`].
pub struct AppConfig<S: KVStore> {
    /// Namespace to store the current app state.
    pub app_namespace: S::Namespace,
    /// Namespace to store the app state history.
    pub state_hist_namespace: S::Namespace,
    /// Size of state history to keep; 0 means unlimited.
    pub state_hist_size: u64,
    /// Path to the Wasm bundle.
    ///
    /// Only loaded once during genesis; later comes from the [`StateTree`].
    pub builtin_actors_bundle: PathBuf,
    /// Path to the custom actor WASM bundle.
    pub custom_actors_bundle: PathBuf,
    /// Block height where we should gracefully stop the node
    // NOTE(review): the semantics of 0/negative values are not visible in this
    // file — confirm with the caller that populates it before relying on them.
    pub halt_height: i64,
}
/// Handle ABCI requests.
#[derive(Clone)]
pub struct App<DB, SS, S, I>
where
    SS: Blockstore + Clone + 'static,
    S: KVStore,
{
    /// Database backing all key-value operations.
    db: Arc<DB>,
    /// State store, backing all the smart contracts.
    ///
    /// Must be kept separate from storage that can be influenced by network operations such as Bitswap;
    /// nodes must be able to run transactions deterministically. By contrast the Bitswap store should
    /// be able to read its own storage area as well as state storage, to serve content from both.
    state_store: Arc<SS>,
    /// Wasm engine cache.
    multi_engine: Arc<MultiEngine>,
    /// Path to the Wasm bundle.
    ///
    /// Only loaded once during genesis; later comes from the [`StateTree`].
    builtin_actors_bundle: PathBuf,
    /// Path to the custom actor WASM bundle.
    custom_actors_bundle: PathBuf,
    /// Block height where we should gracefully stop the node
    halt_height: i64,
    /// Namespace to store app state.
    namespace: S::Namespace,
    /// Collection of past state parameters.
    ///
    /// We store the state hash for the height of the block where it was committed,
    /// which is different from how Tendermint Core will refer to it in queries,
    /// shifted by one, because Tendermint Core will use the height where the hash
    /// *appeared*, which is in the block *after* the one which was committed.
    ///
    /// The state also contains things like timestamp and the network version,
    /// so that we can retrospectively execute FVM messages at past block heights
    /// in read-only mode.
    state_hist: KVCollection<S, BlockHeight, FvmStateParams>,
    /// Interpreter for block lifecycle events.
    interpreter: Arc<I>,
    /// Environment-like dependencies for the interpreter.
    chain_env: ChainEnv,
    /// Interface to the snapshotter, if enabled.
    snapshots: Option<SnapshotClient>,
    /// State accumulating changes during block execution.
    ///
    /// `None` outside block execution; `put_exec_state` asserts emptiness.
    exec_state: Arc<tokio::sync::Mutex<Option<FvmExecState<SS>>>>,
    /// Projected (partial) state accumulating during transaction checks.
    check_state: CheckStateRef<SS>,
    /// How much history to keep.
    ///
    /// Zero means unlimited.
    state_hist_size: u64,
}
impl<DB, SS, S, I> App<DB, SS, S, I>
where
    S: KVStore
        + Codec<AppState>
        + Encode<AppStoreKey>
        + Encode<BlockHeight>
        + Codec<FvmStateParams>,
    DB: KVWritable<S> + KVReadable<S> + Clone + 'static,
    SS: Blockstore + Clone + 'static,
{
    /// Construct the application, making sure an initial (empty, height-0)
    /// committed state exists in the database before any request is served.
    pub fn new(
        config: AppConfig<S>,
        db: DB,
        state_store: SS,
        interpreter: I,
        chain_env: ChainEnv,
        snapshots: Option<SnapshotClient>,
    ) -> Result<Self> {
        let app = Self {
            db: Arc::new(db),
            state_store: Arc::new(state_store),
            multi_engine: Arc::new(MultiEngine::new(1)),
            builtin_actors_bundle: config.builtin_actors_bundle,
            custom_actors_bundle: config.custom_actors_bundle,
            halt_height: config.halt_height,
            namespace: config.app_namespace,
            state_hist: KVCollection::new(config.state_hist_namespace),
            state_hist_size: config.state_hist_size,
            interpreter: Arc::new(interpreter),
            chain_env,
            snapshots,
            exec_state: Arc::new(tokio::sync::Mutex::new(None)),
            check_state: Arc::new(tokio::sync::Mutex::new(None)),
        };
        // Seed the empty state so `info`/queries don't fail before genesis.
        app.init_committed_state()?;
        Ok(app)
    }
}
impl<DB, SS, S, I> App<DB, SS, S, I>
where
    S: KVStore
        + Codec<AppState>
        + Encode<AppStoreKey>
        + Encode<BlockHeight>
        + Codec<FvmStateParams>,
    DB: KVWritable<S> + KVReadable<S> + 'static + Clone,
    SS: Blockstore + 'static + Clone,
{
    /// Get an owned clone of the state store.
    fn state_store_clone(&self) -> SS {
        self.state_store.as_ref().clone()
    }

    /// Ensure the store has some initial state.
    fn init_committed_state(&self) -> Result<()> {
        if self.get_committed_state()?.is_none() {
            // We need to be careful never to run a query on this.
            let mut state_tree = empty_state_tree(self.state_store_clone())
                .context("failed to create empty state tree")?;
            let state_root = state_tree.flush()?;
            let state = AppState {
                block_height: 0,
                oldest_state_height: 0,
                state_params: FvmStateParams {
                    timestamp: Timestamp(0),
                    state_root,
                    network_version: NetworkVersion::V0,
                    base_fee: TokenAmount::zero(),
                    circ_supply: TokenAmount::zero(),
                    chain_id: 0,
                    power_scale: 0,
                    app_version: 0,
                },
            };
            self.set_committed_state(state)?;
        }
        Ok(())
    }

    /// Get the last committed state, if exists.
    fn get_committed_state(&self) -> Result<Option<AppState>> {
        let tx = self.db.read();
        tx.get(&self.namespace, &AppStoreKey::State)
            .context("get failed")
    }

    /// Get the last committed state; return error if it doesn't exist.
    fn committed_state(&self) -> Result<AppState> {
        match self.get_committed_state()? {
            Some(state) => Ok(state),
            None => Err(anyhow!("app state not found")),
        }
    }

    /// Set the last committed state.
    fn set_committed_state(&self, mut state: AppState) -> Result<()> {
        self.db
            .with_write(|tx| {
                // Insert latest state history point at the `block_height + 1`,
                // to be consistent with how CometBFT queries are supposed to work.
                let state_height = state.state_height();

                self.state_hist
                    .put(tx, &state_height, &state.state_params)?;

                // Prune state history.
                if self.state_hist_size > 0 && state_height >= self.state_hist_size {
                    let prune_height = state_height.saturating_sub(self.state_hist_size);
                    while state.oldest_state_height <= prune_height {
                        self.state_hist.delete(tx, &state.oldest_state_height)?;
                        state.oldest_state_height += 1;
                    }
                }

                // Update the application state.
                tx.put(&self.namespace, &AppStoreKey::State, &state)?;

                Ok(())
            })
            .context("commit failed")
    }

    /// Put the execution state during block execution. Has to be empty.
    async fn put_exec_state(&self, state: FvmExecState<SS>) {
        let mut guard = self.exec_state.lock().await;
        assert!(guard.is_none(), "exec state not empty");
        *guard = Some(state);
    }

    /// Take the execution state during block execution. Has to be non-empty.
    async fn take_exec_state(&self) -> FvmExecState<SS> {
        let mut guard = self.exec_state.lock().await;
        guard.take().expect("exec state empty")
    }

    /// Take the execution state, update it, put it back, return the output.
    async fn modify_exec_state<T, F, R>(&self, f: F) -> Result<T>
    where
        F: FnOnce((ChainEnv, FvmExecState<SS>)) -> R,
        R: Future<Output = Result<((ChainEnv, FvmExecState<SS>), T)>>,
    {
        let mut guard = self.exec_state.lock().await;
        let state = guard.take().expect("exec state empty");

        // NOTE: There is no need to save the `ChainEnv`; it's shared with other, meant for cloning.
        let ((_env, state), ret) = f((self.chain_env.clone(), state)).await?;

        *guard = Some(state);

        Ok(ret)
    }

    /// Get a read only fvm execution state. This is useful to perform query commands targeting
    /// the latest state.
    pub fn new_read_only_exec_state(
        &self,
    ) -> Result<Option<FvmExecState<ReadOnlyBlockstore<Arc<SS>>>>> {
        let maybe_app_state = self.get_committed_state()?;

        Ok(if let Some(app_state) = maybe_app_state {
            let block_height = app_state.block_height;
            let state_params = app_state.state_params;

            // wait for block production
            if !Self::can_query_state(block_height, &state_params) {
                return Ok(None);
            }

            // Use a checked conversion like the other call sites in this file
            // (e.g. `check_tx`), rather than `as`, which would silently
            // truncate a height that doesn't fit a `ChainEpoch`.
            let block_height: ChainEpoch = block_height
                .try_into()
                .context("block height out of range")?;

            let exec_state = FvmExecState::new(
                ReadOnlyBlockstore::new(self.state_store.clone()),
                self.multi_engine.as_ref(),
                block_height,
                state_params,
            )
            .context("error creating execution state")?;

            Some(exec_state)
        } else {
            None
        })
    }

    /// Look up a past state at a particular height Tendermint Core is looking for.
    ///
    /// A height of zero means we are looking for the latest state.
    /// The genesis block state is saved under height 1.
    /// Under height 0 we saved the empty state, which we must not query,
    /// because it doesn't contain any initialized state for the actors.
    ///
    /// Returns the state params and the height of the block which committed it.
    fn state_params_at_height(
        &self,
        height: FvmQueryHeight,
    ) -> Result<(FvmStateParams, BlockHeight)> {
        if let FvmQueryHeight::Height(h) = height {
            let tx = self.db.read();
            let sh = self
                .state_hist
                .get(&tx, &h)
                .context("error looking up history")?;

            if let Some(p) = sh {
                return Ok((p, h));
            }
        }
        // Fall back to the latest committed state when the height is not in the
        // history, or the caller asked for the latest/pending state.
        let state = self.committed_state()?;
        Ok((state.state_params, state.block_height))
    }

    /// Check whether the state has been initialized by genesis.
    ///
    /// We can't run queries on the initial empty state becase the actors haven't been inserted yet.
    fn can_query_state(height: BlockHeight, params: &FvmStateParams) -> bool {
        // It's really the empty state tree that would be the best indicator.
        !(height == 0 && params.timestamp.0 == 0 && params.network_version == NetworkVersion::V0)
    }
}
// NOTE: The `Application` interface doesn't allow failures at the moment. The protobuf
// of `Response` actually has an `Exception` type, so in theory we could use that, and
// Tendermint would break up the connection. However, before the response could reach it,
// the `tower-abci` library would throw an exception when it tried to convert a
// `Response::Exception` into a `ConsensusResponse` for example.
#[async_trait]
impl<DB, SS, S, I> Application for App<DB, SS, S, I>
where
S: KVStore
+ Codec<AppState>
+ Encode<AppStoreKey>
+ Encode<BlockHeight>
+ Codec<FvmStateParams>,
S::Namespace: Sync + Send,
DB: KVWritable<S> + KVReadable<S> + Clone + Send + Sync + 'static,
SS: Blockstore + Clone + Send + Sync + 'static,
I: GenesisInterpreter<
State = FvmGenesisState<SS>,
Genesis = Vec<u8>,
Output = FvmGenesisOutput,
>,
I: ProposalInterpreter<State = ChainEnv, Message = Vec<u8>>,
I: ExecInterpreter<
State = (ChainEnv, FvmExecState<SS>),
Message = Vec<u8>,
BeginOutput = FvmApplyRet,
DeliverOutput = BytesMessageApplyRes,
EndOutput = PowerUpdates,
>,
I: CheckInterpreter<
State = FvmExecState<ReadOnlyBlockstore<SS>>,
Message = Vec<u8>,
Output = BytesMessageCheckRes,
>,
I: QueryInterpreter<
State = FvmQueryState<SS>,
Query = BytesMessageQuery,
Output = BytesMessageQueryRes,
>,
I: ExtendVoteInterpreter<
State = FvmQueryState<SS>,
ExtendMessage = Tags,
ExtendOutput = SignedTags,
VerifyMessage = (tendermint::account::Id, Tags, SignedTags),
VerifyOutput = Option<bool>,
>,
{
/// Produce a vote extension by signing the tags relevant for the upcoming block.
async fn extend_vote(&self, request: request::ExtendVote) -> AbciResult<response::ExtendVote> {
    tracing::debug!(
        height = request.height.value(),
        time = request.time.to_string(),
        "extend vote"
    );
    // Skip at block 0, no state yet.
    if request.height.value() == 0 {
        tracing::info!("Extend vote at height 0. Skipping.");
        return Ok(response::ExtendVote {
            vote_extension: Default::default(),
        });
    }
    let db = self.state_store_clone();
    let height = FvmQueryHeight::from(0);
    let (state_params, block_height) = self.state_params_at_height(height)?;
    let state_root = state_params.state_root;
    let state = FvmQueryState::new(
        db,
        self.multi_engine.clone(),
        block_height.try_into()?,
        state_params,
        self.check_state.clone(),
        height == FvmQueryHeight::Pending,
    )
    .context("error creating query state")?;
    let db = self.state_store_clone();

    // We haven't started execution yet so TM height is one ahead of FVM height.
    // Compute the target height once and use it everywhere below, so the tag
    // lookup and the tags we sign can never drift apart.
    let fvm_tags_height = block_height + 2;

    let tags = Tags({
        // Check for cetf tag
        let mut tags = vec![];
        let cetf_tag = get_tag_at_height(db, &state_root, fvm_tags_height)
            .context(format!("failed to get tag at height {}", fvm_tags_height))?;
        if let Some(tag) = cetf_tag {
            tags.push(TagKind::Cetf(tag));
        }
        tags.push(TagKind::BlockHeight(fvm_tags_height));
        tags
    });
    tracing::trace!("Tags to sign: {:?}", tags);
    let signatures = self
        .interpreter
        .extend_vote(state, tags)
        .await
        .context("failed to extend vote")?;
    tracing::trace!("Partially Signed Tags: {:?}", signatures);
    // Either is_enabled is false or nothing to sign (TODO: We should force signing)
    if signatures.0.is_empty() {
        emit!(ExtendVote {
            block_height: request.height.value(),
            signed_tags: 0,
            bytes: 0,
        });
        Ok(response::ExtendVote {
            vote_extension: Default::default(),
        })
    } else {
        let serialized = to_vec(&signatures).context("failed to serialize signatures")?;
        emit!(ExtendVote {
            block_height: request.height.value(),
            signed_tags: signatures.0.len() as u64,
            bytes: serialized.len() as u64,
        });
        Ok(response::ExtendVote {
            vote_extension: serialized.into(),
        })
    }
}
/// Verify another validator's vote extension by re-deriving the tags it
/// should have signed and checking the signatures against them.
async fn verify_vote_extension(
    &self,
    request: request::VerifyVoteExtension,
) -> AbciResult<response::VerifyVoteExtension> {
    tracing::debug!(height = request.height.value(), "verify vote extension");
    if request.height.value() == 0 && request.vote_extension.is_empty() {
        tracing::info!("Verify vote extension at height 0. Skipping.");
        return Ok(response::VerifyVoteExtension::Accept);
    }
    // An empty extension carries nothing to verify.
    if request.vote_extension.is_empty() {
        return Ok(response::VerifyVoteExtension::Accept);
    }
    let db = self.state_store_clone();
    let height = FvmQueryHeight::from(0);
    let (state_params, block_height) = self.state_params_at_height(height)?;
    let state_root = state_params.state_root;

    let state = FvmQueryState::new(
        db.clone(),
        self.multi_engine.clone(),
        block_height.try_into()?,
        state_params,
        self.check_state.clone(),
        height == FvmQueryHeight::Pending,
    )?;
    let sigs: SignedTags =
        from_slice(&request.vote_extension).context("failed to deserialize signatures")?;

    // Same target height as in `extend_vote`; use the single named value in
    // the closure below instead of recomputing `block_height + 2`, so the tag
    // lookup and the height tag cannot drift apart.
    let fvm_tags_height = block_height + 2;
    let tags = Tags(
        sigs.0
            .iter()
            .map(|sig_kind| match sig_kind {
                SignatureKind::Cetf(_) => {
                    let db = db.clone();
                    let tag = get_tag_at_height(db, &state_root, fvm_tags_height)
                        .context("failed to get tag at height")?
                        .ok_or_else(|| anyhow!("failed to get tag at height: None"))?;
                    Ok(TagKind::Cetf(tag))
                }
                SignatureKind::BlockHeight(_) => Ok(TagKind::BlockHeight(fvm_tags_height)),
            })
            .collect::<anyhow::Result<Vec<TagKind>>>()?,
    );
    let id = request.validator_address;
    let (_, res) = self
        .interpreter
        .verify_vote_extension(state, (id, tags, sigs))
        .await?;
    // `None` (no decision from the interpreter) is treated as acceptance.
    match res {
        Some(true) => Ok(response::VerifyVoteExtension::Accept),
        Some(false) => Ok(response::VerifyVoteExtension::Reject),
        None => Ok(response::VerifyVoteExtension::Accept),
    }
}
/// Provide information about the ABCI application.
async fn info(&self, _request: request::Info) -> AbciResult<response::Info> {
    // Report the last committed height/hash so CometBFT can decide whether
    // to replay blocks on startup.
    let committed = self.committed_state()?;
    let last_block_height = tendermint::block::Height::try_from(committed.block_height)?;
    Ok(response::Info {
        data: "fendermint".to_string(),
        version: VERSION.to_owned(),
        app_version: committed.state_params.app_version,
        last_block_height,
        last_block_app_hash: committed.app_hash(),
    })
}
/// Called once upon genesis.
async fn init_chain(&self, request: request::InitChain) -> AbciResult<response::InitChain> {
    // Load the Wasm actor bundles from disk; these are only needed at genesis.
    let bundle = &self.builtin_actors_bundle;
    let bundle = std::fs::read(bundle)
        .map_err(|e| anyhow!("failed to load builtin bundle CAR from {bundle:?}: {e}"))?;

    let custom_actors_bundle = &self.custom_actors_bundle;
    let custom_actors_bundle = std::fs::read(custom_actors_bundle).map_err(|e| {
        anyhow!("failed to load custom actor bundle CAR from {custom_actors_bundle:?}: {e}")
    })?;

    let state = FvmGenesisState::new(
        self.state_store_clone(),
        self.multi_engine.clone(),
        &bundle,
        &custom_actors_bundle,
    )
    .await
    .context("failed to create genesis state")?;

    tracing::info!(
        manifest_root = format!("{}", state.manifest_data_cid),
        "pre-genesis state created"
    );

    let genesis_bytes = request.app_state_bytes.to_vec();
    let genesis_hash =
        fendermint_vm_message::cid(&genesis_bytes).context("failed to compute genesis CID")?;

    // Make it easy to spot any discrepancies between nodes.
    tracing::info!(genesis_hash = genesis_hash.to_string(), "genesis");

    let (state, out) = self
        .interpreter
        .init(state, genesis_bytes)
        .await
        .context("failed to init from genesis")?;

    let state_root = state.commit().context("failed to commit genesis state")?;

    let validators =
        to_validator_updates(out.validators).context("failed to convert validators")?;

    // Let's pretend that the genesis state is that of a fictive block at height 0.
    // The record will be stored under height 1, and the record after the application
    // of the actual block 1 will be at height 2, so they are distinct records.
    // That is despite the fact that block 1 will share the timestamp with genesis,
    // however it seems like it goes through a `prepare_proposal` phase too, which
    // suggests it could have additional transactions affecting the state hash.
    // By keeping them separate we can actually run queries at height=1 as well as height=2,
    // to see the difference between `genesis.json` only and whatever else is in block 1.
    let height: u64 = request.initial_height.into();
    // Note that setting the `initial_height` to 0 doesn't seem to have an effect.
    // NOTE(review): this subtraction would underflow if `initial_height` were 0;
    // per the note above CometBFT appears to normalize it to at least 1 — confirm.
    let height = height - 1;

    let app_state = AppState {
        block_height: height,
        oldest_state_height: height,
        state_params: FvmStateParams {
            state_root,
            timestamp: out.timestamp,
            network_version: out.network_version,
            base_fee: out.base_fee,
            circ_supply: out.circ_supply,
            chain_id: out.chain_id.into(),
            power_scale: out.power_scale,
            app_version: 0,
        },
    };

    let response = response::InitChain {
        consensus_params: None,
        validators,
        app_hash: app_state.app_hash(),
    };

    tracing::info!(
        height,
        state_root = app_state.state_root().to_string(),
        app_hash = app_state.app_hash().to_string(),
        timestamp = app_state.state_params.timestamp.0,
        chain_id = app_state.state_params.chain_id,
        "init chain"
    );

    self.set_committed_state(app_state)?;

    Ok(response)
}
/// Query the application for data at the current or past height.
#[instrument(skip(self))]
async fn query(&self, request: request::Query) -> AbciResult<response::Query> {
    let db = self.state_store_clone();
    let height = FvmQueryHeight::from(request.height.value());
    let (state_params, block_height) = self.state_params_at_height(height)?;

    tracing::debug!(
        query_height = request.height.value(),
        block_height,
        state_root = state_params.state_root.to_string(),
        "running query"
    );

    // Don't run queries on the empty state, they won't work.
    if !Self::can_query_state(block_height, &state_params) {
        return Ok(invalid_query(
            AppError::NotInitialized,
            "The app hasn't been initialized yet.".to_owned(),
        ));
    }

    let state = FvmQueryState::new(
        db,
        self.multi_engine.clone(),
        block_height.try_into()?,
        state_params,
        self.check_state.clone(),
        height == FvmQueryHeight::Pending,
    )
    .context("error creating query state")?;

    let qry = (request.path, request.data.to_vec());

    let (_, result) = self
        .interpreter
        .query(state, qry)
        .await
        .context("error running query")?;

    // Encoding failures are reported to the caller as an invalid query
    // rather than tearing down the ABCI connection.
    let response = match result {
        Err(e) => invalid_query(AppError::InvalidEncoding, e.description),
        Ok(result) => to_query(result, block_height)?,
    };
    Ok(response)
}
/// Check the given transaction before putting it into the local mempool.
async fn check_tx(&self, request: request::CheckTx) -> AbciResult<response::CheckTx> {
    // Keep the guard through the check, so there can be only one at a time.
    let mut guard = self.check_state.lock().await;

    // Lazily build the check state from the latest committed state when
    // none is cached in the guard.
    let state = match guard.take() {
        Some(state) => state,
        None => {
            let db = self.state_store_clone();
            let state = self.committed_state()?;

            // This would create a partial state, but some client scenarios need the full one.
            // FvmCheckState::new(db, state.state_root(), state.chain_id())
            //     .context("error creating check state")?

            FvmExecState::new(
                ReadOnlyBlockstore::new(db),
                self.multi_engine.as_ref(),
                state.block_height.try_into()?,
                state.state_params,
            )
            .context("error creating check state")?
        }
    };

    let (state, result) = self
        .interpreter
        .check(
            state,
            request.tx.to_vec(),
            request.kind == CheckTxKind::Recheck,
        )
        .await
        .context("error running check")?;

    // Update the check state.
    *guard = Some(state);

    // Map the nested result layers (decode / legality / signature) onto
    // ABCI error codes.
    let response = match result {
        Err(e) => invalid_check_tx(AppError::InvalidEncoding, e.description),
        Ok(result) => match result {
            Err(IllegalMessage) => invalid_check_tx(AppError::IllegalMessage, "".to_owned()),
            Ok(Err(InvalidSignature(d))) => invalid_check_tx(AppError::InvalidSignature, d),
            Ok(Ok(ret)) => to_check_tx(ret),
        },
    };

    Ok(response)
}
/// Amend which transactions to put into the next block proposal.
async fn prepare_proposal(
&self,
request: request::PrepareProposal,
) -> AbciResult<response::PrepareProposal> {
tracing::debug!(
height = request.height.value(),
time = request.time.to_string(),
"prepare proposal"
);
let mut cetf_tx = vec![];
match request.local_last_commit {
Some(info) => {
// TODO: Better error handling
let votes = info
.votes
.into_iter()
.map(|vote| {
if vote.vote_extension.is_empty() {
vec![]
} else {
from_slice::<SignedTags>(&vote.vote_extension).unwrap().0
}
})
.flatten()
.collect::<Vec<_>>();
let cetf_sigs = votes
.iter()
.filter(|t| matches!(t, SignatureKind::Cetf(_)))
.map(SignatureKind::as_slice)
.map(bls_signatures::Signature::from_bytes)
.collect::<Result<Vec<_>, bls_signatures::Error>>()
.unwrap();
let height_sigs = votes
.iter()
.filter(|t| matches!(t, SignatureKind::BlockHeight(_)))
.map(SignatureKind::as_slice)
.map(bls_signatures::Signature::from_bytes)
.collect::<Result<Vec<_>, bls_signatures::Error>>()
.unwrap();
let agg_cetf_sig = if !cetf_sigs.is_empty() {
Some(bls_signatures::aggregate(&cetf_sigs).unwrap())
} else {
None
};
let agg_height_sig = if !height_sigs.is_empty() {
Some(bls_signatures::aggregate(&height_sigs).unwrap())
} else {
None
};
tracing::debug!(
r#"prepare proposal signature aggregation result (TM Height: {})):
agg_cetf_sig: {:?}
agg_height_sig: {:?}
"#,
request.height.value(),
agg_cetf_sig.as_ref().map(|s| s.as_bytes()),
agg_height_sig.as_ref().map(|s| s.as_bytes()),
);
if let Some(agg_cetf) = agg_cetf_sig {
cetf_tx.push(cetf_tag_msg_to_chainmessage(&(
request.height.value(),
agg_cetf,
))?);
};
// if let Some(agg_height) = agg_height_sig {
// skip adding block to sign to simplify demo
// cetf_tx.push(cetf_blockheight_tag_msg_to_chainmessage(&(
// request.height.value(),
// agg_height,
// ))?);
// };
}
None => {
tracing::info!("Prepare proposal with no local last commit");
}
}
let txs = cetf_tx.iter().map(|msg| to_vec(msg).unwrap());
let txs = txs
.chain(request.txs.into_iter().map(|tx| tx.to_vec()))
.collect();
let txs = self
.interpreter
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | true |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/tmconv.rs | fendermint/app/src/tmconv.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Conversions to Tendermint data types.
use anyhow::{anyhow, bail, Context};
use fendermint_vm_core::Timestamp;
use fendermint_vm_genesis::{Power, Validator};
use fendermint_vm_interpreter::fvm::{
state::{BlockHash, FvmStateParams},
FvmApplyRet, FvmCheckRet, FvmQueryRet, PowerUpdates,
};
use fendermint_vm_message::signed::DomainHash;
use fendermint_vm_snapshot::{SnapshotItem, SnapshotManifest};
use fvm_shared::{address::Address, error::ExitCode, event::StampedEvent, ActorID};
use prost::Message;
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, num::NonZeroU32};
use tendermint::{
abci::{response, types::ExecTxResult, Code, Event, EventAttribute},
AppHash,
};
use crate::{app::AppError, BlockHeight};
#[derive(Serialize, Deserialize, Debug, Clone)]
struct SnapshotMetadata {
size: u64,
state_params: FvmStateParams,
}
/// IPLD encoding of data types we know we must be able to encode.
macro_rules! ipld_encode {
($var:ident) => {
fvm_ipld_encoding::to_vec(&$var)
.map_err(|e| anyhow!("error IPLD encoding {}: {}", stringify!($var), e))?
};
}
/// Response to delivery where the input was blatantly invalid.
/// This indicates that the validator who made the block was Byzantine.
pub fn invalid_exec_tx_result(err: AppError, description: String) -> ExecTxResult {
tracing::info!(error = ?err, description, "invalid exec_tx_result");
ExecTxResult {
code: Code::Err(NonZeroU32::try_from(err as u32).expect("error codes are non-zero")),
info: description,
..Default::default()
}
}
/// Response to checks where the input was blatantly invalid.
/// This indicates that the user who sent the transaction is either attacking or has a faulty client.
pub fn invalid_check_tx(err: AppError, description: String) -> response::CheckTx {
tracing::info!(error = ?err, description, "invalid check_tx");
response::CheckTx {
code: Code::Err(NonZeroU32::try_from(err as u32).expect("error codes are non-zero")),
info: description,
..Default::default()
}
}
/// Response to queries where the input was blatantly invalid.
pub fn invalid_query(err: AppError, description: String) -> response::Query {
tracing::info!(error = ?err, description, "invalid query");
response::Query {
code: Code::Err(NonZeroU32::try_from(err as u32).expect("error codes are non-zero")),
info: description,
..Default::default()
}
}
/// Convert an FVM execution result into a Tendermint `ExecTxResult`.
///
/// Besides mapping the receipt (exit code, return data, gas), this attaches
/// events for the block hash, the optional domain (e.g. Ethereum) transaction
/// hash, and the message sender/recipient, so Ethereum-style subscriptions
/// can be served from Tendermint's event index.
pub fn to_exec_tx_result(
    ret: FvmApplyRet,
    domain_hash: Option<DomainHash>,
    block_hash: Option<BlockHash>,
) -> ExecTxResult {
    let receipt = ret.apply_ret.msg_receipt;
    // Based on the sanity check in the `DefaultExecutor`.
    // gas_cost = gas_fee_cap * gas_limit; this is how much the account is charged up front.
    // &base_fee_burn + &over_estimation_burn + &refund + &miner_tip == gas_cost
    // But that's in tokens. The closest to what we want here is the limit.
    let gas_wanted: i64 = ret.gas_limit.try_into().unwrap_or(i64::MAX);
    let gas_used: i64 = receipt.gas_used.try_into().unwrap_or(i64::MAX);
    // This should return the `RawBytes` as-is, which is IPLD encoded content.
    let data: bytes::Bytes = receipt.return_data.to_vec().into();
    let mut events = to_events("event", ret.apply_ret.events, ret.emitters);
    // Emit the block hash. It's not useful to subscribe by as it's a-priori unknown,
    // but we can use it during subscription to fill in the block hash field which Ethereum
    // subscriptions expect, and it's otherwise not available.
    if let Some(h) = block_hash {
        events.push(Event::new(
            "block",
            vec![EventAttribute::from((
                "hash".to_string(),
                hex::encode(h),
                true,
            ))],
        ));
    }
    // Emit an event which causes Tendermint to index our transaction with a custom hash.
    // In theory we could emit multiple values under `tx.hash`, but in subscriptions we are
    // looking to emit the one expected by Ethereum clients.
    if let Some(h) = domain_hash {
        events.push(to_domain_hash_event(&h));
    }
    // Emit general message metadata.
    events.push(to_message_event(ret.from, ret.to));
    // Prefer the executor's failure info; fall back to the generic exit code description.
    let message = ret
        .apply_ret
        .failure_info
        .map(|i| i.to_string())
        .filter(|s| !s.is_empty())
        .unwrap_or_else(|| to_error_msg(receipt.exit_code).to_owned());
    ExecTxResult {
        code: to_code(receipt.exit_code),
        data,
        log: Default::default(),
        info: message,
        gas_wanted,
        gas_used,
        events,
        codespace: Default::default(),
    }
}
/// Convert an FVM check result to a Tendermint `CheckTx` response.
pub fn to_check_tx(ret: FvmCheckRet) -> response::CheckTx {
    // Putting the message in `log` because only `log` appears in the `tx_sync` JSON-RPC response.
    let message = ret
        .info
        .filter(|s| !s.is_empty())
        .unwrap_or_else(|| to_error_msg(ret.exit_code).to_owned());
    // Potential error messages that arise in checking if contract execution is enabled are returned in the data.
    // See https://github.com/gakonst/ethers-rs/commit/860100535812cbfe5e3cc417872392a6d76a159c for examples.
    // Do this the same way as `to_deliver_tx`, serializing to IPLD.
    let data: bytes::Bytes = ret.return_data.unwrap_or_default().to_vec().into();
    response::CheckTx {
        code: to_code(ret.exit_code),
        // Move the message instead of cloning: it is not used after this point.
        log: message,
        info: Default::default(),
        data,
        gas_wanted: ret.gas_limit.try_into().unwrap_or(i64::MAX),
        sender: ret.sender.to_string(),
        ..Default::default()
    }
}
/// Assemble the ABCI `FinalizeBlock` response from the block-level FVM result,
/// the per-transaction results, the validator power updates and the app hash.
pub fn to_finalize_block(
    ret: FvmApplyRet,
    tx_results: Vec<ExecTxResult>,
    power_table: PowerUpdates,
    app_hash: AppHash,
) -> anyhow::Result<response::FinalizeBlock> {
    let validator_updates =
        to_validator_updates(power_table.0).context("failed to convert validator updates")?;
    let events = to_events("event", ret.apply_ret.events, ret.emitters);
    Ok(response::FinalizeBlock {
        events,
        tx_results,
        validator_updates,
        consensus_param_updates: Default::default(),
        app_hash,
    })
}
/// Convert events to key-value pairs.
///
/// For the EVM, they are returned like so:
///
/// ```text
/// StampedEvent { emitter: 103,
/// event: ActorEvent { entries: [
///  Entry { flags: FLAG_INDEXED_VALUE, key: "t1", value: RawBytes { 5820ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef } },
///  Entry { flags: FLAG_INDEXED_VALUE, key: "t2", value: RawBytes { 54ff00000000000000000000000000000000000065 } },
///  Entry { flags: FLAG_INDEXED_VALUE, key: "t3", value: RawBytes { 54ff00000000000000000000000000000000000066 } },
///  Entry { flags: FLAG_INDEXED_VALUE, key: "d", value: RawBytes { 582000000000000000000000000000000000000000000000000000000000000007d0 } }] } }
/// ```
///
/// The values are:
/// * "t1" will be the cbor encoded keccak-256 hash of the event signature Transfer(address,address,uint256)
/// * "t2" will be the first indexed argument, i.e. _from (cbor encoded byte array; needs padding to 32 bytes to work with ethers)
/// * "t3" will be the second indexed argument, i.e. _to (cbor encoded byte array; needs padding to 32 bytes to work with ethers)
/// * "d" is a cbor encoded byte array of all the remaining arguments
///
/// `kind` becomes the Tendermint event type; `emitters` maps actor IDs to
/// their delegated (e.g. Ethereum) addresses where known.
pub fn to_events(
    kind: &str,
    stamped_events: Vec<StampedEvent>,
    emitters: HashMap<ActorID, Address>,
) -> Vec<Event> {
    stamped_events
        .into_iter()
        .map(|se| {
            let mut attrs = Vec::new();
            attrs.push(EventAttribute::from((
                "emitter.id".to_string(),
                se.emitter.to_string(),
                true,
            )));
            // This is emitted because some clients might want to subscribe to events
            // based on the deterministic Ethereum address even before a contract is created.
            if let Some(deleg_addr) = emitters.get(&se.emitter) {
                attrs.push(EventAttribute::from((
                    "emitter.deleg".to_string(),
                    deleg_addr.to_string(),
                    true,
                )));
            }
            // Entries are hex-encoded; a non-empty flag set marks the attribute as indexed.
            for e in se.event.entries {
                attrs.push(EventAttribute::from((
                    e.key,
                    hex::encode(e.value),
                    !e.flags.is_empty(),
                )));
            }
            Event::new(kind.to_string(), attrs)
        })
        .collect()
}
/// Construct an indexable event from a custom transaction hash.
pub fn to_domain_hash_event(domain_hash: &DomainHash) -> Event {
    // The event type reflects the hashing domain; currently only Ethereum.
    let (kind, hash) = match domain_hash {
        DomainHash::Eth(h) => ("eth", hex::encode(h)),
    };
    let attr = EventAttribute::from(("hash".to_string(), hash, true));
    Event::new(kind, vec![attr])
}
/// Event about the message itself.
pub fn to_message_event(from: Address, to: Address) -> Event {
    let mk_attr =
        |key: &str, addr: Address| EventAttribute::from((key.to_string(), addr.to_string(), true));
    let attrs = vec![mk_attr("from", from), mk_attr("to", to)];
    Event::new("message".to_string(), attrs)
}
/// Map to query results.
///
/// Translates an `FvmQueryRet` into an ABCI query response, IPLD-encoding the
/// payload into `value` and setting the exit code / error message accordingly.
pub fn to_query(ret: FvmQueryRet, block_height: BlockHeight) -> anyhow::Result<response::Query> {
    let exit_code = match ret {
        FvmQueryRet::Ipld(None) | FvmQueryRet::ActorState(None) => ExitCode::USR_NOT_FOUND,
        FvmQueryRet::Ipld(_) | FvmQueryRet::ActorState(_) => ExitCode::OK,
        // For calls and estimates, the caller needs to look into the `value` field to see the real exit code;
        // the query itself is successful, even if the value represents a failure.
        FvmQueryRet::Call(_) | FvmQueryRet::EstimateGas(_) => ExitCode::OK,
        FvmQueryRet::StateParams(_) => ExitCode::OK,
        FvmQueryRet::BuiltinActors(_) => ExitCode::OK,
    };
    // The return value has a `key` field which is supposed to be set to the data matched.
    // At this point we don't have access to the input (such as the CID that was looked up),
    // but the query sender does. Rather than repeat everything, we only set the key
    // where it adds information, like the actor ID, just to keep this option visible.
    let (key, value) = match ret {
        FvmQueryRet::Ipld(None) | FvmQueryRet::ActorState(None) => (Vec::new(), Vec::new()),
        FvmQueryRet::Ipld(Some(bz)) => (Vec::new(), bz),
        FvmQueryRet::ActorState(Some(x)) => {
            let (id, st) = *x;
            let k = ipld_encode!(id);
            let v = ipld_encode!(st);
            (k, v)
        }
        FvmQueryRet::Call(ret) => {
            // Send back an entire Tendermint deliver_tx response, encoded as IPLD.
            // This is so there is a single representation of a call result, instead
            // of a normal delivery being one way and a query exposing `FvmApplyRet`.
            let dtx = to_exec_tx_result(ret, None, None);
            let dtx = tendermint_proto::abci::ExecTxResult::from(dtx);
            let mut buf = bytes::BytesMut::new();
            dtx.encode(&mut buf)?;
            let bz = buf.to_vec();
            // So the value is an IPLD encoded Protobuf byte vector.
            let v = ipld_encode!(bz);
            (Vec::new(), v)
        }
        FvmQueryRet::EstimateGas(est) => {
            let v = ipld_encode!(est);
            (Vec::new(), v)
        }
        FvmQueryRet::StateParams(sp) => {
            let v = ipld_encode!(sp);
            (Vec::new(), v)
        }
        FvmQueryRet::BuiltinActors(ba) => {
            let v = ipld_encode!(ba);
            (Vec::new(), v)
        }
    };
    // The height here is the height of the block that was committed, not in which the app hash appeared.
    let height = tendermint::block::Height::try_from(block_height).context("height too big")?;
    let res = response::Query {
        code: to_code(exit_code),
        info: to_error_msg(exit_code).to_owned(),
        key: key.into(),
        value: value.into(),
        height,
        ..Default::default()
    };
    Ok(res)
}
/// Project Genesis validators to Tendermint validator updates.
///
/// Returns an error if a validator's public key or power cannot be
/// represented by the corresponding Tendermint types.
pub fn to_validator_updates(
    validators: Vec<Validator<Power>>,
) -> anyhow::Result<Vec<tendermint::validator::Update>> {
    // Preallocate: we emit exactly one update per validator.
    let mut updates = Vec::with_capacity(validators.len());
    for v in validators {
        updates.push(tendermint::validator::Update {
            pub_key: tendermint::PublicKey::try_from(v.public_key)?,
            power: tendermint::vote::Power::try_from(v.power.0)?,
        });
    }
    Ok(updates)
}
/// Convert a Tendermint block time to the Fendermint `Timestamp`.
///
/// Panics if the time is before the Unix epoch (negative seconds), which
/// should not occur for a timestamp produced by consensus.
pub fn to_timestamp(time: tendermint::time::Time) -> Timestamp {
    Timestamp(
        time.unix_timestamp()
            .try_into()
            .expect("negative timestamp"),
    )
}
/// Map an FVM exit code to an ABCI result code.
pub fn to_code(exit_code: ExitCode) -> Code {
    if !exit_code.is_success() {
        // Non-success FVM exit codes are non-zero, so this conversion cannot fail.
        let code = NonZeroU32::try_from(exit_code.value()).expect("error codes are non-zero");
        return Code::Err(code);
    }
    Code::Ok
}
/// Human readable description of the well-known FVM exit codes.
///
/// Returns an empty string for `OK` and for any code not explicitly listed.
pub fn to_error_msg(exit_code: ExitCode) -> &'static str {
    match exit_code {
        ExitCode::OK => "",
        ExitCode::SYS_SENDER_INVALID => "The message sender doesn't exist.",
        ExitCode::SYS_SENDER_STATE_INVALID => "The message sender was not in a valid state to send this message. Either the nonce didn't match, or the sender didn't have funds to cover the message gas.",
        ExitCode::SYS_ILLEGAL_INSTRUCTION => "The message receiver trapped (panicked).",
        ExitCode::SYS_INVALID_RECEIVER => "The message receiver doesn't exist and can't be automatically created",
        ExitCode::SYS_INSUFFICIENT_FUNDS => "The message sender didn't have the requisite funds.",
        ExitCode::SYS_OUT_OF_GAS => "Message execution (including subcalls) used more gas than the specified limit.",
        ExitCode::SYS_ILLEGAL_EXIT_CODE => "The message receiver aborted with a reserved exit code.",
        ExitCode::SYS_ASSERTION_FAILED => "An internal VM assertion failed.",
        ExitCode::SYS_MISSING_RETURN => "The actor returned a block handle that doesn't exist",
        ExitCode::USR_ILLEGAL_ARGUMENT => "The method parameters are invalid.",
        ExitCode::USR_NOT_FOUND => "The requested resource does not exist.",
        ExitCode::USR_FORBIDDEN => "The requested operation is forbidden.",
        ExitCode::USR_INSUFFICIENT_FUNDS => "The actor has insufficient funds to perform the requested operation.",
        ExitCode::USR_ILLEGAL_STATE => "The actor's internal state is invalid.",
        ExitCode::USR_SERIALIZATION => "There was a de/serialization failure within actor code.",
        ExitCode::USR_UNHANDLED_MESSAGE => "The message cannot be handled (usually indicates an unhandled method number).",
        ExitCode::USR_UNSPECIFIED => "The actor failed with an unspecified error.",
        ExitCode::USR_ASSERTION_FAILED => "The actor failed a user-level assertion.",
        ExitCode::USR_READ_ONLY => "The requested operation cannot be performed in 'read-only' mode.",
        ExitCode::USR_NOT_PAYABLE => "The method cannot handle a transfer of value.",
        _ => ""
    }
}
pub fn to_snapshots(
snapshots: impl IntoIterator<Item = SnapshotItem>,
) -> anyhow::Result<response::ListSnapshots> {
let snapshots = snapshots
.into_iter()
.map(to_snapshot)
.collect::<Result<Vec<_>, _>>()?;
Ok(response::ListSnapshots { snapshots })
}
/// Convert a snapshot manifest to the Tendermint ABCI type.
///
/// Returns an error if the block height does not fit the Tendermint height
/// type, or if the metadata cannot be IPLD encoded.
pub fn to_snapshot(snapshot: SnapshotItem) -> anyhow::Result<tendermint::abci::types::Snapshot> {
    // Put anything that doesn't fit into fields of the ABCI snapshot into the metadata.
    let metadata = SnapshotMetadata {
        size: snapshot.manifest.size,
        state_params: snapshot.manifest.state_params,
    };
    Ok(tendermint::abci::types::Snapshot {
        // Propagate a conversion failure instead of panicking; the function
        // is already fallible and the height comes from external data.
        height: snapshot
            .manifest
            .block_height
            .try_into()
            .context("snapshot block height out of range")?,
        format: snapshot.manifest.version,
        chunks: snapshot.manifest.chunks,
        hash: snapshot.manifest.checksum.into(),
        metadata: fvm_ipld_encoding::to_vec(&metadata)?.into(),
    })
}
/// Parse a Tendermint ABCI snapshot offer to a manifest.
///
/// Fails if the embedded metadata cannot be decoded, if the app hash derived
/// from the metadata does not match the one in the offer, or if the checksum
/// bytes are not a valid hash.
pub fn from_snapshot(
    offer: tendermint::abci::request::OfferSnapshot,
) -> anyhow::Result<SnapshotManifest> {
    let metadata = fvm_ipld_encoding::from_slice::<SnapshotMetadata>(&offer.snapshot.metadata)
        .context("failed to parse snapshot metadata")?;
    // Cross-check: the state params in the metadata must reproduce the offered app hash.
    let app_hash = to_app_hash(&metadata.state_params);
    if app_hash != offer.app_hash {
        bail!(
            "the application hash does not match the metadata; from-meta = {}, from-offer = {}",
            app_hash,
            offer.app_hash,
        );
    }
    let checksum = tendermint::hash::Hash::try_from(offer.snapshot.hash)
        .context("failed to parse checksum")?;
    let manifest = SnapshotManifest {
        block_height: offer.snapshot.height.value(),
        size: metadata.size,
        chunks: offer.snapshot.chunks,
        checksum,
        state_params: metadata.state_params,
        version: offer.snapshot.format,
    };
    Ok(manifest)
}
/// Produce an application hash that is a commitment to all data replicated by consensus,
/// that is, all nodes participating in the network must agree on this otherwise we have
/// a consensus failure.
///
/// Notably it contains the actor state root _as well as_ some of the metadata maintained
/// outside the FVM, such as the timestamp and the circulating supply.
pub fn to_app_hash(state_params: &FvmStateParams) -> tendermint::hash::AppHash {
    // Create an artificial CID from the FVM state params, which include everything that
    // deterministically changes under consensus.
    let state_params_cid =
        fendermint_vm_message::cid(state_params).expect("state params have a CID");
    // We could reduce it to a hash to emphasize that this is not something that we can return at the moment,
    // but we could just as easily store the record in the Blockstore to make it retrievable.
    // It is generally not a goal to serve the entire state over the IPLD Resolver or ABCI queries, though;
    // for that we should rely on the CometBFT snapshot mechanism.
    // But to keep our options open, we can return the hash as a CID that nobody can retrieve, and change our mind later.
    // let state_params_hash = state_params_cid.hash();
    let state_params_hash = state_params_cid.to_bytes();
    tendermint::hash::AppHash::try_from(state_params_hash).expect("hash can be wrapped")
}
#[cfg(test)]
mod tests {
    use fendermint_vm_snapshot::SnapshotItem;
    use fvm_shared::error::ExitCode;
    use tendermint::abci::request;
    use crate::tmconv::to_error_msg;
    use super::{from_snapshot, to_app_hash, to_snapshot};
    // `to_error_msg` maps OK to "" and known errors to their descriptions.
    #[test]
    fn code_error_message() {
        assert_eq!(to_error_msg(ExitCode::OK), "");
        assert_eq!(
            to_error_msg(ExitCode::SYS_SENDER_INVALID),
            "The message sender doesn't exist."
        );
    }
    // Property: a snapshot round-trips through the ABCI offer representation.
    #[quickcheck_macros::quickcheck]
    fn abci_snapshot_metadata(snapshot: SnapshotItem) {
        let abci_snapshot = to_snapshot(snapshot.clone()).unwrap();
        let abci_offer = request::OfferSnapshot {
            snapshot: abci_snapshot,
            app_hash: to_app_hash(&snapshot.manifest.state_params),
        };
        let manifest = from_snapshot(abci_offer).unwrap();
        assert_eq!(manifest, snapshot.manifest)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/lib.rs | fendermint/app/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod app;
pub mod events;
pub mod ipc;
pub mod metrics;
mod store;
mod tmconv;
pub use app::{App, AppConfig};
pub use store::{AppStore, BitswapBlockstore};
// Different type from `ChainEpoch` just because we might use epoch in a more traditional sense for checkpointing.
pub type BlockHeight = u64;
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
#[derive(Debug)]
pub enum AppExitCode {
/// Fendermint exited normally
Ok = 0,
/// Fendermint exited with an unknown error
UnknownError = 1,
/// Fendermint exited since it reached a block height equal to halt_height
Halt = 2,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/store.rs | fendermint/app/src/store.rs | use cid::Cid;
use ipc_ipld_resolver::missing_blocks::missing_blocks;
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use libp2p_bitswap::BitswapStore;
use std::borrow::Cow;
use fendermint_rocksdb::blockstore::NamespaceBlockstore;
use fendermint_storage::{Codec, Decode, Encode, KVError, KVResult, KVStore};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::{de::DeserializeOwned, serde::Serialize};
/// [`KVStore`] type we use to store historical data in the database.
#[derive(Clone)]
pub struct AppStore;
impl KVStore for AppStore {
    // Values are raw CBOR bytes, keyed within string namespaces.
    type Repr = Vec<u8>;
    type Namespace = String;
}
impl<T> Codec<T> for AppStore where AppStore: Encode<T> + Decode<T> {}
/// CBOR serialization.
impl<T> Encode<T> for AppStore
where
    T: Serialize,
{
    fn to_repr(value: &T) -> KVResult<Cow<Self::Repr>> {
        // IPLD/CBOR-encode the value, wrapping any codec failure in `KVError::Codec`.
        fvm_ipld_encoding::to_vec(value)
            .map_err(|e| KVError::Codec(Box::new(e)))
            .map(Cow::Owned)
    }
}
/// CBOR deserialization.
impl<T> Decode<T> for AppStore
where
    T: DeserializeOwned,
{
    fn from_repr(repr: &Self::Repr) -> KVResult<T> {
        // Decode the raw CBOR bytes, wrapping any codec failure in `KVError::Codec`.
        fvm_ipld_encoding::from_slice(repr).map_err(|e| KVError::Codec(Box::new(e)))
    }
}
/// A `Blockstore` and `BitswapStore` implementation we can pass to the IPLD Resolver.
pub struct BitswapBlockstore {
    /// The `Blockstore` implementation where the FVM actors store their data.
    ///
    /// This must not be written to by Bitswap operations, because that could result
    /// in some nodes having some data that others don't, which would lead to a
    /// consensus failure. We can read data from it, but not write to it.
    state_store: NamespaceBlockstore,
    /// The `Blockstore` implementation where Bitswap operations can write to.
    bit_store: NamespaceBlockstore,
}
impl BitswapBlockstore {
    /// Combine the read-only FVM state store with the writable Bitswap store.
    pub fn new(state_store: NamespaceBlockstore, bit_store: NamespaceBlockstore) -> Self {
        Self {
            state_store,
            bit_store,
        }
    }
}
impl Blockstore for BitswapBlockstore {
    /// Reads consult the Bitswap store first, then fall back to the state store.
    fn has(&self, k: &cid::Cid) -> anyhow::Result<bool> {
        // `||` short-circuits, so the state store is only consulted on a miss.
        Ok(self.bit_store.has(k)? || self.state_store.has(k)?)
    }
    fn get(&self, k: &cid::Cid) -> anyhow::Result<Option<Vec<u8>>> {
        match self.bit_store.get(k)? {
            hit @ Some(_) => Ok(hit),
            None => self.state_store.get(k),
        }
    }
    /// Writes only ever go to the Bitswap store; the state store stays read-only.
    fn put_keyed(&self, k: &cid::Cid, block: &[u8]) -> anyhow::Result<()> {
        self.bit_store.put_keyed(k, block)
    }
    fn put_many_keyed<D, I>(&self, blocks: I) -> anyhow::Result<()>
    where
        Self: Sized,
        D: AsRef<[u8]>,
        I: IntoIterator<Item = (cid::Cid, D)>,
    {
        self.bit_store.put_many_keyed(blocks)
    }
}
impl BitswapStore for BitswapBlockstore {
    type Params = libipld::DefaultParams;
    // Delegate to the combined `Blockstore` impl so reads see both stores.
    fn contains(&mut self, cid: &Cid) -> anyhow::Result<bool> {
        Blockstore::has(self, cid)
    }
    fn get(&mut self, cid: &Cid) -> anyhow::Result<Option<Vec<u8>>> {
        Blockstore::get(self, cid)
    }
    // Inserts land in the Bitswap store only (see `Blockstore::put_keyed`).
    fn insert(&mut self, block: &libipld::Block<Self::Params>) -> anyhow::Result<()> {
        Blockstore::put_keyed(self, block.cid(), block.data())
    }
    fn missing_blocks(&mut self, cid: &Cid) -> anyhow::Result<Vec<Cid>> {
        missing_blocks::<Self, Self::Params>(self, cid)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/main.rs | fendermint/app/src/main.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub use fendermint_app_options as options;
pub use fendermint_app_settings as settings;
use tracing_appender::{
non_blocking::WorkerGuard,
rolling::{RollingFileAppender, Rotation},
};
use tracing_subscriber::fmt::format::FmtSpan;
use tracing_subscriber::{fmt, layer::SubscriberExt, Layer};
mod cmd;
fn init_tracing(opts: &options::Options) -> Option<WorkerGuard> {
let console_filter = opts.log_console_filter().expect("invalid filter");
let file_filter = opts.log_file_filter().expect("invalid filter");
// log all traces to stderr (reserving stdout for any actual output such as from the CLI commands)
let console_layer = fmt::layer()
.with_writer(std::io::stderr)
.with_target(false)
.with_file(true)
.with_line_number(true)
.with_filter(console_filter);
// add a file layer if log_dir is set
let (file_layer, file_guard) = match &opts.log_dir {
Some(log_dir) => {
let filename = match &opts.log_file_prefix {
Some(prefix) => format!("{}-{}", prefix, "fendermint"),
None => "fendermint".to_string(),
};
let appender = RollingFileAppender::builder()
.filename_prefix(filename)
.filename_suffix("log")
.rotation(Rotation::DAILY)
.max_log_files(7)
.build(log_dir)
.expect("failed to initialize rolling file appender");
let (non_blocking, file_guard) = tracing_appender::non_blocking(appender);
let file_layer = fmt::layer()
.json()
.with_writer(non_blocking)
.with_span_events(FmtSpan::CLOSE)
.with_target(false)
.with_file(true)
.with_line_number(true)
.with_filter(file_filter);
(Some(file_layer), Some(file_guard))
}
None => (None, None),
};
let metrics_layer = if opts.metrics_enabled() {
Some(fendermint_app::metrics::layer())
} else {
None
};
let registry = tracing_subscriber::registry()
.with(console_layer)
.with(file_layer)
.with(metrics_layer);
tracing::subscriber::set_global_default(registry).expect("Unable to set a global collector");
file_guard
}
/// Install a panic handler that prints stuff to the logs, otherwise it only shows up in the console.
fn init_panic_handler() {
    // Keep the previous (default) hook so we can chain into it.
    let default_hook = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |info| {
        // Do the default first, just in case logging fails too.
        default_hook(info);
        // `force_capture` captures a backtrace regardless of RUST_BACKTRACE.
        // let stacktrace = std::backtrace::Backtrace::capture();
        let stacktrace = std::backtrace::Backtrace::force_capture();
        tracing::error!(
            stacktrace = stacktrace.to_string(),
            info = info.to_string(),
            "panicking"
        );
        // We could exit the application if any of the background tokio tasks panic.
        // However, they are not necessarily critical processes, the chain might still make progress.
        // std::process::abort();
    }))
}
#[tokio::main]
async fn main() {
let opts = options::parse();
let _guard = init_tracing(&opts);
init_panic_handler();
if let Err(e) = cmd::exec(&opts).await {
tracing::error!("failed to execute {:?}: {e:?}", opts);
std::process::exit(fendermint_app::AppExitCode::UnknownError as i32);
}
}
#[cfg(test)]
mod tests {
use cid::Cid;
use fendermint_rocksdb::{RocksDb, RocksDbConfig};
use fendermint_vm_interpreter::fvm::bundle::bundle_path;
use fvm::machine::Manifest;
use fvm_ipld_car::load_car_unchecked;
use fvm_ipld_encoding::CborStore;
#[tokio::test]
async fn load_car() {
// Just to see if dependencies compile together, see if we can load an actor bundle into a temporary RocksDB.
// Run it with `cargo run -p fendermint_app`
// Not loading the actors from the library any more. It would be possible, as long as dependencies are aligned.
// let bundle_car = actors_v10::BUNDLE_CAR;
let bundle_path = bundle_path();
let bundle_car = std::fs::read(&bundle_path)
.unwrap_or_else(|_| panic!("failed to load bundle CAR from {bundle_path:?}"));
let dir = tempfile::Builder::new()
.tempdir()
.expect("error creating temporary path for db");
let path = dir.path().join("rocksdb");
let open_db = || {
RocksDb::open(path.clone(), &RocksDbConfig::default()).expect("error creating RocksDB")
};
let db = open_db();
let cids = load_car_unchecked(&db, bundle_car.as_slice())
.await
.expect("error loading bundle CAR");
let bundle_root = cids.first().expect("there should be 1 CID");
// Close and reopen.
drop(db);
let db = open_db();
let (manifest_version, manifest_data_cid): (u32, Cid) = db
.get_cbor(bundle_root)
.expect("error getting bundle root")
.expect("bundle root was not in store");
Manifest::load(&db, &manifest_data_cid, manifest_version).expect("error loading manifest");
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/events.rs | fendermint/app/src/events.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::BlockHeight;
/// Re-export other events, just to provide the visibility of where they are.
pub use fendermint_vm_event::{
NewBottomUpCheckpoint, NewParentView, ParentFinalityCommitted, ParentFinalityMissingQuorum,
};
/// Hex encoded block hash.
pub type BlockHashHex<'a> = &'a str;
#[derive(Debug, Default)]
pub struct ProposalProcessed<'a> {
pub is_accepted: bool,
pub block_height: BlockHeight,
pub block_hash: BlockHashHex<'a>,
pub num_txs: usize,
pub proposer: &'a str,
}
#[derive(Debug, Default)]
pub struct NewBlock {
pub block_height: BlockHeight,
}
#[derive(Debug, Default)]
pub struct ParentFinalityVoteAdded<'a> {
pub block_height: BlockHeight,
pub block_hash: BlockHashHex<'a>,
pub validator: &'a str,
}
#[derive(Debug, Default)]
pub struct ParentFinalityVoteIgnored<'a> {
pub block_height: BlockHeight,
pub block_hash: BlockHashHex<'a>,
pub validator: &'a str,
}
#[derive(Debug, Default)]
pub struct ExtendVote {
pub block_height: BlockHeight,
pub signed_tags: u64,
pub bytes: u64,
}
// TODO: Add new events for:
// * snapshots
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/ipc.rs | fendermint/app/src/ipc.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! IPC related execution
use crate::app::{AppState, AppStoreKey};
use crate::{App, BlockHeight};
use fendermint_storage::{Codec, Encode, KVReadable, KVStore, KVWritable};
use fendermint_vm_genesis::{Power, Validator};
use fendermint_vm_interpreter::fvm::state::ipc::GatewayCaller;
use fendermint_vm_interpreter::fvm::state::{FvmExecState, FvmStateParams};
use fendermint_vm_interpreter::fvm::store::ReadOnlyBlockstore;
use fendermint_vm_topdown::sync::ParentFinalityStateQuery;
use fendermint_vm_topdown::IPCParentFinality;
use fvm_ipld_blockstore::Blockstore;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
/// All the things that can be voted on in a subnet.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum AppVote {
/// The validator considers a certain block final on the parent chain.
ParentFinality(IPCParentFinality),
}
/// Queries the LATEST COMMITTED parent finality from the storage
pub struct AppParentFinalityQuery<DB, SS, S, I>
where
SS: Blockstore + Clone + 'static,
S: KVStore,
{
/// The app to get state
app: App<DB, SS, S, I>,
gateway_caller: GatewayCaller<ReadOnlyBlockstore<Arc<SS>>>,
}
impl<DB, SS, S, I> AppParentFinalityQuery<DB, SS, S, I>
where
S: KVStore
+ Codec<AppState>
+ Encode<AppStoreKey>
+ Encode<BlockHeight>
+ Codec<FvmStateParams>,
DB: KVWritable<S> + KVReadable<S> + 'static + Clone,
SS: Blockstore + 'static + Clone,
{
pub fn new(app: App<DB, SS, S, I>) -> Self {
Self {
app,
gateway_caller: GatewayCaller::default(),
}
}
fn with_exec_state<F, T>(&self, f: F) -> anyhow::Result<Option<T>>
where
F: FnOnce(FvmExecState<ReadOnlyBlockstore<Arc<SS>>>) -> anyhow::Result<T>,
{
match self.app.new_read_only_exec_state()? {
Some(s) => f(s).map(Some),
None => Ok(None),
}
}
}
impl<DB, SS, S, I> ParentFinalityStateQuery for AppParentFinalityQuery<DB, SS, S, I>
where
S: KVStore
+ Codec<AppState>
+ Encode<AppStoreKey>
+ Encode<BlockHeight>
+ Codec<FvmStateParams>,
DB: KVWritable<S> + KVReadable<S> + 'static + Clone,
SS: Blockstore + 'static + Clone,
{
fn get_latest_committed_finality(&self) -> anyhow::Result<Option<IPCParentFinality>> {
self.with_exec_state(|mut exec_state| {
self.gateway_caller
.get_latest_parent_finality(&mut exec_state)
})
}
fn get_power_table(&self) -> anyhow::Result<Option<Vec<Validator<Power>>>> {
self.with_exec_state(|mut exec_state| {
self.gateway_caller
.current_power_table(&mut exec_state)
.map(|(_, pt)| pt)
})
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/config.rs | fendermint/app/src/cmd/config.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fendermint_app_options::config::ConfigArgs;
use crate::{cmd, settings::Settings};
cmd! {
ConfigArgs(self, settings) {
print_settings(settings)
}
}
/// Print the parsed settings to STDOUT.
fn print_settings(settings: Settings) -> anyhow::Result<()> {
    // `Settings` doesn't implement `Serialize` yet, so fall back to `Debug`
    // formatting; if it did we could pick a serialization format to print in.
    let rendered = format!("{settings:?}");
    println!("{rendered}");
    Ok(())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/genesis.rs | fendermint/app/src/cmd/genesis.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use fendermint_crypto::PublicKey;
use fvm_shared::address::Address;
use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig};
use ipc_provider::IpcProvider;
use std::path::PathBuf;
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_core::{chainid, Timestamp};
use fendermint_vm_genesis::{
ipc, Account, Actor, ActorMeta, Collateral, Genesis, Multisig, PermissionMode, SignerAddr,
Validator, ValidatorKey,
};
use crate::cmd;
use crate::options::genesis::*;
use super::key::read_public_key;
cmd! {
GenesisArgs(self) {
let genesis_file = self.genesis_file.clone();
match &self.command {
GenesisCommands::New(args) => args.exec(genesis_file).await,
GenesisCommands::AddAccount(args) => args.exec(genesis_file).await,
GenesisCommands::AddMultisig(args) => args.exec(genesis_file).await,
GenesisCommands::AddValidator(args) => args.exec(genesis_file).await,
GenesisCommands::IntoTendermint(args) => args.exec(genesis_file).await,
GenesisCommands::SetEamPermissions(args) => args.exec(genesis_file).await,
GenesisCommands::Ipc { command } => command.exec(genesis_file).await,
}
}
}
cmd! {
GenesisNewArgs(self, genesis_file: PathBuf) {
let genesis = Genesis {
timestamp: Timestamp(self.timestamp),
chain_name: self.chain_name.clone(),
network_version: self.network_version,
base_fee: self.base_fee.clone(),
power_scale: self.power_scale,
validators: Vec::new(),
accounts: Vec::new(),
eam_permission_mode: PermissionMode::Unrestricted,
ipc: None,
};
let json = serde_json::to_string_pretty(&genesis)?;
std::fs::write(genesis_file, json)?;
Ok(())
}
}
cmd! {
GenesisAddAccountArgs(self, genesis_file: PathBuf) {
add_account(&genesis_file, self)
}
}
cmd! {
GenesisAddMultisigArgs(self, genesis_file: PathBuf) {
add_multisig(&genesis_file, self)
}
}
cmd! {
GenesisAddValidatorArgs(self, genesis_file: PathBuf) {
add_validator(&genesis_file, self)
}
}
cmd! {
GenesisIntoTendermintArgs(self, genesis_file: PathBuf) {
into_tendermint(&genesis_file, self)
}
}
cmd! {
GenesisSetEAMPermissionsArgs(self, genesis_file: PathBuf) {
set_eam_permissions(&genesis_file, self)
}
}
cmd! {
GenesisIpcCommands(self, genesis_file: PathBuf) {
match self {
GenesisIpcCommands::Gateway(args) =>
set_ipc_gateway(&genesis_file, args),
GenesisIpcCommands::FromParent(args) =>
new_genesis_from_parent(&genesis_file, args).await,
}
}
}
fn add_account(genesis_file: &PathBuf, args: &GenesisAddAccountArgs) -> anyhow::Result<()> {
update_genesis(genesis_file, |mut genesis| {
let pk = read_public_key(&args.public_key)?;
let pk = pk.serialize();
let addr = match args.kind {
AccountKind::Regular => Address::new_secp256k1(&pk)?,
AccountKind::Ethereum => Address::from(EthAddress::new_secp256k1(&pk)?),
};
let meta = ActorMeta::Account(Account {
owner: SignerAddr(addr),
});
if genesis.accounts.iter().any(|a| a.meta == meta) {
return Err(anyhow!("account already exists in the genesis file"));
}
let actor = Actor {
meta,
balance: args.balance.clone(),
};
genesis.accounts.push(actor);
Ok(genesis)
})
}
fn add_multisig(genesis_file: &PathBuf, args: &GenesisAddMultisigArgs) -> anyhow::Result<()> {
update_genesis(genesis_file, |mut genesis| {
let mut signers = Vec::new();
for p in &args.public_key {
let pk = read_public_key(p)?;
let addr = SignerAddr(Address::new_secp256k1(&pk.serialize())?);
if signers.contains(&addr) {
return Err(anyhow!("duplicated signer: {}", p.to_string_lossy()));
}
signers.push(addr);
}
if signers.is_empty() {
return Err(anyhow!("there needs to be at least one signer"));
}
if signers.len() < args.threshold as usize {
return Err(anyhow!("threshold cannot be higher than number of signers"));
}
if args.threshold == 0 {
return Err(anyhow!("threshold must be positive"));
}
let ms = Multisig {
signers,
threshold: args.threshold,
vesting_duration: args.vesting_duration,
vesting_start: args.vesting_start,
};
let actor = Actor {
meta: ActorMeta::Multisig(ms),
balance: args.balance.clone(),
};
genesis.accounts.push(actor);
Ok(genesis)
})
}
fn add_validator(genesis_file: &PathBuf, args: &GenesisAddValidatorArgs) -> anyhow::Result<()> {
update_genesis(genesis_file, |mut genesis| {
let pk = read_public_key(&args.public_key)?;
let vk = ValidatorKey(pk);
if genesis.validators.iter().any(|v| v.public_key == vk) {
return Err(anyhow!("account already exists in the genesis file"));
}
let validator = Validator {
public_key: vk,
power: Collateral(args.power.clone()),
};
genesis.validators.push(validator);
Ok(genesis)
})
}
fn read_genesis(genesis_file: &PathBuf) -> anyhow::Result<Genesis> {
let json = std::fs::read_to_string(genesis_file).context("failed to read genesis")?;
let genesis = serde_json::from_str::<Genesis>(&json).context("failed to parse genesis")?;
Ok(genesis)
}
fn update_genesis<F>(genesis_file: &PathBuf, f: F) -> anyhow::Result<()>
where
F: FnOnce(Genesis) -> anyhow::Result<Genesis>,
{
let genesis = read_genesis(genesis_file)?;
let genesis = f(genesis)?;
let json = serde_json::to_string_pretty(&genesis)?;
std::fs::write(genesis_file, json)?;
Ok(())
}
fn set_eam_permissions(
genesis_file: &PathBuf,
args: &GenesisSetEAMPermissionsArgs,
) -> anyhow::Result<()> {
update_genesis(genesis_file, |mut genesis| {
genesis.eam_permission_mode = match args.mode.to_lowercase().as_str() {
"unrestricted" => PermissionMode::Unrestricted,
"allowlist" => {
let addresses = args.addresses.clone();
PermissionMode::AllowList { addresses }
}
_ => return Err(anyhow!("unknown eam permisison mode")),
};
Ok(genesis)
})
}
fn into_tendermint(genesis_file: &PathBuf, args: &GenesisIntoTendermintArgs) -> anyhow::Result<()> {
let genesis = read_genesis(genesis_file)?;
let genesis_json = serde_json::to_value(&genesis)?;
let chain_id: u64 = chainid::from_str_hashed(&genesis.chain_name)?.into();
let chain_id = chain_id.to_string();
let tmg = tendermint::Genesis {
genesis_time: tendermint::time::Time::from_unix_timestamp(genesis.timestamp.as_secs(), 0)?,
chain_id: tendermint::chain::Id::try_from(chain_id)?,
// CometBFT chains typically start from height 1. It doesn't seem to matter if we set this to 0,
// the `init_chain` ABCI method will still receive 1.
initial_height: 1,
// Values are based on the default produced by `tendermint init`
consensus_params: tendermint::consensus::Params {
block: tendermint::block::Size {
max_bytes: args.block_max_bytes,
max_gas: -1,
time_iota_ms: tendermint::block::Size::default_time_iota_ms(),
},
evidence: tendermint::evidence::Params {
max_age_num_blocks: 100000,
max_age_duration: tendermint::evidence::Duration(std::time::Duration::from_nanos(
172800000000000,
)),
max_bytes: 1048576,
},
validator: tendermint::consensus::params::ValidatorParams {
pub_key_types: vec![tendermint::public_key::Algorithm::Secp256k1],
},
version: Some(tendermint::consensus::params::VersionParams { app: 0 }),
abci: tendermint::consensus::params::AbciParams {
vote_extensions_enable_height: Some(Default::default()),
},
},
// Validators will be returnd from `init_chain`.
validators: Vec::new(),
// Hopefully leaving this empty will skip validation,
// otherwise we have to run the genesis in memory here and now.
app_hash: tendermint::AppHash::default(),
app_state: genesis_json,
};
let tmg_json = serde_json::to_string_pretty(&tmg)?;
std::fs::write(&args.out, tmg_json)?;
Ok(())
}
fn set_ipc_gateway(genesis_file: &PathBuf, args: &GenesisIpcGatewayArgs) -> anyhow::Result<()> {
update_genesis(genesis_file, |mut genesis| {
let gateway_params = ipc::GatewayParams {
subnet_id: args.subnet_id.clone(),
bottom_up_check_period: args.bottom_up_check_period,
majority_percentage: args.majority_percentage,
active_validators_limit: args.active_validators_limit,
};
let ipc_params = match genesis.ipc {
Some(mut ipc) => {
ipc.gateway = gateway_params;
ipc
}
None => ipc::IpcParams {
gateway: gateway_params,
},
};
genesis.ipc = Some(ipc_params);
Ok(genesis)
})
}
async fn new_genesis_from_parent(
genesis_file: &PathBuf,
args: &GenesisFromParentArgs,
) -> anyhow::Result<()> {
// provider with the parent.
let parent_provider = IpcProvider::new_with_subnet(
None,
ipc_provider::config::Subnet {
id: args
.subnet_id
.parent()
.ok_or_else(|| anyhow!("subnet is not a child"))?,
config: SubnetConfig::Fevm(EVMSubnet {
provider_http: args.parent_endpoint.clone(),
provider_timeout: None,
auth_token: args.parent_auth_token.clone(),
registry_addr: args.parent_registry,
gateway_addr: args.parent_gateway,
}),
},
)?;
let genesis_info = parent_provider.get_genesis_info(&args.subnet_id).await?;
// get gateway genesis
let ipc_params = ipc::IpcParams {
gateway: ipc::GatewayParams {
subnet_id: args.subnet_id.clone(),
bottom_up_check_period: genesis_info.bottom_up_checkpoint_period,
majority_percentage: genesis_info.majority_percentage,
active_validators_limit: genesis_info.active_validators_limit,
},
};
let mut genesis = Genesis {
// We set the genesis epoch as the genesis timestamp so it can be
// generated deterministically by all participants
// genesis_epoch should be a positive number, we can afford panicking
// here if this is not the case.
timestamp: Timestamp(genesis_info.genesis_epoch.try_into().unwrap()),
chain_name: args.subnet_id.to_string(),
network_version: args.network_version,
base_fee: args.base_fee.clone(),
power_scale: args.power_scale,
validators: Vec::new(),
accounts: Vec::new(),
eam_permission_mode: PermissionMode::Unrestricted,
ipc: Some(ipc_params),
};
for v in genesis_info.validators {
let pk = PublicKey::parse_slice(&v.metadata, None)?;
genesis.validators.push(Validator {
public_key: ValidatorKey(pk),
power: Collateral(v.weight),
})
}
for (a, b) in genesis_info.genesis_balances {
let meta = ActorMeta::Account(Account {
owner: SignerAddr(a),
});
let actor = Actor {
meta,
balance: b.clone(),
};
genesis.accounts.push(actor);
}
let json = serde_json::to_string_pretty(&genesis)?;
std::fs::write(genesis_file, json)?;
Ok(())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/key.rs | fendermint/app/src/cmd/key.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use bls_signatures::Serialize;
use fendermint_app_options::key::KeyShowPeerIdArgs;
use fendermint_crypto::{from_b64, to_b64, PublicKey, SecretKey};
use fendermint_vm_actor_interface::eam::EthAddress;
use fvm_shared::address::Address;
use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng};
use serde_json::json;
use std::path::Path;
use tendermint_config::NodeKey;
use crate::{
cmd,
options::key::{
AddPeer, KeyAddressArgs, KeyArgs, KeyCommands, KeyFromEthArgs, KeyGenArgs, KeyIntoEthArgs,
KeyIntoTendermintArgs,
},
};
cmd! {
KeyArgs(self) {
match &self.command {
KeyCommands::Gen(args) => args.exec(()).await,
KeyCommands::IntoTendermint(args) => args.exec(()).await,
KeyCommands::AddPeer(args) => args.exec(()).await,
KeyCommands::Address(args) => args.exec(()).await,
KeyCommands::FromEth(args) => args.exec(()).await,
KeyCommands::IntoEth(args) => args.exec(()).await,
KeyCommands::ShowPeerId(args) => args.exec(()).await,
}
}
}
cmd! {
KeyFromEthArgs(self) {
let sk = read_secret_key_hex(&self.secret_key)?;
let pk = sk.public_key();
export(&self.out_dir, &self.name, "sk", &secret_to_b64(&sk))?;
export(&self.out_dir, &self.name, "pk", &public_to_b64(&pk))?;
Ok(())
}
}
cmd! {
KeyIntoEthArgs(self) {
let sk = read_secret_key(&self.secret_key)?;
let pk = sk.public_key();
export(&self.out_dir, &self.name, "sk", &hex::encode(sk.serialize()))?;
export(&self.out_dir, &self.name, "pk", &hex::encode(pk.serialize()))?;
export(&self.out_dir, &self.name, "addr", &hex::encode(EthAddress::from(pk).0))?;
Ok(())
}
}
// TODO: Actually use this key. For now we just generate keys at genesis.
cmd! {
KeyGenArgs(self) {
let mut rng = ChaCha20Rng::from_entropy();
let sk = SecretKey::random(&mut rng);
let pk = sk.public_key();
let bls_sk = bls_signatures::PrivateKey::generate(&mut rng);
let bls_pk = bls_sk.public_key();
export(&self.out_dir, &self.name, "sk", &secret_to_b64(&sk))?;
export(&self.out_dir, &self.name, "pk", &public_to_b64(&pk))?;
export(&self.out_dir, &self.name, "bls.sk", &to_b64(&bls_sk.as_bytes()))?;
export(&self.out_dir, &self.name, "bls.pk", &to_b64(&bls_pk.as_bytes()))?;
Ok(())
}
}
cmd! {
KeyIntoTendermintArgs(self) {
let sk = read_secret_key(&self.secret_key)?;
let pk = sk.public_key();
let vk = tendermint::crypto::default::ecdsa_secp256k1::VerifyingKey::from_sec1_bytes(&pk.serialize())
.map_err(|e| anyhow!("failed to convert public key: {e}"))?;
let pub_key = tendermint::PublicKey::Secp256k1(vk);
let address = tendermint::account::Id::from(pub_key);
// tendermint-rs doesn't seem to handle Secp256k1 private keys;
// if it did, we could use tendermint_config::PrivateValidatorKey
// to encode the data structure. Tendermint should be okay with it
// though, as long as we match the expected keys in the JSON.
let priv_validator_key = json! ({
"address": address,
"pub_key": pub_key,
"priv_key": {
"type": "tendermint/PrivKeySecp256k1",
"value": secret_to_b64(&sk)
}
});
let json = serde_json::to_string_pretty(&priv_validator_key)?;
std::fs::write(&self.out, json)?;
Ok(())
}
}
cmd! {
AddPeer(self) {
let node_key = NodeKey::load_json_file(&self.node_key_file).context("failed to read node key file")?;
let peer_id = format!("{}@{}", node_key.node_id(), self.network_addr);
let mut peers = std::fs::read_to_string(&self.local_peers_file).unwrap_or_default();
if peers.is_empty() {
peers.push_str(&peer_id);
} else {
peers.push(',');
peers.push_str(peer_id.as_str());
}
std::fs::write(&self.local_peers_file, peers).context("failed to write to the peers file")?;
Ok(())
}
}
cmd! {
KeyAddressArgs(self) {
let pk = read_public_key(&self.public_key)?;
let addr = Address::new_secp256k1(&pk.serialize())?;
println!("{}", addr);
Ok(())
}
}
cmd! {
KeyShowPeerIdArgs(self) {
let pk = read_public_key(&self.public_key)?;
// Just using this type because it does the conversion we need.
let vk = ipc_ipld_resolver::ValidatorKey::from(pk);
let pk: libp2p::identity::PublicKey = vk.into();
let id = pk.to_peer_id();
println!("{}", id);
Ok(())
}
}
fn secret_to_b64(sk: &SecretKey) -> String {
to_b64(sk.serialize().as_ref())
}
fn public_to_b64(pk: &PublicKey) -> String {
to_b64(&pk.serialize_compressed())
}
fn b64_to_public(b64: &str) -> anyhow::Result<PublicKey> {
let json = serde_json::json!(b64);
let pk: PublicKey = serde_json::from_value(json)?;
Ok(pk)
}
fn b64_to_secret(b64: &str) -> anyhow::Result<SecretKey> {
let bz = from_b64(b64)?;
let sk = SecretKey::try_from(bz)?;
Ok(sk)
}
fn b64_to_bls_secret(b64: &str) -> anyhow::Result<bls_signatures::PrivateKey> {
let bz = from_b64(b64)?;
let sk = bls_signatures::PrivateKey::from_bytes(&bz)?;
Ok(sk)
}
pub fn read_public_key(public_key: &Path) -> anyhow::Result<PublicKey> {
let b64 = std::fs::read_to_string(public_key).context("failed to read public key")?;
let pk = b64_to_public(&b64).context("failed to parse public key")?;
Ok(pk)
}
pub fn read_secret_key_hex(private_key: &Path) -> anyhow::Result<SecretKey> {
let hex_str = std::fs::read_to_string(private_key).context("failed to read private key")?;
let mut hex_str = hex_str.trim();
if hex_str.starts_with("0x") {
hex_str = &hex_str[2..];
}
let raw_secret = hex::decode(hex_str).context("cannot decode hex private key")?;
let sk = SecretKey::try_from(raw_secret).context("failed to parse secret key")?;
Ok(sk)
}
pub fn read_secret_key(secret_key: &Path) -> anyhow::Result<SecretKey> {
let b64 = std::fs::read_to_string(secret_key).context("failed to read secret key")?;
let sk = b64_to_secret(&b64).context("failed to parse secret key")?;
Ok(sk)
}
pub fn read_bls_secret_key(secret_key: &Path) -> anyhow::Result<bls_signatures::PrivateKey> {
let b64 = std::fs::read_to_string(secret_key).context("failed to read secret key")?;
let sk = b64_to_bls_secret(&b64).context("failed to parse secret key")?;
Ok(sk)
}
fn export(output_dir: &Path, name: &str, ext: &str, b64: &str) -> anyhow::Result<()> {
let output_path = output_dir.join(format!("{name}.{ext}"));
std::fs::write(output_path, b64)?;
Ok(())
}
#[cfg(test)]
mod tests {
use fendermint_vm_genesis::ValidatorKey;
use quickcheck_macros::quickcheck;
use crate::cmd::key::b64_to_public;
use super::public_to_b64;
#[quickcheck]
fn prop_public_key_deserialize_to_genesis(vk: ValidatorKey) {
let b64 = public_to_b64(&vk.0);
let pk = b64_to_public(&b64).unwrap();
assert_eq!(pk, vk.0)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/eth.rs | fendermint/app/src/cmd/eth.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::time::Duration;
use anyhow::Context;
use fendermint_eth_api::HybridClient;
use crate::{
cmd,
options::eth::{EthArgs, EthCommands},
settings::eth::EthSettings,
};
cmd! {
EthArgs(self, settings: EthSettings) {
match self.command.clone() {
EthCommands::Run { ws_url, http_url, connect_retry_delay } => {
let (client, driver) = HybridClient::new(http_url, ws_url, Duration::from_secs(connect_retry_delay)).context("failed to create HybridClient")?;
let driver_handle = tokio::spawn(async move { driver.run().await });
let result = run(settings, client).await;
// Await the driver's termination to ensure proper connection closure.
let _ = driver_handle.await;
result
}
}
}
}
/// Run the Ethereum API facade.
async fn run(settings: EthSettings, client: HybridClient) -> anyhow::Result<()> {
let gas = fendermint_eth_api::GasOpt {
min_gas_premium: settings.gas.min_gas_premium,
num_blocks_max_prio_fee: settings.gas.num_blocks_max_prio_fee,
max_fee_hist_size: settings.gas.max_fee_hist_size,
};
fendermint_eth_api::listen(
settings.listen,
client,
settings.filter_timeout,
settings.cache_capacity,
settings.max_nonce_gap,
gas,
)
.await
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/materializer.rs | fendermint/app/src/cmd/materializer.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::{Path, PathBuf};
use anyhow::{anyhow, bail};
use fendermint_app_options::materializer::*;
use fendermint_app_settings::utils::expand_tilde;
use fendermint_materializer::{
docker::{DockerMaterializer, DropPolicy},
logging::LoggingMaterializer,
manifest::Manifest,
materials::DefaultAccount,
testnet::Testnet,
AccountId, TestnetId, TestnetName,
};
use crate::cmd;
use super::key::{read_secret_key, read_secret_key_hex};
cmd! {
MaterializerArgs(self) {
let data_dir = expand_tilde(&self.data_dir);
let dm = || DockerMaterializer::new(&data_dir, self.seed).map(|m| m.with_policy(DropPolicy::PERSISTENT));
let lm = || dm().map(|m| LoggingMaterializer::new(m, "cli".to_string()));
match &self.command {
MaterializerCommands::Validate(args) => args.exec(()).await,
MaterializerCommands::Setup(args) => args.exec(lm()?).await,
MaterializerCommands::Remove(args) => args.exec(dm()?).await,
MaterializerCommands::ImportKey(args) => args.exec(data_dir).await,
}
}
}
cmd! {
MaterializerValidateArgs(self) {
validate(&self.manifest_file).await
}
}
cmd! {
MaterializerSetupArgs(self, m: LoggingMaterializer<DockerMaterializer>) {
setup(m, &self.manifest_file, self.validate).await
}
}
cmd! {
MaterializerRemoveArgs(self, m: DockerMaterializer) {
remove(m, self.testnet_id.clone()).await
}
}
cmd! {
MaterializerImportKeyArgs(self, data_dir: PathBuf) {
import_key(&data_dir, &self.secret_key, &self.manifest_file, &self.account_id)
}
}
/// Validate a manifest.
async fn validate(manifest_file: &Path) -> anyhow::Result<()> {
let (name, manifest) = read_manifest(manifest_file)?;
manifest.validate(&name).await
}
/// Setup a testnet.
async fn setup(
mut m: LoggingMaterializer<DockerMaterializer>,
manifest_file: &Path,
validate: bool,
) -> anyhow::Result<()> {
let (name, manifest) = read_manifest(manifest_file)?;
if validate {
manifest.validate(&name).await?;
}
let _testnet = Testnet::setup(&mut m, &name, &manifest).await?;
Ok(())
}
/// Remove a testnet.
async fn remove(mut m: DockerMaterializer, id: TestnetId) -> anyhow::Result<()> {
m.remove(&TestnetName::new(id)).await
}
/// Read a manifest file; use its file name as the testnet name.
fn read_manifest(manifest_file: &Path) -> anyhow::Result<(TestnetName, Manifest)> {
let testnet_id = manifest_file
.file_stem()
.ok_or_else(|| anyhow!("manifest file has no stem"))?
.to_string_lossy()
.to_string();
let name = TestnetName::new(testnet_id);
let manifest = Manifest::from_file(manifest_file)?;
Ok((name, manifest))
}
/// Import a secret key as one of the accounts in a manifest.
fn import_key(
data_dir: &Path,
secret_key: &Path,
manifest_file: &Path,
account_id: &AccountId,
) -> anyhow::Result<()> {
let (testnet_name, manifest) = read_manifest(manifest_file)?;
if !manifest.accounts.contains_key(account_id) {
bail!(
"account {account_id} cannot be found in the manifest at {}",
manifest_file.to_string_lossy()
);
}
let sk = read_secret_key(secret_key).or_else(|_| read_secret_key_hex(secret_key))?;
let _acc = DefaultAccount::create(data_dir, &testnet_name.account(account_id), sk)?;
Ok(())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/debug.rs | fendermint/app/src/cmd/debug.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use fendermint_app_options::debug::{
DebugArgs, DebugCommands, DebugExportTopDownEventsArgs, DebugIpcCommands,
};
use fendermint_vm_topdown::proxy::IPCProviderProxy;
use ipc_provider::{
config::subnet::{EVMSubnet, SubnetConfig},
IpcProvider,
};
use crate::cmd;
cmd! {
DebugArgs(self) {
match &self.command {
DebugCommands::Ipc { command } => command.exec(()).await,
}
}
}
cmd! {
DebugIpcCommands(self) {
match self {
DebugIpcCommands::ExportTopDownEvents(args) =>
export_topdown_events(args).await
}
}
}
async fn export_topdown_events(args: &DebugExportTopDownEventsArgs) -> anyhow::Result<()> {
// Configuration for the child subnet on the parent network,
// based on how it's done in `run.rs` and the `genesis ipc from-parent` command.
let parent_provider = IpcProvider::new_with_subnet(
None,
ipc_provider::config::Subnet {
id: args
.subnet_id
.parent()
.ok_or_else(|| anyhow!("subnet is not a child"))?,
config: SubnetConfig::Fevm(EVMSubnet {
provider_http: args.parent_endpoint.clone(),
provider_timeout: None,
auth_token: args.parent_auth_token.clone(),
registry_addr: args.parent_registry,
gateway_addr: args.parent_gateway,
}),
},
)?;
let parent_proxy = IPCProviderProxy::new(parent_provider, args.subnet_id.clone())
.context("failed to create provider proxy")?;
let events = fendermint_vm_topdown::sync::fetch_topdown_events(
&parent_proxy,
args.start_block_height,
args.end_block_height,
)
.await
.context("failed to fetch topdown events")?;
let json = serde_json::to_string_pretty(&events)?;
std::fs::write(&args.events_file, json)?;
Ok(())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/mod.rs | fendermint/app/src/cmd/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! CLI command implementations.
use crate::{
options::{Commands, Options},
settings::{utils::expand_tilde, Settings},
};
use anyhow::{anyhow, Context};
use async_trait::async_trait;
pub mod config;
pub mod debug;
pub mod eth;
pub mod genesis;
pub mod key;
pub mod materializer;
pub mod rpc;
pub mod run;
#[async_trait]
pub trait Cmd {
type Settings;
async fn exec(&self, settings: Self::Settings) -> anyhow::Result<()>;
}
/// Convenience macro to simplify declaring commands that either need or don't need settings.
///
/// ```text
/// cmd! {
/// <arg-type>(self, settings: <settings-type>) {
/// <exec-body>
/// }
/// }
/// ```
#[macro_export]
macro_rules! cmd {
// A command which needs access to some settings.
($name:ident($self:ident, $settings_name:ident : $settings_type:ty) $exec:expr) => {
#[async_trait::async_trait]
impl $crate::cmd::Cmd for $name {
type Settings = $settings_type;
async fn exec(&$self, $settings_name: Self::Settings) -> anyhow::Result<()> {
$exec
}
}
};
// A command which works on the full `Settings`.
($name:ident($self:ident, $settings:ident) $exec:expr) => {
cmd!($name($self, $settings: $crate::settings::Settings) $exec);
};
// A command which is self-contained and doesn't need any settings.
($name:ident($self:ident) $exec:expr) => {
cmd!($name($self, _settings: ()) $exec);
};
}
/// Execute the command specified in the options.
pub async fn exec(opts: &Options) -> anyhow::Result<()> {
match &opts.command {
Commands::Config(args) => args.exec(settings(opts)?).await,
Commands::Debug(args) => args.exec(()).await,
Commands::Run(args) => args.exec(settings(opts)?).await,
Commands::Key(args) => args.exec(()).await,
Commands::Genesis(args) => args.exec(()).await,
Commands::Rpc(args) => args.exec(()).await,
Commands::Eth(args) => args.exec(settings(opts)?.eth).await,
Commands::Materializer(args) => args.exec(()).await,
}
}
/// Try to parse the settings in the configuration directory.
fn settings(opts: &Options) -> anyhow::Result<Settings> {
let config_dir = match expand_tilde(opts.config_dir()) {
d if !d.exists() => return Err(anyhow!("'{d:?}' does not exist")),
d if !d.is_dir() => return Err(anyhow!("'{d:?}' is a not a directory")),
d => d,
};
tracing::info!(
path = config_dir.to_string_lossy().into_owned(),
"reading configuration"
);
let settings =
Settings::new(&config_dir, &opts.home_dir, &opts.mode).context("error parsing settings")?;
Ok(settings)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/run.rs | fendermint/app/src/cmd/run.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, bail, Context};
use async_stm::atomically_or_err;
use fendermint_abci::ApplicationService;
use fendermint_app::events::{ParentFinalityVoteAdded, ParentFinalityVoteIgnored};
use fendermint_app::ipc::{AppParentFinalityQuery, AppVote};
use fendermint_app::{App, AppConfig, AppStore, BitswapBlockstore};
use fendermint_app_settings::AccountKind;
use fendermint_crypto::SecretKey;
use fendermint_rocksdb::{blockstore::NamespaceBlockstore, namespaces, RocksDb, RocksDbConfig};
use fendermint_tracing::emit;
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_interpreter::chain::ChainEnv;
use fendermint_vm_interpreter::fvm::upgrades::UpgradeScheduler;
use fendermint_vm_interpreter::{
bytes::{BytesMessageInterpreter, ProposalPrepareMode},
chain::{ChainMessageInterpreter, CheckpointPool},
fvm::{Broadcaster, FvmMessageInterpreter, ValidatorContext},
signed::SignedMessageInterpreter,
};
use fendermint_vm_resolver::ipld::IpldResolver;
use fendermint_vm_snapshot::{SnapshotManager, SnapshotParams};
use fendermint_vm_topdown::proxy::IPCProviderProxy;
use fendermint_vm_topdown::sync::launch_polling_syncer;
use fendermint_vm_topdown::voting::{publish_vote_loop, Error as VoteError, VoteTally};
use fendermint_vm_topdown::{CachedFinalityProvider, IPCParentFinality, Toggle};
use fvm_shared::address::{current_network, Address, Network};
use ipc_ipld_resolver::{Event as ResolverEvent, VoteRecord};
use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig};
use ipc_provider::IpcProvider;
use libp2p::identity::secp256k1;
use libp2p::identity::Keypair;
use std::sync::Arc;
use tokio::sync::broadcast::error::RecvError;
use tower::ServiceBuilder;
use tracing::info;
use crate::cmd::key::{read_bls_secret_key, read_secret_key};
use crate::{cmd, options::run::RunArgs, settings::Settings};
cmd! {
RunArgs(self, settings) {
run(settings).await
}
}
// Database collection names.
namespaces! {
Namespaces {
app,
state_hist,
state_store,
bit_store
}
}
/// Run the Fendermint ABCI Application.
///
/// This method acts as our composition root.
async fn run(settings: Settings) -> anyhow::Result<()> {
let tendermint_rpc_url = settings.tendermint_rpc_url()?;
tracing::info!("Connecting to Tendermint at {tendermint_rpc_url}");
let tendermint_client: tendermint_rpc::HttpClient =
tendermint_rpc::HttpClient::new(tendermint_rpc_url)
.context("failed to create Tendermint client")?;
// Prometheus metrics
let metrics_registry = if settings.metrics.enabled {
let registry = prometheus::Registry::new();
fendermint_app::metrics::register_app_metrics(®istry)
.context("failed to register metrics")?;
Some(registry)
} else {
None
};
tracing::info!("Validator key: {:?}", settings.validator_key);
tracing::info!("BLS key: {:?}", settings.bls_signing_key);
let validator = match settings.validator_key {
Some(ref key) => {
let sk = key.path(settings.home_dir());
if sk.exists() && sk.is_file() {
let sk = read_secret_key(&sk).context("failed to read validator key")?;
let addr = to_address(&sk, &key.kind)?;
tracing::info!("validator key address: {addr} detected");
Some((sk, addr))
} else {
bail!("validator key does not exist: {}", sk.to_string_lossy());
}
}
None => {
tracing::debug!("validator key not configured");
None
}
};
let bls_private_key = match settings.bls_signing_key {
Some(ref key) => {
let sk = key.path(settings.home_dir());
if sk.exists() && sk.is_file() {
let sk = read_bls_secret_key(&sk).context("failed to read BLS key")?;
tracing::info!(
"cetf signing key detected with public key: {:#?}",
sk.public_key()
);
Some(sk)
} else {
bail!("cetf signing key does not exist: {}", sk.to_string_lossy());
}
}
None => {
tracing::debug!("cetf signing key not configured");
None
}
};
let validator_keypair = validator.as_ref().map(|(sk, _)| {
let mut bz = sk.serialize();
let sk = libp2p::identity::secp256k1::SecretKey::try_from_bytes(&mut bz)
.expect("secp256k1 secret key");
let kp = libp2p::identity::secp256k1::Keypair::from(sk);
libp2p::identity::Keypair::from(kp)
});
let validator_ctx = validator.map(|(sk, addr)| {
// For now we are using the validator key for submitting transactions.
// This allows us to identify transactions coming from empowered validators, to give priority to protocol related transactions.
let broadcaster = Broadcaster::new(
tendermint_client.clone(),
addr,
sk.clone(),
settings.fvm.gas_fee_cap.clone(),
settings.fvm.gas_premium.clone(),
settings.fvm.gas_overestimation_rate,
)
.with_max_retries(settings.broadcast.max_retries)
.with_retry_delay(settings.broadcast.retry_delay);
ValidatorContext::new(sk, bls_private_key.unwrap(), broadcaster)
});
let testing_settings = match settings.testing.as_ref() {
Some(_) if current_network() == Network::Mainnet => {
bail!("testing settings are not allowed on Mainnet");
}
other => other,
};
let interpreter = FvmMessageInterpreter::<NamespaceBlockstore, _>::new(
tendermint_client.clone(),
validator_ctx,
settings.contracts_dir(),
settings.fvm.gas_overestimation_rate,
settings.fvm.gas_search_step,
settings.fvm.exec_in_check,
UpgradeScheduler::new(),
)
.with_push_chain_meta(testing_settings.map_or(true, |t| t.push_chain_meta));
let interpreter = SignedMessageInterpreter::new(interpreter);
let interpreter = ChainMessageInterpreter::<_, NamespaceBlockstore>::new(interpreter);
let interpreter = BytesMessageInterpreter::new(
interpreter,
ProposalPrepareMode::PrependOnly,
false,
settings.abci.block_max_msgs,
);
let ns = Namespaces::default();
let db = open_db(&settings, &ns).context("error opening DB")?;
// Blockstore for actors.
let state_store =
NamespaceBlockstore::new(db.clone(), ns.state_store).context("error creating state DB")?;
let checkpoint_pool = CheckpointPool::new();
let parent_finality_votes = VoteTally::empty();
let topdown_enabled = settings.topdown_enabled();
// If enabled, start a resolver that communicates with the application through the resolve pool.
if settings.resolver_enabled() {
let mut service =
make_resolver_service(&settings, db.clone(), state_store.clone(), ns.bit_store)?;
// Register all metrics from the IPLD resolver stack
if let Some(ref registry) = metrics_registry {
service
.register_metrics(registry)
.context("failed to register IPLD resolver metrics")?;
}
let client = service.client();
let own_subnet_id = settings.ipc.subnet_id.clone();
client
.add_provided_subnet(own_subnet_id.clone())
.context("error adding own provided subnet.")?;
let resolver = IpldResolver::new(
client.clone(),
checkpoint_pool.queue(),
settings.resolver.retry_delay,
own_subnet_id.clone(),
);
if topdown_enabled {
if let Some(key) = validator_keypair {
let parent_finality_votes = parent_finality_votes.clone();
tracing::info!("starting the parent finality vote gossip loop...");
tokio::spawn(async move {
publish_vote_loop(
parent_finality_votes,
settings.ipc.vote_interval,
settings.ipc.vote_timeout,
key,
own_subnet_id,
client,
|height, block_hash| {
AppVote::ParentFinality(IPCParentFinality { height, block_hash })
},
)
.await
});
}
} else {
tracing::info!("parent finality vote gossip disabled");
}
tracing::info!("subscribing to gossip...");
let rx = service.subscribe();
let parent_finality_votes = parent_finality_votes.clone();
tokio::spawn(async move {
dispatch_resolver_events(rx, parent_finality_votes, topdown_enabled).await;
});
tracing::info!("starting the IPLD Resolver Service...");
tokio::spawn(async move {
if let Err(e) = service.run().await {
tracing::error!("IPLD Resolver Service failed: {e:#}")
}
});
tracing::info!("starting the IPLD Resolver...");
tokio::spawn(async move { resolver.run().await });
} else {
tracing::info!("IPLD Resolver disabled.")
}
let (parent_finality_provider, ipc_tuple) = if topdown_enabled {
info!("topdown finality enabled");
let topdown_config = settings.ipc.topdown_config()?;
let mut config = fendermint_vm_topdown::Config::new(
topdown_config.chain_head_delay,
topdown_config.polling_interval,
topdown_config.exponential_back_off,
topdown_config.exponential_retry_limit,
)
.with_proposal_delay(topdown_config.proposal_delay)
.with_max_proposal_range(topdown_config.max_proposal_range);
if let Some(v) = topdown_config.max_cache_blocks {
info!(value = v, "setting max cache blocks");
config = config.with_max_cache_blocks(v);
}
let ipc_provider = Arc::new(make_ipc_provider_proxy(&settings)?);
let finality_provider =
CachedFinalityProvider::uninitialized(config.clone(), ipc_provider.clone()).await?;
let p = Arc::new(Toggle::enabled(finality_provider));
(p, Some((ipc_provider, config)))
} else {
info!("topdown finality disabled");
(Arc::new(Toggle::disabled()), None)
};
// Start a snapshot manager in the background.
let snapshots = if settings.snapshots.enabled {
let (manager, client) = SnapshotManager::new(
state_store.clone(),
SnapshotParams {
snapshots_dir: settings.snapshots_dir(),
download_dir: settings.snapshots.download_dir(),
block_interval: settings.snapshots.block_interval,
chunk_size: settings.snapshots.chunk_size_bytes,
hist_size: settings.snapshots.hist_size,
last_access_hold: settings.snapshots.last_access_hold,
sync_poll_interval: settings.snapshots.sync_poll_interval,
},
)
.context("failed to create snapshot manager")?;
tracing::info!("starting the SnapshotManager...");
let tendermint_client = tendermint_client.clone();
tokio::spawn(async move { manager.run(tendermint_client).await });
Some(client)
} else {
info!("snapshots disabled");
None
};
let app: App<_, _, AppStore, _> = App::new(
AppConfig {
app_namespace: ns.app,
state_hist_namespace: ns.state_hist,
state_hist_size: settings.db.state_hist_size,
builtin_actors_bundle: settings.builtin_actors_bundle(),
custom_actors_bundle: settings.custom_actors_bundle(),
halt_height: settings.halt_height,
},
db,
state_store,
interpreter,
ChainEnv {
checkpoint_pool,
parent_finality_provider: parent_finality_provider.clone(),
parent_finality_votes: parent_finality_votes.clone(),
},
snapshots,
)?;
if let Some((agent_proxy, config)) = ipc_tuple {
let app_parent_finality_query = AppParentFinalityQuery::new(app.clone());
tokio::spawn(async move {
match launch_polling_syncer(
app_parent_finality_query,
config,
parent_finality_provider,
parent_finality_votes,
agent_proxy,
tendermint_client,
)
.await
{
Ok(_) => {}
Err(e) => tracing::error!("cannot launch polling syncer: {e}"),
}
});
}
// Start the metrics on a background thread.
if let Some(registry) = metrics_registry {
info!(
listen_addr = settings.metrics.listen.to_string(),
"serving metrics"
);
let mut builder = prometheus_exporter::Builder::new(settings.metrics.listen.try_into()?);
builder.with_registry(registry);
let _ = builder.start().context("failed to start metrics server")?;
} else {
info!("metrics disabled");
}
let service = ApplicationService(app);
// Split it into components.
let (consensus, mempool, snapshot, info) =
tower_abci::v038::split::service(service, settings.abci.bound);
// Hand those components to the ABCI server. This is where tower layers could be added.
// TODO: Check out the examples about load shedding in `info` requests.
let server = tower_abci::v038::Server::builder()
.consensus(
// Limiting the concurrency to 1 here because the `AplicationService::poll_ready` always
// reports `Ready`, because it doesn't know which request it's going to get.
// Not limiting the concurrency to 1 can lead to transactions being applied
// in different order across nodes. The buffer size has to be large enough
// to allow all in-flight requests to not block message handling in
// `tower_abci::Connection::run`, which could lead to deadlocks.
// With ABCI++ we need to be able to handle all block transactions plus the begin/end/commit
// around it. With ABCI 2.0 we'll get the block as a whole, which makes this easier.
ServiceBuilder::new()
.buffer(settings.abci.block_max_msgs + 3)
.concurrency_limit(1)
.service(consensus),
)
.snapshot(snapshot)
.mempool(mempool)
.info(info)
.finish()
.context("error creating ABCI server")?;
// Run the ABCI server.
server
.listen_tcp(settings.abci.listen.to_string())
.await
.map_err(|e| anyhow!("error listening: {e}"))?;
Ok(())
}
/// Open database with all
fn open_db(settings: &Settings, ns: &Namespaces) -> anyhow::Result<RocksDb> {
let path = settings.data_dir().join("rocksdb");
info!(
path = path.to_string_lossy().into_owned(),
"opening database"
);
let config = RocksDbConfig {
compaction_style: settings.db.compaction_style.to_string(),
..Default::default()
};
let db = RocksDb::open_cf(path, &config, ns.values().iter())?;
Ok(db)
}
fn make_resolver_service(
settings: &Settings,
db: RocksDb,
state_store: NamespaceBlockstore,
bit_store_ns: String,
) -> anyhow::Result<ipc_ipld_resolver::Service<libipld::DefaultParams, AppVote>> {
// Blockstore for Bitswap.
let bit_store = NamespaceBlockstore::new(db, bit_store_ns).context("error creating bit DB")?;
// Blockstore for Bitswap with a fallback on the actor store for reads.
let bitswap_store = BitswapBlockstore::new(state_store, bit_store);
let config = to_resolver_config(settings).context("error creating resolver config")?;
let service = ipc_ipld_resolver::Service::new(config, bitswap_store)
.context("error creating IPLD Resolver Service")?;
Ok(service)
}
fn make_ipc_provider_proxy(settings: &Settings) -> anyhow::Result<IPCProviderProxy> {
let topdown_config = settings.ipc.topdown_config()?;
let subnet = ipc_provider::config::Subnet {
id: settings
.ipc
.subnet_id
.parent()
.ok_or_else(|| anyhow!("subnet has no parent"))?,
config: SubnetConfig::Fevm(EVMSubnet {
provider_http: topdown_config
.parent_http_endpoint
.to_string()
.parse()
.unwrap(),
provider_timeout: topdown_config.parent_http_timeout,
auth_token: topdown_config.parent_http_auth_token.as_ref().cloned(),
registry_addr: topdown_config.parent_registry,
gateway_addr: topdown_config.parent_gateway,
}),
};
info!("init ipc provider with subnet: {}", subnet.id);
let ipc_provider = IpcProvider::new_with_subnet(None, subnet)?;
IPCProviderProxy::new(ipc_provider, settings.ipc.subnet_id.clone())
}
fn to_resolver_config(settings: &Settings) -> anyhow::Result<ipc_ipld_resolver::Config> {
use ipc_ipld_resolver::{
Config, ConnectionConfig, ContentConfig, DiscoveryConfig, MembershipConfig, NetworkConfig,
};
let r = &settings.resolver;
let local_key: Keypair = {
let path = r.network.local_key(settings.home_dir());
let sk = read_secret_key(&path)?;
let sk = secp256k1::SecretKey::try_from_bytes(sk.serialize())?;
secp256k1::Keypair::from(sk).into()
};
let network_name = format!(
"ipld-resolver-{}-{}",
settings.ipc.subnet_id.root_id(),
r.network.network_name
);
let config = Config {
connection: ConnectionConfig {
listen_addr: r.connection.listen_addr.clone(),
external_addresses: r.connection.external_addresses.clone(),
expected_peer_count: r.connection.expected_peer_count,
max_incoming: r.connection.max_incoming,
max_peers_per_query: r.connection.max_peers_per_query,
event_buffer_capacity: r.connection.event_buffer_capacity,
},
network: NetworkConfig {
local_key,
network_name,
},
discovery: DiscoveryConfig {
static_addresses: r.discovery.static_addresses.clone(),
target_connections: r.discovery.target_connections,
enable_kademlia: r.discovery.enable_kademlia,
},
membership: MembershipConfig {
static_subnets: r.membership.static_subnets.clone(),
max_subnets: r.membership.max_subnets,
publish_interval: r.membership.publish_interval,
min_time_between_publish: r.membership.min_time_between_publish,
max_provider_age: r.membership.max_provider_age,
},
content: ContentConfig {
rate_limit_bytes: r.content.rate_limit_bytes,
rate_limit_period: r.content.rate_limit_period,
},
};
Ok(config)
}
fn to_address(sk: &SecretKey, kind: &AccountKind) -> anyhow::Result<Address> {
let pk = sk.public_key().serialize();
match kind {
AccountKind::Regular => Ok(Address::new_secp256k1(&pk)?),
AccountKind::Ethereum => Ok(Address::from(EthAddress::new_secp256k1(&pk)?)),
}
}
async fn dispatch_resolver_events(
mut rx: tokio::sync::broadcast::Receiver<ResolverEvent<AppVote>>,
parent_finality_votes: VoteTally,
topdown_enabled: bool,
) {
loop {
match rx.recv().await {
Ok(event) => match event {
ResolverEvent::ReceivedPreemptive(_, _) => {}
ResolverEvent::ReceivedVote(vote) => {
dispatch_vote(*vote, &parent_finality_votes, topdown_enabled).await;
}
},
Err(RecvError::Lagged(n)) => {
tracing::warn!("the resolver service skipped {n} gossip events")
}
Err(RecvError::Closed) => {
tracing::error!("the resolver service stopped receiving gossip");
return;
}
}
}
}
async fn dispatch_vote(
vote: VoteRecord<AppVote>,
parent_finality_votes: &VoteTally,
topdown_enabled: bool,
) {
match vote.content {
AppVote::ParentFinality(f) => {
if !topdown_enabled {
tracing::debug!("ignoring vote; topdown disabled");
return;
}
let res = atomically_or_err(|| {
parent_finality_votes.add_vote(
vote.public_key.clone(),
f.height,
f.block_hash.clone(),
)
})
.await;
let added = match res {
Ok(added) => {
added
}
Err(e @ VoteError::Equivocation(_, _, _, _)) => {
tracing::warn!(error = e.to_string(), "failed to handle vote");
false
}
Err(e @ (
VoteError::Uninitialized // early vote, we're not ready yet
| VoteError::UnpoweredValidator(_) // maybe arrived too early or too late, or spam
| VoteError::UnexpectedBlock(_, _) // won't happen here
)) => {
tracing::debug!(error = e.to_string(), "failed to handle vote");
false
}
};
let block_height = f.height;
let block_hash = &hex::encode(&f.block_hash);
let validator = &format!("{:?}", vote.public_key);
if added {
emit!(
DEBUG,
ParentFinalityVoteAdded {
block_height,
block_hash,
validator,
}
)
} else {
emit!(
DEBUG,
ParentFinalityVoteIgnored {
block_height,
block_hash,
validator,
}
)
}
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/cmd/rpc.rs | fendermint/app/src/cmd/rpc.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::future::Future;
use std::path::PathBuf;
use std::pin::Pin;
use anyhow::Context;
use async_trait::async_trait;
use bytes::Bytes;
use fendermint_app_options::genesis::AccountKind;
use fendermint_crypto::{to_b64, SecretKey};
use fendermint_rpc::client::BoundFendermintClient;
use fendermint_rpc::tx::{
AsyncResponse, BoundClient, CallClient, CommitResponse, SyncResponse, TxAsync, TxClient,
TxCommit, TxSync,
};
use fendermint_vm_core::chainid;
use fendermint_vm_message::chain::ChainMessage;
use fendermint_vm_message::query::FvmQueryHeight;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::address::Address;
use fvm_shared::econ::TokenAmount;
use fvm_shared::MethodNum;
use serde::Serialize;
use serde_json::json;
use tendermint::abci::types::ExecTxResult;
use tendermint::block::Height;
use tendermint_rpc::HttpClient;
use fendermint_rpc::message::{GasParams, SignedMessageFactory};
use fendermint_rpc::{client::FendermintClient, query::QueryClient};
use fendermint_vm_actor_interface::eam::{self, CreateReturn, EthAddress};
use crate::cmd;
use crate::options::rpc::{BroadcastMode, FevmArgs, RpcFevmCommands, TransArgs};
use crate::options::rpc::{RpcArgs, RpcCommands, RpcQueryCommands};
use super::key::read_secret_key;
cmd! {
RpcArgs(self) {
let client = FendermintClient::new_http(self.url.clone(), self.proxy_url.clone())?;
match self.command.clone() {
RpcCommands::Query { height, command } => {
let height = Height::try_from(height)?;
query(client, height, command).await
},
RpcCommands::Transfer { args, to } => {
transfer(client, args, to).await
},
RpcCommands::Transaction { args, to, method_number, params } => {
transaction(client, args, to, method_number, params.clone()).await
},
RpcCommands::Fevm { args, command } => match command {
RpcFevmCommands::Create { contract, constructor_args } => {
fevm_create(client, args, contract, constructor_args).await
}
RpcFevmCommands::Invoke { args: FevmArgs { contract, method, method_args }} => {
fevm_invoke(client, args, contract, method, method_args).await
}
RpcFevmCommands::Call { args: FevmArgs { contract, method, method_args }, height} => {
let height = Height::try_from(height)?;
fevm_call(client, args, contract, method, method_args, height).await
}
RpcFevmCommands::EstimateGas { args: FevmArgs { contract, method, method_args }, height} => {
let height = Height::try_from(height)?;
fevm_estimate_gas(client, args, contract, method, method_args, height).await
}
}
}
}
}
/// Run an ABCI query and print the results on STDOUT.
async fn query(
client: FendermintClient,
height: Height,
command: RpcQueryCommands,
) -> anyhow::Result<()> {
let height = FvmQueryHeight::from(height.value());
match command {
RpcQueryCommands::Ipld { cid } => match client.ipld(&cid, height).await? {
Some(data) => println!("{}", to_b64(&data)),
None => eprintln!("CID not found"),
},
RpcQueryCommands::ActorState { address } => {
match client.actor_state(&address, height).await?.value {
Some((id, state)) => {
let out = json! ({
"id": id,
"state": state,
});
print_json(&out)?;
}
None => {
eprintln!("actor not found")
}
}
}
RpcQueryCommands::StateParams => {
let res = client.state_params(height).await?;
let json = json!({ "response": res });
print_json(&json)?;
}
};
Ok(())
}
/// Create a client, make a call to Tendermint with a closure, then maybe extract some JSON
/// depending on the return value, finally print the result in JSON.
async fn broadcast_and_print<F, T, G>(
client: FendermintClient,
args: TransArgs,
f: F,
g: G,
) -> anyhow::Result<()>
where
F: FnOnce(
TransClient,
TokenAmount,
GasParams,
) -> Pin<Box<dyn Future<Output = anyhow::Result<BroadcastResponse<T>>> + Send>>,
G: FnOnce(T) -> serde_json::Value,
T: Sync + Send,
{
let client = TransClient::new(client, &args)?;
let gas_params = gas_params(&args);
let res = f(client, args.value, gas_params).await?;
let json = match res {
BroadcastResponse::Async(res) => json!({"response": res.response}),
BroadcastResponse::Sync(res) => json!({"response": res.response}),
BroadcastResponse::Commit(res) => {
let return_data = res.return_data.map(g).unwrap_or(serde_json::Value::Null);
json!({"response": res.response, "return_data": return_data})
}
};
print_json(&json)
}
/// Execute token transfer through RPC and print the response to STDOUT as JSON.
async fn transfer(client: FendermintClient, args: TransArgs, to: Address) -> anyhow::Result<()> {
broadcast_and_print(
client,
args,
|mut client, value, gas_params| {
Box::pin(async move { client.transfer(to, value, gas_params).await })
},
|_| serde_json::Value::Null,
)
.await
}
/// Execute a transaction through RPC and print the response to STDOUT as JSON.
///
/// If there was any data returned it's rendered in hexadecimal format.
async fn transaction(
client: FendermintClient,
args: TransArgs,
to: Address,
method_num: MethodNum,
params: RawBytes,
) -> anyhow::Result<()> {
broadcast_and_print(
client,
args,
|mut client, value, gas_params| {
Box::pin(async move {
client
.transaction(to, method_num, params, value, gas_params)
.await
})
},
|data| serde_json::Value::String(hex::encode(data.bytes())),
)
.await
}
/// Deploy an EVM contract through RPC and print the response to STDOUT as JSON.
///
/// The returned EVM contract addresses are included as a JSON object.
async fn fevm_create(
client: FendermintClient,
args: TransArgs,
contract: PathBuf,
constructor_args: Bytes,
) -> anyhow::Result<()> {
let contract_hex = std::fs::read_to_string(contract).context("failed to read contract")?;
let contract_bytes = hex::decode(contract_hex).context("failed to parse contract from hex")?;
let contract_bytes = Bytes::from(contract_bytes);
broadcast_and_print(
client,
args,
|mut client, value, gas_params| {
Box::pin(async move {
client
.fevm_create(contract_bytes, constructor_args, value, gas_params)
.await
})
},
create_return_to_json,
)
.await
}
/// Invoke an EVM contract through RPC and print the response to STDOUT as JSON.
async fn fevm_invoke(
client: FendermintClient,
args: TransArgs,
contract: Address,
method: Bytes,
method_args: Bytes,
) -> anyhow::Result<()> {
let calldata = Bytes::from([method, method_args].concat());
broadcast_and_print(
client,
args,
|mut client, value, gas_params| {
Box::pin(async move {
client
.fevm_invoke(contract, calldata, value, gas_params)
.await
})
},
|data| serde_json::Value::String(hex::encode(data)),
)
.await
}
/// Call an EVM contract through RPC and print the response to STDOUT as JSON.
async fn fevm_call(
client: FendermintClient,
args: TransArgs,
contract: Address,
method: Bytes,
method_args: Bytes,
height: Height,
) -> anyhow::Result<()> {
let calldata = Bytes::from([method, method_args].concat());
let mut client = TransClient::new(client, &args)?;
let gas_params = gas_params(&args);
let value = args.value;
let height = FvmQueryHeight::from(height.value());
let res = client
.inner
.fevm_call(contract, calldata, value, gas_params, height)
.await?;
let return_data = res
.return_data
.map(|bz| serde_json::Value::String(hex::encode(bz)))
.unwrap_or(serde_json::Value::Null);
let json = json!({"response": res.response, "return_data": return_data});
print_json(&json)
}
/// Estimate the gas of an EVM call through RPC and print the response to STDOUT as JSON.
async fn fevm_estimate_gas(
client: FendermintClient,
args: TransArgs,
contract: Address,
method: Bytes,
method_args: Bytes,
height: Height,
) -> anyhow::Result<()> {
let calldata = Bytes::from([method, method_args].concat());
let mut client = TransClient::new(client, &args)?;
let gas_params = gas_params(&args);
let value = args.value;
let height = FvmQueryHeight::from(height.value());
let res = client
.inner
.fevm_estimate_gas(contract, calldata, value, gas_params, height)
.await?;
let json = json!({ "response": res });
print_json(&json)
}
/// Print out pretty-printed JSON.
///
/// People can use `jq` to turn it into compact form if they want to save the results to a `.jsonline`
/// file, but the default of having human readable output seems more useful.
fn print_json<T: Serialize>(value: &T) -> anyhow::Result<()> {
let json = serde_json::to_string_pretty(&value)?;
println!("{}", json);
Ok(())
}
/// Print all the various addresses we can use to refer to an EVM contract.
fn create_return_to_json(ret: CreateReturn) -> serde_json::Value {
// The only reference I can point to about how to use them are the integration tests:
// https://github.com/filecoin-project/ref-fvm/pull/1507
// IIRC to call the contract we need to use the `actor_address` or the `delegated_address` in `to`.
json!({
"actor_id": ret.actor_id,
"actor_address": Address::new_id(ret.actor_id).to_string(),
"actor_id_as_eth_address": hex::encode(eam::EthAddress::from_id(ret.actor_id).0),
"eth_address": hex::encode(ret.eth_address.0),
"delegated_address": ret.delegated_address().to_string(),
"robust_address": ret.robust_address.map(|a| a.to_string())
})
}
pub enum BroadcastResponse<T> {
Async(AsyncResponse<T>),
Sync(SyncResponse<T>),
Commit(CommitResponse<T>),
}
struct BroadcastModeWrapper(BroadcastMode);
impl fendermint_rpc::tx::BroadcastMode for BroadcastModeWrapper {
type Response<T> = BroadcastResponse<T>;
}
struct TransClient {
inner: BoundFendermintClient<HttpClient>,
broadcast_mode: BroadcastModeWrapper,
}
impl TransClient {
pub fn new(client: FendermintClient, args: &TransArgs) -> anyhow::Result<Self> {
let sk = read_secret_key(&args.secret_key)?;
let addr = to_address(&sk, &args.account_kind)?;
let chain_id = chainid::from_str_hashed(&args.chain_name)?;
let mf = SignedMessageFactory::new(sk, addr, args.sequence, chain_id);
let client = client.bind(mf);
let client = Self {
inner: client,
broadcast_mode: BroadcastModeWrapper(args.broadcast_mode),
};
Ok(client)
}
}
impl BoundClient for TransClient {
fn message_factory_mut(&mut self) -> &mut SignedMessageFactory {
self.inner.message_factory_mut()
}
}
#[async_trait]
impl TxClient<BroadcastModeWrapper> for TransClient {
async fn perform<F, T>(&self, msg: ChainMessage, f: F) -> anyhow::Result<BroadcastResponse<T>>
where
F: FnOnce(&ExecTxResult) -> anyhow::Result<T> + Sync + Send,
T: Sync + Send,
{
match self.broadcast_mode.0 {
BroadcastMode::Async => {
let res = TxClient::<TxAsync>::perform(&self.inner, msg, f).await?;
Ok(BroadcastResponse::Async(res))
}
BroadcastMode::Sync => {
let res = TxClient::<TxSync>::perform(&self.inner, msg, f).await?;
Ok(BroadcastResponse::Sync(res))
}
BroadcastMode::Commit => {
let res = TxClient::<TxCommit>::perform(&self.inner, msg, f).await?;
Ok(BroadcastResponse::Commit(res))
}
}
}
}
fn gas_params(args: &TransArgs) -> GasParams {
GasParams {
gas_limit: args.gas_limit,
gas_fee_cap: args.gas_fee_cap.clone(),
gas_premium: args.gas_premium.clone(),
}
}
fn to_address(sk: &SecretKey, kind: &AccountKind) -> anyhow::Result<Address> {
let pk = sk.public_key().serialize();
match kind {
AccountKind::Regular => Ok(Address::new_secp256k1(&pk)?),
AccountKind::Ethereum => Ok(Address::from(EthAddress::new_secp256k1(&pk)?)),
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/metrics/mod.rs | fendermint/app/src/metrics/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod prometheus;
mod tracing;
pub use prometheus::app::register_metrics as register_app_metrics;
pub use tracing::layer;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/metrics/tracing.rs | fendermint/app/src/metrics/tracing.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Subscribing to tracing events and turning them into metrics.
use std::marker::PhantomData;
use tracing::{Event, Subscriber};
use tracing_subscriber::{filter, layer, registry::LookupSpan, Layer};
use super::prometheus::app as am;
use crate::events::*;
/// Create a layer that handles events by incrementing metrics.
pub fn layer<S>() -> impl Layer<S>
where
S: Subscriber,
for<'a> S: LookupSpan<'a>,
{
MetricsLayer::new().with_filter(filter::filter_fn(|md| md.name().starts_with("event::")))
}
struct MetricsLayer<S> {
_subscriber: PhantomData<S>,
}
impl<S> MetricsLayer<S> {
pub fn new() -> Self {
Self {
_subscriber: PhantomData,
}
}
}
/// Check that the field exist on a type; if it doesn't this won't compile.
/// This ensures that we're mapping fields with the correct name.
macro_rules! check_field {
($event_ty:ident :: $field:ident) => {{
if false {
#[allow(clippy::needless_update)]
let _event = $event_ty {
$field: Default::default(),
..Default::default()
};
}
}};
}
/// Set a gague to an absolute value based on a field in an event.
macro_rules! set_gauge {
($event:ident, $event_ty:ident :: $field:ident, $gauge:expr) => {
check_field!($event_ty::$field);
let mut fld = visitors::FindU64::new(stringify!($field));
$event.record(&mut fld);
$gauge.set(fld.value as i64);
};
}
/// Set a gauge to the maximum of its value and a field in an event.
macro_rules! max_gauge {
($event:ident, $event_ty:ident :: $field:ident, $gauge:expr) => {
check_field!($event_ty::$field);
let mut fld = visitors::FindU64::new(stringify!($field));
$event.record(&mut fld);
let curr = $gauge.get();
$gauge.set(std::cmp::max(fld.value as i64, curr));
};
}
/// Increment a counter by the value of a field in the event.
macro_rules! inc_counter {
($event:ident, $event_ty:ident :: $field:ident, $counter:expr) => {
check_field!($event_ty::$field);
let mut fld = visitors::FindU64::new(stringify!($field));
$event.record(&mut fld);
$counter.inc_by(fld.value);
};
}
/// Increment a counter by 1.
///
/// The field is ignored, it's only here because of how the macros look like.
macro_rules! inc1_counter {
($event:ident, $event_ty:ident :: $field:ident, $counter:expr) => {
check_field!($event_ty::$field);
$counter.inc();
};
}
/// Produce the prefixed event name from the type name.
macro_rules! event_name {
($event_ty:ident) => {
concat!("event::", stringify!($event_ty))
};
}
/// Call one of the macros that set values on a metric.
macro_rules! event_mapping {
($op:ident, $event:ident, $event_ty:ident :: $field:ident, $metric:expr) => {
$op!($event, $event_ty::$field, $metric);
};
}
/// Match the event name to event DTO types and within the map fields to metrics.
macro_rules! event_match {
($event:ident { $( $event_ty:ident { $( $field:ident => $op:ident ! $metric:expr ),* $(,)? } ),* } ) => {
match $event.metadata().name() {
$(
event_name!($event_ty) => {
$(
event_mapping!($op, $event, $event_ty :: $field, $metric);
)*
}
)*
_ => {}
}
};
}
impl<S: Subscriber> Layer<S> for MetricsLayer<S> {
fn on_event(&self, event: &Event<'_>, _ctx: layer::Context<'_, S>) {
event_match!(event {
NewParentView {
block_height => set_gauge ! &am::TOPDOWN_VIEW_BLOCK_HEIGHT,
num_msgs => inc_counter ! &am::TOPDOWN_VIEW_NUM_MSGS,
num_validator_changes => inc_counter ! &am::TOPDOWN_VIEW_NUM_VAL_CHNGS,
},
ParentFinalityCommitted {
block_height => set_gauge ! &am::TOPDOWN_FINALIZED_BLOCK_HEIGHT,
},
ParentFinalityVoteAdded {
// This one can move up and down randomly as votes come in, but statistically should
// be less likely to be affected by Byzantine validators casting nonsense votes.
block_height => set_gauge ! &am::TOPDOWN_FINALITY_VOTE_BLOCK_HEIGHT,
// This one should only move up, showing the highest vote in the tally.
// It should be easy to produce this on Grafana as well from the one above.
block_height => max_gauge ! &am::TOPDOWN_FINALITY_VOTE_MAX_BLOCK_HEIGHT,
validator => inc1_counter ! &am::TOPDOWN_FINALITY_VOTE_ADDED,
},
ParentFinalityVoteIgnored {
validator => inc1_counter ! &am::TOPDOWN_FINALITY_VOTE_IGNORED,
},
ParentFinalityMissingQuorum {
block_hash => inc1_counter ! &am::TOPDOWN_FINALITY_MISSING_QUORUM,
},
NewBottomUpCheckpoint {
block_height => set_gauge ! &am::BOTTOMUP_CKPT_BLOCK_HEIGHT,
next_configuration_number => set_gauge ! &am::BOTTOMUP_CKPT_CONFIG_NUM,
num_msgs => inc_counter ! &am::BOTTOMUP_CKPT_NUM_MSGS,
},
NewBlock {
block_height => set_gauge ! &am::ABCI_COMMITTED_BLOCK_HEIGHT
}
});
}
}
mod visitors {
use tracing::field::{Field, Visit};
pub struct FindU64<'a> {
pub name: &'a str,
pub value: u64,
}
impl<'a> FindU64<'a> {
pub fn new(name: &'a str) -> Self {
Self { name, value: 0 }
}
}
// Looking for multiple values because the callsite might be passed as a literal which turns into an i64 for example.
impl<'a> Visit for FindU64<'a> {
fn record_u64(&mut self, field: &Field, value: u64) {
if field.name() == self.name {
self.value = value;
}
}
fn record_i64(&mut self, field: &Field, value: i64) {
if field.name() == self.name {
self.value = value as u64;
}
}
fn record_i128(&mut self, field: &Field, value: i128) {
if field.name() == self.name {
self.value = value as u64;
}
}
fn record_u128(&mut self, field: &Field, value: u128) {
if field.name() == self.name {
self.value = value as u64;
}
}
fn record_debug(&mut self, _field: &Field, _value: &dyn std::fmt::Debug) {}
}
}
#[cfg(test)]
mod tests {
use fendermint_tracing::emit;
use fendermint_vm_event::ParentFinalityCommitted;
use prometheus::IntGauge;
use tracing_subscriber::layer::SubscriberExt;
#[test]
fn test_metrics_layer() {
let gauge: &IntGauge = &super::super::prometheus::app::TOPDOWN_FINALIZED_BLOCK_HEIGHT;
let v0 = gauge.get();
gauge.inc();
let v1 = gauge.get();
assert!(v1 > v0, "gague should change without being registered");
let block_height = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();
let subscriber = tracing_subscriber::registry().with(super::layer());
tracing::subscriber::with_default(subscriber, || {
emit! {
ParentFinalityCommitted { block_height, block_hash: "metrics-test-block" }
}
});
assert_eq!(
gauge.get() as u64,
block_height,
"metrics should be captured"
);
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/app/src/metrics/prometheus.rs | fendermint/app/src/metrics/prometheus.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Prometheus metrics
macro_rules! metrics {
($($name:ident : $type:ty = $desc:literal);* $(;)?) => {
$(
paste! {
lazy_static! {
pub static ref $name: $type = $type::new(stringify!([< $name:lower >]), $desc).unwrap();
}
}
)*
pub fn register_metrics(registry: &Registry) -> anyhow::Result<()> {
$(registry.register(Box::new($name.clone()))?;)*
Ok(())
}
};
}
/// Metrics emitted by fendermint.
pub mod app {
use lazy_static::lazy_static;
use paste::paste;
use prometheus::{IntCounter, IntGauge, Registry};
metrics! {
TOPDOWN_VIEW_BLOCK_HEIGHT: IntGauge = "Highest parent subnet block observed";
TOPDOWN_VIEW_NUM_MSGS: IntCounter = "Number of top-down messages observed since start";
TOPDOWN_VIEW_NUM_VAL_CHNGS: IntCounter = "Number of top-down validator changes observed since start";
TOPDOWN_FINALIZED_BLOCK_HEIGHT: IntGauge = "Highest parent subnet block finalized";
TOPDOWN_FINALITY_VOTE_BLOCK_HEIGHT: IntGauge = "Block for which a finality vote has been received and added last";
TOPDOWN_FINALITY_VOTE_MAX_BLOCK_HEIGHT: IntGauge = "Highest block for which a finality vote has been received and added";
TOPDOWN_FINALITY_VOTE_ADDED: IntCounter = "Number of finality votes received and added since start";
TOPDOWN_FINALITY_VOTE_IGNORED: IntCounter = "Number of finality votes received and ignored since start";
TOPDOWN_FINALITY_MISSING_QUORUM: IntCounter = "Number of times we could have proposed but didn't because the quorum was missing";
BOTTOMUP_CKPT_BLOCK_HEIGHT: IntGauge = "Highest bottom-up checkpoint created";
BOTTOMUP_CKPT_CONFIG_NUM: IntGauge = "Highest configuration number checkpointed";
BOTTOMUP_CKPT_NUM_MSGS: IntCounter = "Number of bottom-up messages observed since start";
// This metrics is available in CometBFT as well, but it's something that should increase even without subnets,
// which can be a useful way to check if metrics work at all.
ABCI_COMMITTED_BLOCK_HEIGHT: IntGauge = "Highest committed block";
}
}
/// Metrics emitted by the Ethereum API facade.
pub mod eth {
// TODO: Define Ethereum metrics and events.
}
#[cfg(test)]
mod tests {
#[test]
fn can_register_metrics() {
let r = prometheus::Registry::new();
super::app::register_metrics(&r).unwrap();
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/tracing/src/lib.rs | fendermint/tracing/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
/// Emit an event that conforms to a flat event structure type using the [tracing::event!](https://github.com/tokio-rs/tracing/blob/908cc432a5994f6e17c8f36e13c217dc40085704/tracing/src/macros.rs#L854) macro.
///
/// There should be a [Subnscriber](https://docs.rs/tracing/latest/tracing/trait.Subscriber.html) in the application root to check the
/// [Metadata::name](https://docs.rs/tracing/latest/tracing/struct.Metadata.html#method.name) of the event in the
/// [Event::metadata](https://docs.rs/tracing/latest/tracing/struct.Event.html#method.metadata).
///
/// Once the [valuable](https://github.com/tokio-rs/tracing/discussions/1906) feature is stable,
/// we won't have the restriction of flat events.
///
/// The emitted [tracing::Event] will contain the name of the event twice:
/// in the [tracing::metadata::Metadata::name] field as `"event::<name>"` and under the `event` key in the [tracing::field::ValueSet].
/// The rationale is that we can write a [tracing::Subscriber] that looks for the events it is interested in using
/// the `name`, or find all events by filtering on the `event::` prefix.
/// By default `name` would be `event <file>:<line>`, but it turns out it's impossible to ask the
/// [log formatter](https://github.com/tokio-rs/tracing/blob/908cc432a5994f6e17c8f36e13c217dc40085704/tracing-subscriber/src/fmt/format/mod.rs#L930)
/// to output the `name``, and for all other traces it would be redundant with the filename and line we print,
/// which are available separately on the metadata, hence the `event` key which will be displayed instead.
///
/// ### Example
///
/// ```ignore
/// pub struct NewBottomUpCheckpoint<'a> {
/// pub block_height: u64,
/// pub block_hash: &'a str,
/// }
///
/// let block_height = todo!();
/// let block_hash_hex = hex::encode(todo!());
///
/// emit!(NewBottomUpCheckpoint {
/// block_height,
/// block_hash: &block_hash_hex,
/// });
/// ```
#[macro_export]
macro_rules! emit {
($lvl:ident, $event:ident { $($field:ident $(: $value:expr)?),* $(,)? } ) => {{
// Make sure the emitted fields match the schema of the event.
if false {
let _event = $event {
$($field $(: $value)?),*
};
}
tracing::event!(
name: concat!("event::", stringify!($event)),
tracing::Level::$lvl,
{ event = tracing::field::display(stringify!($event)), $($field $(= $value)?),* }
)
}};
($event:ident { $($field:ident $(: $value:expr)?),* $(,)? } ) => {{
emit!(INFO, $event { $($field $(: $value)? ),* })
}};
}
#[cfg(test)]
mod tests {
#[allow(dead_code)]
struct TestEvent<'a> {
pub foo: u32,
pub bar: &'a str,
}
#[test]
fn test_emit() {
emit!(TestEvent {
foo: 123,
bar: "spam",
});
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rpc/src/lib.rs | fendermint/rpc/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use base64::{
alphabet,
engine::{DecodePaddingMode, GeneralPurpose, GeneralPurposeConfig},
};
pub mod client;
pub mod message;
pub mod query;
pub mod response;
pub mod tx;
pub use client::FendermintClient;
pub use query::QueryClient;
pub use tx::TxClient;
/// A [`base64::Engine`] using the [`alphabet::STANDARD`] base64 alphabet
/// padding bytes when writing but requireing no padding when reading.
const B64_ENGINE: base64::engine::GeneralPurpose = GeneralPurpose::new(
&alphabet::STANDARD,
GeneralPurposeConfig::new()
.with_encode_padding(true)
.with_decode_padding_mode(DecodePaddingMode::Indifferent),
);
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rpc/src/response.rs | fendermint/rpc/src/response.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use base64::Engine;
use bytes::Bytes;
use fendermint_vm_actor_interface::eam::{self, CreateReturn};
use fvm_ipld_encoding::{BytesDe, RawBytes};
use tendermint::abci::types::ExecTxResult;
/// Parse what Tendermint returns in the `data` field of [`DeliverTx`] into bytes.
/// Somewhere along the way it replaces them with the bytes of a Base64 encoded string,
/// and `tendermint_rpc` does not undo that wrapping.
pub fn decode_data(data: &Bytes) -> anyhow::Result<RawBytes> {
let b64 = String::from_utf8(data.to_vec()).context("error parsing data as base64 string")?;
let data = base64::engine::general_purpose::STANDARD
.decode(b64)
.context("error parsing base64 to bytes")?;
Ok(RawBytes::from(data))
}
/// Apply the encoding that Tendermint does to the bytes inside [`DeliverTx`].
pub fn encode_data(data: &[u8]) -> Bytes {
let b64 = base64::engine::general_purpose::STANDARD.encode(data);
let bz = b64.as_bytes();
Bytes::copy_from_slice(bz)
}
/// Parse what Tendermint returns in the `data` field of [`DeliverTx`] as raw bytes.
///
/// Only call this after the `code` of both [`DeliverTx`] and [`CheckTx`] have been inspected!
pub fn decode_bytes(deliver_tx: &ExecTxResult) -> anyhow::Result<RawBytes> {
decode_data(&deliver_tx.data)
}
/// Parse what Tendermint returns in the `data` field of [`DeliverTx`] as [`CreateReturn`].
pub fn decode_fevm_create(deliver_tx: &ExecTxResult) -> anyhow::Result<CreateReturn> {
let data = decode_data(&deliver_tx.data)?;
fvm_ipld_encoding::from_slice::<eam::CreateReturn>(&data)
.map_err(|e| anyhow!("error parsing as CreateReturn: {e}"))
}
/// Parse what Tendermint returns in the `data` field of [`DeliverTx`] as raw ABI return value.
pub fn decode_fevm_invoke(deliver_tx: &ExecTxResult) -> anyhow::Result<Vec<u8>> {
let data = decode_data(&deliver_tx.data)?;
decode_fevm_return_data(data)
}
/// Parse what is in the `return_data` field, which is `RawBytes` containing IPLD encoded bytes, into the really raw content.
pub fn decode_fevm_return_data(data: RawBytes) -> anyhow::Result<Vec<u8>> {
// Some calls like transfers between Ethereum accounts don't return any data.
if data.is_empty() {
return Ok(data.into());
}
// This is the data return by the FEVM itself, not something wrapping another piece,
// that is, it's as if it was returning `CreateReturn`, it's returning `RawBytes` encoded as IPLD.
fvm_ipld_encoding::from_slice::<BytesDe>(&data)
.map(|bz| bz.0)
.map_err(|e| anyhow!("failed to deserialize bytes returned by FEVM method invocation: {e}"))
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rpc/src/tx.rs | fendermint/rpc/src/tx.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::marker::PhantomData;
use anyhow::Context;
use async_trait::async_trait;
use bytes::Bytes;
use fendermint_vm_message::query::{FvmQueryHeight, GasEstimate};
use tendermint::abci::types::ExecTxResult;
use tendermint_rpc::endpoint::broadcast::{tx_async, tx_commit, tx_sync};
use fvm_ipld_encoding::RawBytes;
use fvm_shared::address::Address;
use fvm_shared::econ::TokenAmount;
use fvm_shared::MethodNum;
use fendermint_vm_actor_interface::eam::CreateReturn;
use fendermint_vm_message::chain::ChainMessage;
use crate::message::{GasParams, SignedMessageFactory};
use crate::query::{QueryClient, QueryResponse};
use crate::response::{decode_bytes, decode_fevm_create, decode_fevm_invoke};
/// Abstracting away what the return value is based on whether
/// we broadcast transactions in sync, async or commit mode.
pub trait BroadcastMode {
type Response<T>;
}
pub trait BoundClient {
fn message_factory_mut(&mut self) -> &mut SignedMessageFactory;
fn address(&mut self) -> Address {
*self.message_factory_mut().address()
}
}
/// Fendermint client for submitting transactions.
#[async_trait]
pub trait TxClient<M: BroadcastMode = TxCommit>: BoundClient + Send + Sync {
/// Transfer tokens to another account.
async fn transfer(
&mut self,
to: Address,
value: TokenAmount,
gas_params: GasParams,
) -> anyhow::Result<M::Response<()>> {
let mf = self.message_factory_mut();
let msg = mf.transfer(to, value, gas_params)?;
let fut = self.perform(msg, |_| Ok(()));
let res = fut.await?;
Ok(res)
}
/// Send a message to an actor.
async fn transaction(
&mut self,
to: Address,
method_num: MethodNum,
params: RawBytes,
value: TokenAmount,
gas_params: GasParams,
) -> anyhow::Result<M::Response<RawBytes>> {
let mf = self.message_factory_mut();
let msg = mf.transaction(to, method_num, params, value, gas_params)?;
let fut = self.perform(msg, decode_bytes);
let res = fut.await?;
Ok(res)
}
/// Deploy a FEVM contract.
async fn fevm_create(
&mut self,
contract: Bytes,
constructor_args: Bytes,
value: TokenAmount,
gas_params: GasParams,
) -> anyhow::Result<M::Response<CreateReturn>> {
let mf = self.message_factory_mut();
let msg = mf.fevm_create(contract, constructor_args, value, gas_params)?;
let fut = self.perform(msg, decode_fevm_create);
let res = fut.await?;
Ok(res)
}
/// Invoke a method on a FEVM contract.
async fn fevm_invoke(
&mut self,
contract: Address,
calldata: Bytes,
value: TokenAmount,
gas_params: GasParams,
) -> anyhow::Result<M::Response<Vec<u8>>> {
let mf = self.message_factory_mut();
let msg = mf.fevm_invoke(contract, calldata, value, gas_params)?;
let fut = self.perform(msg, decode_fevm_invoke);
let res = fut.await?;
Ok(res)
}
async fn perform<F, T>(&self, msg: ChainMessage, f: F) -> anyhow::Result<M::Response<T>>
where
F: FnOnce(&ExecTxResult) -> anyhow::Result<T> + Sync + Send,
T: Sync + Send;
}
/// Convenience trait to call FEVM methods in read-only mode, without doing a transaction.
#[async_trait]
pub trait CallClient: QueryClient + BoundClient {
/// Call a method on a FEVM contract without including a transaction on the blockchain.
async fn fevm_call(
&mut self,
contract: Address,
calldata: Bytes,
value: TokenAmount,
gas_params: GasParams,
height: FvmQueryHeight,
) -> anyhow::Result<CallResponse<Vec<u8>>> {
let msg = self
.message_factory_mut()
.fevm_call(contract, calldata, value, gas_params)?;
let response = self.call(msg, height).await?;
let return_data = if response.value.code.is_err() {
None
} else {
let return_data = decode_fevm_invoke(&response.value)
.context("error decoding data from deliver_tx in query")?;
Some(return_data)
};
let response = CallResponse {
response,
return_data,
};
Ok(response)
}
/// Estimate the gas limit of a FEVM call.
async fn fevm_estimate_gas(
&mut self,
contract: Address,
calldata: Bytes,
value: TokenAmount,
gas_params: GasParams,
height: FvmQueryHeight,
) -> anyhow::Result<QueryResponse<GasEstimate>> {
let msg = self
.message_factory_mut()
.fevm_call(contract, calldata, value, gas_params)?;
self.estimate_gas(msg, height).await
}
}
/// Auto-implement this trait for anything that satisfies the bounds.
impl<C> CallClient for C where C: QueryClient + BoundClient + Send + Sync {}
/// Return immediately after the transaction is broadcasted without waiting for check results.
pub struct TxAsync;
/// Wait for the check results before returning from broadcast.
pub struct TxSync;
/// Wait for the delivery results before returning from broadcast.
pub struct TxCommit;
pub struct AsyncResponse<T> {
/// Response from Tendermint.
pub response: tx_async::Response,
pub return_data: PhantomData<T>,
}
pub struct SyncResponse<T> {
/// Response from Tendermint.
pub response: tx_sync::Response,
pub return_data: PhantomData<T>,
}
pub struct CommitResponse<T> {
/// Response from Tendermint.
pub response: tx_commit::Response,
/// Parsed return data, if the response indicates success.
pub return_data: Option<T>,
}
pub struct CallResponse<T> {
/// Response from Tendermint.
pub response: QueryResponse<tendermint::abci::types::ExecTxResult>,
/// Parsed return data, if the response indicates success.
pub return_data: Option<T>,
}
impl BroadcastMode for TxAsync {
type Response<T> = AsyncResponse<T>;
}
impl BroadcastMode for TxSync {
type Response<T> = SyncResponse<T>;
}
impl BroadcastMode for TxCommit {
type Response<T> = CommitResponse<T>;
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rpc/src/client.rs | fendermint/rpc/src/client.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::fmt::Display;
use std::marker::PhantomData;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use fendermint_vm_message::chain::ChainMessage;
use tendermint::abci::types::ExecTxResult;
use tendermint::block::Height;
use tendermint_rpc::{endpoint::abci_query::AbciQuery, Client, HttpClient, Scheme, Url};
use tendermint_rpc::{WebSocketClient, WebSocketClientDriver, WebSocketClientUrl};
use fendermint_vm_message::query::{FvmQuery, FvmQueryHeight};
use crate::message::SignedMessageFactory;
use crate::query::QueryClient;
use crate::tx::{
AsyncResponse, BoundClient, CommitResponse, SyncResponse, TxAsync, TxClient, TxCommit, TxSync,
};
// Retrieve the proxy URL with precedence:
// 1. If supplied, that's the proxy URL used.
// 2. If not supplied, but environment variable HTTP_PROXY or HTTPS_PROXY are
// supplied, then use the appropriate variable for the URL in question.
//
// Copied from `tendermint_rpc`.
fn get_http_proxy_url(url_scheme: Scheme, proxy_url: Option<Url>) -> anyhow::Result<Option<Url>> {
match proxy_url {
Some(u) => Ok(Some(u)),
None => match url_scheme {
Scheme::Http => std::env::var("HTTP_PROXY").ok(),
Scheme::Https => std::env::var("HTTPS_PROXY")
.ok()
.or_else(|| std::env::var("HTTP_PROXY").ok()),
_ => {
if std::env::var("HTTP_PROXY").is_ok() || std::env::var("HTTPS_PROXY").is_ok() {
tracing::warn!(
"Ignoring HTTP proxy environment variables for non-HTTP client connection"
);
}
None
}
}
.map(|u| u.parse::<Url>().map_err(|e| anyhow!(e)))
.transpose(),
}
}
/// Create a Tendermint HTTP client.
pub fn http_client(url: Url, proxy_url: Option<Url>) -> anyhow::Result<HttpClient> {
let proxy_url = get_http_proxy_url(url.scheme(), proxy_url)?;
let client = match proxy_url {
Some(proxy_url) => {
tracing::debug!(
"Using HTTP client with proxy {} to submit request to {}",
proxy_url,
url
);
HttpClient::new_with_proxy(url, proxy_url)?
}
None => {
tracing::debug!("Using HTTP client to submit request to: {}", url);
HttpClient::new(url)?
}
};
Ok(client)
}
/// Create a Tendermint WebSocket client.
///
/// The caller must start the driver in a background task.
pub async fn ws_client<U>(url: U) -> anyhow::Result<(WebSocketClient, WebSocketClientDriver)>
where
U: TryInto<WebSocketClientUrl, Error = tendermint_rpc::Error> + Display + Clone,
{
// TODO: Doesn't handle proxy.
tracing::debug!("Using WS client to submit request to: {}", url);
let (client, driver) = WebSocketClient::new(url.clone())
.await
.with_context(|| format!("failed to create WS client to: {}", url))?;
Ok((client, driver))
}
/// Unauthenticated Fendermint client.
#[derive(Clone)]
pub struct FendermintClient<C = HttpClient> {
inner: C,
}
impl<C> FendermintClient<C> {
pub fn new(inner: C) -> Self {
Self { inner }
}
/// Attach a message factory to the client.
pub fn bind(self, message_factory: SignedMessageFactory) -> BoundFendermintClient<C> {
BoundFendermintClient::new(self.inner, message_factory)
}
}
impl FendermintClient<HttpClient> {
pub fn new_http(url: Url, proxy_url: Option<Url>) -> anyhow::Result<Self> {
let inner = http_client(url, proxy_url)?;
Ok(Self { inner })
}
}
/// Get to the underlying Tendermint client if necessary, for example to query the state of transactions.
pub trait TendermintClient<C> {
/// The underlying Tendermint client.
fn underlying(&self) -> &C;
fn into_underlying(self) -> C;
}
impl<C> TendermintClient<C> for FendermintClient<C> {
fn underlying(&self) -> &C {
&self.inner
}
fn into_underlying(self) -> C {
self.inner
}
}
#[async_trait]
impl<C> QueryClient for FendermintClient<C>
where
C: Client + Sync + Send,
{
async fn perform(&self, query: FvmQuery, height: FvmQueryHeight) -> anyhow::Result<AbciQuery> {
perform_query(&self.inner, query, height).await
}
}
/// Fendermint client capable of signing transactions.
pub struct BoundFendermintClient<C = HttpClient> {
inner: C,
message_factory: SignedMessageFactory,
}
impl<C> BoundFendermintClient<C> {
pub fn new(inner: C, message_factory: SignedMessageFactory) -> Self {
Self {
inner,
message_factory,
}
}
}
impl<C> BoundClient for BoundFendermintClient<C> {
fn message_factory_mut(&mut self) -> &mut SignedMessageFactory {
&mut self.message_factory
}
}
impl<C> TendermintClient<C> for BoundFendermintClient<C> {
fn underlying(&self) -> &C {
&self.inner
}
fn into_underlying(self) -> C {
self.inner
}
}
#[async_trait]
impl<C> QueryClient for BoundFendermintClient<C>
where
C: Client + Sync + Send,
{
async fn perform(&self, query: FvmQuery, height: FvmQueryHeight) -> anyhow::Result<AbciQuery> {
perform_query(&self.inner, query, height).await
}
}
#[async_trait]
impl<C> TxClient<TxAsync> for BoundFendermintClient<C>
where
C: Client + Sync + Send,
{
async fn perform<F, T>(&self, msg: ChainMessage, _f: F) -> anyhow::Result<AsyncResponse<T>>
where
F: FnOnce(&ExecTxResult) -> anyhow::Result<T> + Sync + Send,
{
let data = SignedMessageFactory::serialize(&msg)?;
let response = self
.inner
.broadcast_tx_async(data)
.await
.context("broadcast_tx_async failed")?;
let response = AsyncResponse {
response,
return_data: PhantomData,
};
Ok(response)
}
}
#[async_trait]
impl<C> TxClient<TxSync> for BoundFendermintClient<C>
where
C: Client + Sync + Send,
{
async fn perform<F, T>(
&self,
msg: ChainMessage,
_f: F,
) -> anyhow::Result<crate::tx::SyncResponse<T>>
where
F: FnOnce(&ExecTxResult) -> anyhow::Result<T> + Sync + Send,
{
let data = SignedMessageFactory::serialize(&msg)?;
let response = self
.inner
.broadcast_tx_sync(data)
.await
.context("broadcast_tx_sync failed")?;
let response = SyncResponse {
response,
return_data: PhantomData,
};
Ok(response)
}
}
#[async_trait]
impl<C> TxClient<TxCommit> for BoundFendermintClient<C>
where
C: Client + Sync + Send,
{
async fn perform<F, T>(
&self,
msg: ChainMessage,
f: F,
) -> anyhow::Result<crate::tx::CommitResponse<T>>
where
F: FnOnce(&ExecTxResult) -> anyhow::Result<T> + Sync + Send,
{
let data = SignedMessageFactory::serialize(&msg)?;
let response = self
.inner
.broadcast_tx_commit(data)
.await
.context("broadcast_tx_commit failed")?;
// We have a fully `DeliverTx` with default fields even if `CheckTx` indicates failure.
let return_data = if response.check_tx.code.is_err() || response.tx_result.code.is_err() {
None
} else {
let return_data =
f(&response.tx_result).context("error decoding data from deliver_tx in commit")?;
Some(return_data)
};
let response = CommitResponse {
response,
return_data,
};
Ok(response)
}
}
async fn perform_query<C>(
client: &C,
query: FvmQuery,
height: FvmQueryHeight,
) -> anyhow::Result<AbciQuery>
where
C: Client + Sync + Send,
{
tracing::debug!(?query, ?height, "perform ABCI query");
let data = fvm_ipld_encoding::to_vec(&query).context("failed to encode query")?;
let height: u64 = height.into();
let height = Height::try_from(height).context("failed to conver to Height")?;
// This is how we'd call it, but here we're trying to debug what's going on using
// the `perform` method below with a request that prints the response if it fails
// to deserialize for any reason.
// let res = client
// .abci_query(None, data, Some(height), false)
// .await
// .context("abci query failed")?;
let req = tendermint_rpc::endpoint::abci_query::Request::new(None, data, Some(height), false);
let res = client
.perform(debug::DebugRequest(req))
.await
.context("abci query failed")?
.0
.response;
Ok(res)
}
mod debug {
use serde::{Deserialize, Serialize};
use tendermint_rpc as trpc;
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
pub struct DebugRequest<R>(pub R);
#[derive(Serialize, Deserialize)]
pub struct DebugResponse<R>(pub R);
impl<R> trpc::Request for DebugRequest<R>
where
R: trpc::Request,
{
type Response = DebugResponse<R::Response>;
}
impl<R> trpc::request::RequestMessage for DebugRequest<R>
where
R: trpc::request::RequestMessage,
{
fn method(&self) -> trpc::Method {
self.0.method()
}
}
impl<R> trpc::SimpleRequest for DebugRequest<R>
where
R: trpc::SimpleRequest,
{
type Output = Self::Response;
}
impl<R> trpc::Response for DebugResponse<R>
where
R: trpc::Response,
{
fn from_string(response: impl AsRef<[u8]>) -> Result<Self, trpc::Error> {
let wrapper: Result<trpc::response::Wrapper<Self>, trpc::Error> =
serde_json::from_slice(response.as_ref()).map_err(trpc::Error::serde);
let response_body = || String::from_utf8_lossy(response.as_ref()).to_string();
match wrapper {
Err(e) => {
tracing::error!(
error = e.to_string(),
response = response_body(),
"failed to parse JSON-RPC response"
);
Err(e)
}
Ok(wrapper) => match wrapper.into_result() {
Err(e) => {
tracing::error!(
error = e.to_string(),
response = response_body(),
"error from JSON-RPC"
);
Err(e)
}
Ok(response) => Ok(response),
},
}
}
fn from_reader(reader: impl std::io::prelude::Read) -> Result<Self, trpc::Error> {
let wrapper: trpc::response::Wrapper<Self> =
serde_json::from_reader(reader).map_err(trpc::Error::serde)?;
wrapper.into_result()
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rpc/src/query.rs | fendermint/rpc/src/query.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use fvm_ipld_encoding::serde::Serialize;
use fvm_shared::message::Message;
use prost::Message as ProstMessage;
use tendermint::abci::types::ExecTxResult;
use tendermint::block::Height;
use tendermint_rpc::endpoint::abci_query::AbciQuery;
use cid::Cid;
use fvm_shared::ActorID;
use fvm_shared::{address::Address, error::ExitCode};
use fendermint_vm_message::query::{
ActorState, BuiltinActors, FvmQuery, FvmQueryHeight, GasEstimate, StateParams,
};
use crate::response::encode_data;
#[derive(Serialize, Debug, Clone)]
/// The parsed value from a query, along with the height at which the query was performed.
pub struct QueryResponse<T> {
pub height: Height,
pub value: T,
}
/// Fendermint client for submitting queries.
#[async_trait]
pub trait QueryClient: Sync {
/// Query the contents of a CID from the IPLD store.
async fn ipld(&self, cid: &Cid, height: FvmQueryHeight) -> anyhow::Result<Option<Vec<u8>>> {
let res = self
.perform(FvmQuery::Ipld(*cid), height)
.await
.context("ipld query failed")?;
extract_opt(res, |res| Ok(res.value))
}
/// Query the the state of an actor.
async fn actor_state(
&self,
address: &Address,
height: FvmQueryHeight,
) -> anyhow::Result<QueryResponse<Option<(ActorID, ActorState)>>> {
let res = self
.perform(FvmQuery::ActorState(*address), height)
.await
.context("actor state query failed")?;
let height = res.height;
let value = extract_actor_state(res)?;
Ok(QueryResponse { height, value })
}
/// Run a message in a read-only fashion.
async fn call(
&self,
message: Message,
height: FvmQueryHeight,
) -> anyhow::Result<QueryResponse<ExecTxResult>> {
let res = self
.perform(FvmQuery::Call(Box::new(message)), height)
.await
.context("call query failed")?;
let height = res.height;
let value = extract(res, parse_exec_tx_result)?;
Ok(QueryResponse { height, value })
}
/// Estimate the gas limit of a message.
async fn estimate_gas(
&self,
mut message: Message,
height: FvmQueryHeight,
) -> anyhow::Result<QueryResponse<GasEstimate>> {
// Using 0 sequence so estimation doesn't get tripped over by nonce mismatch.
message.sequence = 0;
let res = self
.perform(FvmQuery::EstimateGas(Box::new(message)), height)
.await
.context("estimate gas query failed")?;
let height = res.height;
let value = extract(res, |res| {
fvm_ipld_encoding::from_slice(&res.value)
.context("failed to decode GasEstimate from query")
})?;
Ok(QueryResponse { height, value })
}
/// Slowly changing state parameters.
async fn state_params(
&self,
height: FvmQueryHeight,
) -> anyhow::Result<QueryResponse<StateParams>> {
let res = self
.perform(FvmQuery::StateParams, height)
.await
.context("state params query failed")?;
let height = res.height;
let value = extract(res, |res| {
fvm_ipld_encoding::from_slice(&res.value)
.context("failed to decode StateParams from query")
})?;
Ok(QueryResponse { height, value })
}
/// Queries the built-in actors known by the System actor.
async fn builtin_actors(
&self,
height: FvmQueryHeight,
) -> anyhow::Result<QueryResponse<BuiltinActors>> {
let res = self
.perform(FvmQuery::BuiltinActors, height)
.await
.context("builtin actors query failed")?;
let height = res.height;
let value = {
let registry: Vec<(String, Cid)> = extract(res, |res| {
fvm_ipld_encoding::from_slice(&res.value)
.context("failed to decode BuiltinActors from query")
})?;
BuiltinActors { registry }
};
Ok(QueryResponse { height, value })
}
/// Run an ABCI query.
async fn perform(&self, query: FvmQuery, height: FvmQueryHeight) -> anyhow::Result<AbciQuery>;
}
/// Extract some value from the query result, unless it's not found or other error.
fn extract_opt<T, F>(res: AbciQuery, f: F) -> anyhow::Result<Option<T>>
where
F: FnOnce(AbciQuery) -> anyhow::Result<T>,
{
if is_not_found(&res) {
Ok(None)
} else {
extract(res, f).map(Some)
}
}
/// Extract some value from the query result, unless there was an error.
fn extract<T, F>(res: AbciQuery, f: F) -> anyhow::Result<T>
where
F: FnOnce(AbciQuery) -> anyhow::Result<T>,
{
if res.code.is_err() {
Err(anyhow!(
"query returned non-zero exit code: {}",
res.code.value()
))
} else {
f(res)
}
}
fn extract_actor_state(res: AbciQuery) -> anyhow::Result<Option<(ActorID, ActorState)>> {
extract_opt(res, |res| {
let state: ActorState =
fvm_ipld_encoding::from_slice(&res.value).context("failed to decode state")?;
let id: ActorID = fvm_ipld_encoding::from_slice(&res.key).context("failed to decode ID")?;
Ok((id, state))
})
}
fn is_not_found(res: &AbciQuery) -> bool {
res.code.value() == ExitCode::USR_NOT_FOUND.value()
}
fn parse_exec_tx_result(res: AbciQuery) -> anyhow::Result<ExecTxResult> {
let bz: Vec<u8> =
fvm_ipld_encoding::from_slice(&res.value).context("failed to decode IPLD as bytes")?;
let deliver_tx = tendermint_proto::abci::ExecTxResult::decode(bz.as_ref())
.context("failed to deserialize ResponseDeliverTx from proto bytes")?;
let mut deliver_tx = tendermint::abci::types::ExecTxResult::try_from(deliver_tx)
.context("failed to create DeliverTx from proto response")?;
// Mimic the Base64 encoding of the value that Tendermint does.
deliver_tx.data = encode_data(&deliver_tx.data);
Ok(deliver_tx)
}
#[cfg(test)]
mod tests {
use tendermint_rpc::endpoint::abci_query::AbciQuery;
use crate::response::decode_fevm_invoke;
use super::parse_exec_tx_result;
#[test]
fn parse_call_query_response() {
// Value extracted from a log captured in an issue.
let response = "{\"code\":0,\"log\":\"\",\"info\":\"\",\"index\":\"0\",\"key\":null,\"value\":\"mNwIGCESARhAGCIYVxhtGGUYcxhzGGEYZxhlGCAYZhhhGGkYbBhlGGQYIBh3GGkYdBhoGCAYYhhhGGMYaxh0GHIYYRhjGGUYOgoYMBgwGDoYIBh0GDAYMRgxGDkYIBgoGG0YZRh0GGgYbxhkGCAYMxg4GDQYNBg0GDUYMBg4GDMYNxgpGCAYLRgtGCAYYxhvGG4YdBhyGGEYYxh0GCAYchhlGHYYZRhyGHQYZRhkGCAYKBgzGDMYKQoYMBiuGK0YpAEYOhh3CgcYbRhlGHMYcxhhGGcYZRIYNgoEGGYYchhvGG0SGCwYdBg0GDEYMBhmGGEYYRhhGGEYYRhhGGEYYRhhGGEYYRhhGGEYYRhhGGEYYRhhGGEYYRhhGGEYYRhhGGEYYRhhGGEYYRhhGGEYYRhvGG4YYxg2GGkYahhpGBgBEhg0CgIYdBhvEhgsGHQYNBgxGDAYZhg3GG8YNhh3GHYYNBhtGGgYaRg2GG0YdRgzGHgYZhhpGGYYdhhmGGcYbxhyGGIYYRhtGDUYbhhwGGcYbBhpGG0YNBhkGHkYdRh2GGkYaRgYAQ==\",\"proofOps\":null,\"height\":\"6148\",\"codespace\":\"\"}";
let query = serde_json::from_str::<AbciQuery>(response).expect("failed to parse AbciQuery");
let deliver_tx = parse_exec_tx_result(query).expect("failed to parse DeliverTx");
let return_data = decode_fevm_invoke(&deliver_tx).expect("failed to decode return data");
assert!(deliver_tx.code.is_err());
assert!(deliver_tx.info == "message failed with backtrace:\n00: t0119 (method 3844450837) -- contract reverted (33)\n");
assert!(return_data.is_empty(), "this error had no revert data");
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rpc/src/message.rs | fendermint/rpc/src/message.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::Path;
use anyhow::Context;
use base64::Engine;
use bytes::Bytes;
use fendermint_crypto::SecretKey;
use fendermint_vm_actor_interface::{eam, evm};
use fendermint_vm_message::{chain::ChainMessage, signed::SignedMessage};
use fvm_ipld_encoding::{BytesSer, RawBytes};
use fvm_shared::{
address::Address, chainid::ChainID, econ::TokenAmount, message::Message, MethodNum, METHOD_SEND,
};
use crate::B64_ENGINE;
/// Factory methods for transaction payload construction.
///
/// It assumes the sender is an `f1` type address, it won't work with `f410` addresses.
/// For those one must use the Ethereum API, with a suitable client library such as [ethers].
pub struct MessageFactory {
addr: Address,
sequence: u64,
}
impl MessageFactory {
pub fn new(addr: Address, sequence: u64) -> Self {
Self { addr, sequence }
}
pub fn address(&self) -> &Address {
&self.addr
}
/// Set the sequence to an arbitrary value.
pub fn set_sequence(&mut self, sequence: u64) {
self.sequence = sequence;
}
pub fn transaction(
&mut self,
to: Address,
method_num: MethodNum,
params: RawBytes,
value: TokenAmount,
gas_params: GasParams,
) -> Message {
let msg = Message {
version: Default::default(), // TODO: What does this do?
from: self.addr,
to,
sequence: self.sequence,
value,
method_num,
params,
gas_limit: gas_params.gas_limit,
gas_fee_cap: gas_params.gas_fee_cap,
gas_premium: gas_params.gas_premium,
};
self.sequence += 1;
msg
}
pub fn fevm_create(
&mut self,
contract: Bytes,
constructor_args: Bytes,
value: TokenAmount,
gas_params: GasParams,
) -> anyhow::Result<Message> {
let initcode = [contract.to_vec(), constructor_args.to_vec()].concat();
let initcode = RawBytes::serialize(BytesSer(&initcode))?;
Ok(self.transaction(
eam::EAM_ACTOR_ADDR,
eam::Method::CreateExternal as u64,
initcode,
value,
gas_params,
))
}
pub fn fevm_invoke(
&mut self,
contract: Address,
calldata: Bytes,
value: TokenAmount,
gas_params: GasParams,
) -> anyhow::Result<Message> {
let calldata = RawBytes::serialize(BytesSer(&calldata))?;
Ok(self.transaction(
contract,
evm::Method::InvokeContract as u64,
calldata,
value,
gas_params,
))
}
pub fn fevm_call(
&mut self,
contract: Address,
calldata: Bytes,
value: TokenAmount,
gas_params: GasParams,
) -> anyhow::Result<Message> {
let msg = self.fevm_invoke(contract, calldata, value, gas_params)?;
// Roll back the sequence, we don't really want to invoke anything.
self.set_sequence(msg.sequence);
Ok(msg)
}
}
/// Wrapper for MessageFactory which generates signed messages
///
/// It assumes the sender is an `f1` type address, it won't work with `f410` addresses.
/// For those one must use the Ethereum API, with a suitable client library such as [ethers].
pub struct SignedMessageFactory {
inner: MessageFactory,
sk: SecretKey,
chain_id: ChainID,
}
impl SignedMessageFactory {
    /// Create a factory from a secret key and its corresponding address, which could be a delegated one.
    pub fn new(sk: SecretKey, addr: Address, sequence: u64, chain_id: ChainID) -> Self {
        Self {
            inner: MessageFactory::new(addr, sequence),
            sk,
            chain_id,
        }
    }
    /// Treat the secret key as an f1 type account.
    pub fn new_secp256k1(sk: SecretKey, sequence: u64, chain_id: ChainID) -> Self {
        let pk = sk.public_key();
        // Derive the f1 address from the serialized (65 byte, uncompressed) public key.
        let addr = Address::new_secp256k1(&pk.serialize()).expect("public key is 65 bytes");
        Self::new(sk, addr, sequence, chain_id)
    }
    /// Convenience method to read the secret key from a file, expected to be in Base64 format.
    pub fn read_secret_key(sk: &Path) -> anyhow::Result<SecretKey> {
        let b64 = std::fs::read_to_string(sk).context("failed to read secret key")?;
        let bz: Vec<u8> = B64_ENGINE
            .decode(b64)
            .context("failed to parse base64 string")?;
        let sk = SecretKey::try_from(bz)?;
        Ok(sk)
    }
    /// Convenience method to serialize a [`ChainMessage`] for inclusion in a Tendermint transaction.
    pub fn serialize(message: &ChainMessage) -> anyhow::Result<Vec<u8>> {
        Ok(fvm_ipld_encoding::to_vec(message)?)
    }
    /// Actor address.
    pub fn address(&self) -> &Address {
        self.inner.address()
    }
    /// Transfer tokens to another account.
    pub fn transfer(
        &mut self,
        to: Address,
        value: TokenAmount,
        gas_params: GasParams,
    ) -> anyhow::Result<ChainMessage> {
        self.transaction(to, METHOD_SEND, Default::default(), value, gas_params)
    }
    /// Send a message to an actor.
    ///
    /// Builds the unsigned message via the inner factory, then signs it with
    /// the factory's secret key and chain ID.
    pub fn transaction(
        &mut self,
        to: Address,
        method_num: MethodNum,
        params: RawBytes,
        value: TokenAmount,
        gas_params: GasParams,
    ) -> anyhow::Result<ChainMessage> {
        let message = self
            .inner
            .transaction(to, method_num, params, value, gas_params);
        let signed = SignedMessage::new_secp256k1(message, &self.sk, &self.chain_id)?;
        let chain = ChainMessage::Signed(signed);
        Ok(chain)
    }
    /// Deploy a FEVM contract.
    pub fn fevm_create(
        &mut self,
        contract: Bytes,
        constructor_args: Bytes,
        value: TokenAmount,
        gas_params: GasParams,
    ) -> anyhow::Result<ChainMessage> {
        // The EVM init code is the contract bytecode with the constructor
        // arguments appended.
        let initcode = [contract.to_vec(), constructor_args.to_vec()].concat();
        let initcode = RawBytes::serialize(BytesSer(&initcode))?;
        let message = self.transaction(
            eam::EAM_ACTOR_ADDR,
            eam::Method::CreateExternal as u64,
            initcode,
            value,
            gas_params,
        )?;
        Ok(message)
    }
    /// Invoke a method on a FEVM contract.
    pub fn fevm_invoke(
        &mut self,
        contract: Address,
        calldata: Bytes,
        value: TokenAmount,
        gas_params: GasParams,
    ) -> anyhow::Result<ChainMessage> {
        let calldata = RawBytes::serialize(BytesSer(&calldata))?;
        let message = self.transaction(
            contract,
            evm::Method::InvokeContract as u64,
            calldata,
            value,
            gas_params,
        )?;
        Ok(message)
    }
    /// Create a message for a read-only operation.
    pub fn fevm_call(
        &mut self,
        contract: Address,
        calldata: Bytes,
        value: TokenAmount,
        gas_params: GasParams,
    ) -> anyhow::Result<Message> {
        let msg = self.fevm_invoke(contract, calldata, value, gas_params)?;
        // `transaction` always returns `ChainMessage::Signed`, so the panic
        // branch is unreachable in practice.
        let msg = if let ChainMessage::Signed(signed) = msg {
            signed.into_message()
        } else {
            panic!("unexpected message type: {msg:?}");
        };
        // Roll back the sequence, we don't really want to invoke anything.
        self.inner.set_sequence(msg.sequence);
        Ok(msg)
    }
}
/// Gas settings attached to every message built by the factories.
#[derive(Clone, Debug)]
pub struct GasParams {
    /// Maximum amount of gas that can be charged.
    pub gas_limit: u64,
    /// Price of gas.
    ///
    /// Any discrepancy between this and the base fee is paid for
    /// by the validator who puts the transaction into the block.
    pub gas_fee_cap: TokenAmount,
    /// Gas premium.
    pub gas_premium: TokenAmount,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rpc/examples/transfer.rs | fendermint/rpc/examples/transfer.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Example of using the RPC library to send tokens from an f410 account to an f1 account.
//!
//! The example assumes that Tendermint and Fendermint have been started
//! and are running locally.
//!
//! # Usage
//! ```text
//! cargo run -p fendermint_rpc --release --example transfer -- --secret-key test-network/keys/eric.sk --verbose
//! ```
use std::path::PathBuf;
use anyhow::{anyhow, Context};
use clap::Parser;
use fendermint_rpc::query::QueryClient;
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_message::query::FvmQueryHeight;
use fvm_shared::address::Address;
use fvm_shared::chainid::ChainID;
use lazy_static::lazy_static;
use tendermint_rpc::Url;
use tracing::Level;
use fvm_shared::econ::TokenAmount;
use fendermint_rpc::client::FendermintClient;
use fendermint_rpc::message::{GasParams, SignedMessageFactory};
use fendermint_rpc::tx::{TxClient, TxCommit};
lazy_static! {
    /// Default gas params based on the testkit.
    ///
    /// Fee cap and premium are zero, so only the transferred value is spent.
    static ref GAS_PARAMS: GasParams = GasParams {
        gas_limit: 10_000_000_000,
        gas_fee_cap: TokenAmount::default(),
        gas_premium: TokenAmount::default(),
    };
}
// Command-line options. NOTE: the `///` doc comments below double as clap's
// user-facing help text, so they must not be reworded casually.
#[derive(Parser, Debug)]
pub struct Options {
    /// The URL of the Tendermint node's RPC endpoint.
    #[arg(
        long,
        short,
        default_value = "http://127.0.0.1:26657",
        env = "TENDERMINT_RPC_URL"
    )]
    pub url: Url,
    /// Enable DEBUG logs.
    #[arg(long, short)]
    pub verbose: bool,
    /// Path to the secret key to deploy with, expected to be in Base64 format,
    /// and that it has a corresponding f410 account in genesis.
    #[arg(long, short)]
    pub secret_key: PathBuf,
}
impl Options {
    /// Log at DEBUG when `--verbose` is set, otherwise at INFO.
    pub fn log_level(&self) -> Level {
        match self.verbose {
            true => Level::DEBUG,
            false => Level::INFO,
        }
    }
}
/// See the module docs for how to run.
#[tokio::main]
async fn main() {
    let opts: Options = Options::parse();
    tracing_subscriber::fmt()
        .with_max_level(opts.log_level())
        .init();
    let client = FendermintClient::new_http(opts.url, None).expect("error creating client");
    let sk =
        SignedMessageFactory::read_secret_key(&opts.secret_key).expect("error reading secret key");
    let pk = sk.public_key();
    // Two addresses for the same key: the f1 one is the transfer recipient,
    // the f410 one is used as the sender.
    let f1_addr = Address::new_secp256k1(&pk.serialize()).expect("valid public key");
    let f410_addr = Address::from(EthAddress::from(pk));
    // Query the account nonce from the state, so it doesn't need to be passed as an arg.
    let sn = sequence(&client, &f410_addr)
        .await
        .expect("error getting sequence");
    // Query the chain ID, so it doesn't need to be passed as an arg.
    let chain_id = client
        .state_params(FvmQueryHeight::default())
        .await
        .expect("error getting state params")
        .value
        .chain_id;
    let mf = SignedMessageFactory::new(sk, f410_addr, sn, ChainID::from(chain_id));
    let mut client = client.bind(mf);
    // Send 1 whole token and wait for the transaction to be committed.
    let res = TxClient::<TxCommit>::transfer(
        &mut client,
        f1_addr,
        TokenAmount::from_whole(1),
        GAS_PARAMS.clone(),
    )
    .await
    .expect("transfer failed");
    assert!(res.response.check_tx.code.is_ok(), "check is ok");
    assert!(res.response.tx_result.code.is_ok(), "deliver is ok");
    assert!(res.return_data.is_some());
}
/// Get the next sequence number (nonce) of an account.
///
/// Fails if the actor does not exist in the state at the default query height.
async fn sequence(client: &impl QueryClient, addr: &Address) -> anyhow::Result<u64> {
    let state = client
        // `addr` is already a reference; no extra borrow needed.
        .actor_state(addr, FvmQueryHeight::default())
        .await
        .context("failed to get actor state")?;
    match state.value {
        Some((_id, state)) => Ok(state.sequence),
        None => Err(anyhow!("cannot find actor {addr}")),
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rpc/examples/simplecoin.rs | fendermint/rpc/examples/simplecoin.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Example of using the RPC library in combination with ethers abigen
//! to programmatically deploy and call a contract.
//!
//! The example assumes that Tendermint and Fendermint have been started
//! and are running locally.
//!
//! # Usage
//! ```text
//! cargo run -p fendermint_rpc --release --example simplecoin -- --secret-key test-network/keys/alice.sk --verbose
//! ```
use std::path::PathBuf;
use anyhow::{anyhow, Context};
use bytes::Bytes;
use clap::Parser;
use ethers::abi::Tokenizable;
use ethers::prelude::{abigen, decode_function_data};
use ethers::types::{H160, U256};
use fendermint_crypto::SecretKey;
use fendermint_rpc::query::QueryClient;
use fendermint_vm_actor_interface::eam::{self, CreateReturn, EthAddress};
use fendermint_vm_message::query::FvmQueryHeight;
use fvm_shared::address::Address;
use fvm_shared::chainid::ChainID;
use lazy_static::lazy_static;
use tendermint_rpc::Url;
use tracing::Level;
use fvm_shared::econ::TokenAmount;
use fendermint_rpc::client::FendermintClient;
use fendermint_rpc::message::{GasParams, SignedMessageFactory};
use fendermint_rpc::tx::{CallClient, TxClient, TxCommit};
/// An ethers provider over a mock transport; never used to actually send requests.
type MockProvider = ethers::providers::Provider<ethers::providers::MockProvider>;
/// A contract call bound to the mock provider, used only to encode/decode calldata.
type MockContractCall<T> = ethers::prelude::ContractCall<MockProvider, T>;
// Generate a statically typed interface for the contract.
// This assumes the `builtin-actors` repo is checked in next to Fendermint,
// which the `make actor-bundle` command takes care of if it wasn't.
// This path starts from the root of this project, not this file.
abigen!(SimpleCoin, "../testing/contracts/SimpleCoin.abi");
const CONTRACT_HEX: &'static str = include_str!("../../testing/contracts/SimpleCoin.bin");
lazy_static! {
    /// Default gas params based on the testkit.
    ///
    /// Fee cap and premium are zero, so only gas accounting limits apply.
    static ref GAS_PARAMS: GasParams = GasParams {
        gas_limit: 10_000_000_000,
        gas_fee_cap: TokenAmount::default(),
        gas_premium: TokenAmount::default(),
    };
}
// Alternatively we can generate the ABI code as follows:
// ```
// ethers::prelude::Abigen::new("SimpleCoin", <path-to-abi>)
// .unwrap()
// .generate()
// .unwrap()
// .write_to_file("./simplecoin.rs")
// .unwrap();
// ```
// This approach combined with `build.rs` was explored in https://github.com/filecoin-project/ref-fvm/pull/1507
// Command-line options. NOTE: the `///` doc comments below double as clap's
// user-facing help text, so they must not be reworded casually.
#[derive(Parser, Debug)]
pub struct Options {
    /// The URL of the Tendermint node's RPC endpoint.
    #[arg(
        long,
        short,
        default_value = "http://127.0.0.1:26657",
        env = "TENDERMINT_RPC_URL"
    )]
    pub url: Url,
    /// Enable DEBUG logs.
    #[arg(long, short)]
    pub verbose: bool,
    /// Path to the secret key to deploy with, expected to be in Base64 format.
    #[arg(long, short)]
    pub secret_key: PathBuf,
}
impl Options {
    /// Log at DEBUG when `--verbose` is set, otherwise at INFO.
    pub fn log_level(&self) -> Level {
        match self.verbose {
            true => Level::DEBUG,
            false => Level::INFO,
        }
    }
}
/// See the module docs for how to run.
#[tokio::main]
async fn main() {
    let opts: Options = Options::parse();
    tracing_subscriber::fmt()
        .with_max_level(opts.log_level())
        .init();
    let client = FendermintClient::new_http(opts.url, None).expect("error creating client");
    let sk =
        SignedMessageFactory::read_secret_key(&opts.secret_key).expect("error reading secret key");
    // Query the account nonce from the state, so it doesn't need to be passed as an arg.
    let sn = sequence(&client, &sk)
        .await
        .expect("error getting sequence");
    // Query the chain ID, so it doesn't need to be passed as an arg.
    // We could get the chain name using `client.underlying().genesis().await?.chain_id.as_str()` as well.
    let chain_id = client
        .state_params(FvmQueryHeight::default())
        .await
        .expect("error getting state params")
        .value
        .chain_id;
    let mf = SignedMessageFactory::new_secp256k1(sk, sn, ChainID::from(chain_id));
    let mut client = client.bind(mf);
    run(&mut client).await.expect("failed to run example");
}
/// Deploy SimpleCoin, read the owner's balance (both via a transaction and a
/// read-only query, cross-checking the two), then send some coins.
async fn run(
    client: &mut (impl TxClient<TxCommit> + QueryClient + CallClient),
) -> anyhow::Result<()> {
    let create_return = deploy_contract(client)
        .await
        .context("failed to deploy contract")?;
    let contract_addr = create_return.delegated_address();
    tracing::info!(
        contract_address = contract_addr.to_string(),
        actor_id = create_return.actor_id,
        "contract deployed"
    );
    let owner_addr = client.address();
    let owner_id = actor_id(client, &owner_addr)
        .await
        .context("failed to fetch owner ID")?;
    let owner_eth_addr = EthAddress::from_id(owner_id);
    // Read the balance both ways and assert they agree.
    let balance_call = get_balance(client, &create_return.eth_address, &owner_eth_addr, false)
        .await
        .context("failed to get balance with call")?;
    let balance_tx = get_balance(client, &create_return.eth_address, &owner_eth_addr, true)
        .await
        .context("failed to get balance with tx")?;
    assert_eq!(
        balance_call, balance_tx,
        "balance read with or without a transaction should be the same"
    );
    tracing::info!(
        balance = format!("{}", balance_call),
        owner_eth_addr = hex::encode(&owner_eth_addr.0),
        "owner balance"
    );
    let _sufficient = send_coin(client, &create_return.eth_address, &owner_eth_addr, 100)
        .await
        .context("failed to send coin")?;
    Ok(())
}
/// Get the next sequence number (nonce) of an account.
///
/// The account is the f1 (secp256k1) address derived from the secret key,
/// matching how `SignedMessageFactory::new_secp256k1` derives the sender.
async fn sequence(client: &impl QueryClient, sk: &SecretKey) -> anyhow::Result<u64> {
    let pk = sk.public_key();
    // `expect` with the invariant stated, consistent with the other call sites.
    let addr = Address::new_secp256k1(&pk.serialize()).expect("public key is 65 bytes");
    let state = client
        .actor_state(&addr, FvmQueryHeight::default())
        .await
        .context("failed to get actor state")?;
    match state.value {
        Some((_id, state)) => Ok(state.sequence),
        None => Err(anyhow!("cannot find actor {addr}")),
    }
}
/// Resolve an address to its actor ID via the node's state API.
async fn actor_id(client: &impl QueryClient, addr: &Address) -> anyhow::Result<u64> {
    let state = client
        .actor_state(addr, FvmQueryHeight::default())
        .await
        .context("failed to get actor state")?;
    match state.value {
        Some((id, _state)) => Ok(id),
        None => Err(anyhow!("cannot find actor {addr}")),
    }
}
/// Deploy SimpleCoin.
async fn deploy_contract(client: &mut impl TxClient<TxCommit>) -> anyhow::Result<CreateReturn> {
let contract = hex::decode(&CONTRACT_HEX).context("error parsing contract")?;
let res = client
.fevm_create(
Bytes::from(contract),
Bytes::default(),
TokenAmount::default(),
GAS_PARAMS.clone(),
)
.await
.context("error deploying contract")?;
tracing::info!(tx_hash = ?res.response.hash, "deployment transaction");
let ret = res.return_data.ok_or(anyhow!(
"no CreateReturn data; response was {:?}",
res.response
))?;
Ok(ret)
}
/// Invoke or call SimpleCoin to query the balance of an account.
///
/// `in_transaction` selects between a committed transaction and a read-only query.
async fn get_balance(
    client: &mut (impl TxClient<TxCommit> + CallClient),
    contract_eth_addr: &EthAddress,
    owner_eth_addr: &EthAddress,
    in_transaction: bool,
) -> anyhow::Result<ethers::types::U256> {
    let contract = coin_contract(contract_eth_addr);
    let owner_h160_addr = eth_addr_to_h160(owner_eth_addr);
    // Only used to ABI-encode the calldata; nothing is sent through ethers itself.
    let call = contract.get_balance(owner_h160_addr);
    let balance = invoke_or_call_contract(client, contract_eth_addr, call, in_transaction)
        .await
        .context("failed to call contract")?;
    Ok(balance)
}
/// Invoke or call SimpleCoin to send some coins to self.
///
/// Always runs as a committed transaction; returns the contract's
/// "sufficient funds" flag.
async fn send_coin(
    client: &mut (impl TxClient<TxCommit> + CallClient),
    contract_eth_addr: &EthAddress,
    owner_eth_addr: &EthAddress,
    value: u32,
) -> anyhow::Result<bool> {
    let contract = coin_contract(contract_eth_addr);
    let owner_h160_addr = eth_addr_to_h160(owner_eth_addr);
    // Only used to ABI-encode the calldata; nothing is sent through ethers itself.
    let call = contract.send_coin(owner_h160_addr, U256::from(value));
    let sufficient: bool = invoke_or_call_contract(client, contract_eth_addr, call, true)
        .await
        .context("failed to call contract")?;
    Ok(sufficient)
}
/// Invoke FEVM through Tendermint with the calldata encoded by ethers, decoding the result into the expected type.
///
/// With `in_transaction` the message is submitted and committed; otherwise it
/// is executed as a query against a single node, without a transaction.
async fn invoke_or_call_contract<T: Tokenizable>(
    client: &mut (impl TxClient<TxCommit> + CallClient),
    contract_eth_addr: &EthAddress,
    call: MockContractCall<T>,
    in_transaction: bool,
) -> anyhow::Result<T> {
    let calldata: ethers::types::Bytes = call
        .calldata()
        .expect("calldata should contain function and parameters");
    let contract_addr = eth_addr_to_eam(contract_eth_addr);
    // We can perform the read as a distributed transaction (if we don't trust any particular node to give the right answer),
    // or we can send a query with the same message and get a result without involving a transaction.
    let return_data = if in_transaction {
        let res = client
            .fevm_invoke(
                contract_addr,
                calldata.0,
                TokenAmount::default(),
                GAS_PARAMS.clone(),
            )
            .await
            .context("failed to invoke FEVM")?;
        tracing::info!(tx_hash = ?res.response.hash, "invoked transaction");
        res.return_data
    } else {
        let res = client
            .fevm_call(
                contract_addr,
                calldata.0,
                TokenAmount::default(),
                GAS_PARAMS.clone(),
                FvmQueryHeight::default(),
            )
            .await
            .context("failed to call FEVM")?;
        res.return_data
    };
    // `ok_or_else` defers constructing the error to the failure path
    // (the previous `ok_or(anyhow!(..))` built it eagerly).
    let bytes = return_data.ok_or_else(|| anyhow!("the contract did not return any data"))?;
    let res = decode_function_data(&call.function, bytes, false)
        .context("error deserializing return data")?;
    Ok(res)
}
/// Create an instance of the statically typed contract client.
///
/// The ethers client is a mock: it is only used to encode calldata and decode
/// return values, never to talk to a node.
fn coin_contract(contract_eth_addr: &EthAddress) -> SimpleCoin<MockProvider> {
    // A dummy client that we don't intend to use to call the contract or send transactions.
    let (client, _mock) = ethers::providers::Provider::mocked();
    let contract_h160_addr = eth_addr_to_h160(contract_eth_addr);
    // Return the constructed client directly (the former `let`-then-return was redundant).
    SimpleCoin::new(contract_h160_addr, std::sync::Arc::new(client))
}
/// Convert our 20-byte `EthAddress` into the ethers `H160` address type.
fn eth_addr_to_h160(eth_addr: &EthAddress) -> H160 {
    ethers::core::types::Address::from_slice(&eth_addr.0)
}
/// Convert an eth address into an f410 (EAM-namespaced, delegated) Filecoin address.
fn eth_addr_to_eam(eth_addr: &EthAddress) -> Address {
    Address::new_delegated(eam::EAM_ACTOR_ID, &eth_addr.0)
        .expect("ETH address to delegated should work")
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/build.rs | fendermint/actors/build.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fil_actor_bundler::Bundler;
use std::error::Error;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::process::{Command, Stdio};
use std::thread;
/// Actor crates (suffixes of `fendermint_actor_*`) compiled to Wasm and bundled.
const ACTORS: &[&str] = &["chainmetadata", "eam", "cetf"];
/// Paths that trigger a rebuild of the bundle when changed.
const FILES_TO_WATCH: &[&str] = &["Cargo.toml", "src"];
/// Build script: cross-compile the actor crates to Wasm with a nested cargo
/// invocation, then bundle the resulting bytecode into a CAR file at
/// `output/custom_actors_bundle.car`.
fn main() -> Result<(), Box<dyn Error>> {
    // Cargo executable location.
    let cargo = std::env::var_os("CARGO").expect("no CARGO env var");
    let out_dir = std::env::var_os("OUT_DIR")
        .as_ref()
        .map(Path::new)
        .map(|p| p.join("bundle"))
        .expect("no OUT_DIR env var");
    println!("cargo:warning=out_dir: {:?}", &out_dir);
    let manifest_path =
        Path::new(&std::env::var_os("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR unset"))
            .join("Cargo.toml");
    // NOTE(review): ACTORS entries are crate names, not paths — assumes they
    // match directory names next to this build script; verify they exist.
    for file in [FILES_TO_WATCH, ACTORS].concat() {
        println!("cargo:rerun-if-changed={}", file);
    }
    // Cargo build command for all test_actors at once.
    let mut cmd = Command::new(cargo);
    cmd.arg("build")
        .args(
            ACTORS
                .iter()
                .map(|pkg| "-p=fendermint_actor_".to_owned() + pkg),
        )
        .arg("--target=wasm32-unknown-unknown")
        .arg("--profile=wasm")
        .arg("--features=fil-actor")
        .arg(format!("--manifest-path={}", manifest_path.display()))
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        // We are supposed to only generate artifacts under OUT_DIR,
        // so set OUT_DIR as the target directory for this build.
        .env("CARGO_TARGET_DIR", &out_dir)
        // As we are being called inside a build-script, this env variable is set. However, we set
        // our own `RUSTFLAGS` and thus, we need to remove this. Otherwise cargo favors this
        // env variable.
        .env_remove("CARGO_ENCODED_RUSTFLAGS");
    // Print out the command line we're about to run.
    println!("cargo:warning=cmd={:?}", &cmd);
    // Launch the command.
    let mut child = cmd.spawn().expect("failed to launch cargo build");
    // Pipe the output as cargo warnings. Unfortunately this is the only way to
    // get cargo build to print the output.
    let stdout = child.stdout.take().expect("no stdout");
    let stderr = child.stderr.take().expect("no stderr");
    let j1 = thread::spawn(move || {
        for line in BufReader::new(stderr).lines() {
            println!("cargo:warning={:?}", line.unwrap());
        }
    });
    let j2 = thread::spawn(move || {
        for line in BufReader::new(stdout).lines() {
            println!("cargo:warning={:?}", line.unwrap());
        }
    });
    // Drain both pipes fully before waiting, so the child can't block on a full pipe.
    j1.join().unwrap();
    j2.join().unwrap();
    let result = child.wait().expect("failed to wait for build to finish");
    if !result.success() {
        return Err("actor build failed".into());
    }
    // make sure the output dir exists
    std::fs::create_dir_all("output")
        .expect("failed to create output dir for the custom_actors_bundle.car file");
    let dst = Path::new("output/custom_actors_bundle.car");
    let mut bundler = Bundler::new(dst);
    // Actor IDs in the bundle are assigned sequentially starting at 1.
    for (&pkg, id) in ACTORS.iter().zip(1u32..) {
        let bytecode_path = Path::new(&out_dir)
            .join("wasm32-unknown-unknown/wasm")
            .join(format!("fendermint_actor_{}.wasm", pkg));
        // This actor version doesn't force synthetic CIDs; it uses genuine
        // content-addressed CIDs.
        let forced_cid = None;
        let cid = bundler
            .add_from_file(id, pkg.to_owned(), forced_cid, &bytecode_path)
            .unwrap_or_else(|err| {
                panic!(
                    "failed to add file {:?} to bundle for actor {}: {}",
                    bytecode_path, id, err
                )
            });
        println!(
            "cargo:warning=added {} ({}) to bundle with CID {}",
            pkg, id, cid
        );
    }
    bundler.finish().expect("failed to finish bundle");
    println!("cargo:warning=bundle={}", dst.display());
    Ok(())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/src/lib.rs | fendermint/actors/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod manifest;
pub use manifest::Manifest;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/src/manifest.rs | fendermint/actors/src/manifest.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use cid::Cid;
use fendermint_actor_cetf::CETF_ACTOR_NAME;
use fendermint_actor_chainmetadata::CHAINMETADATA_ACTOR_NAME;
use fendermint_actor_eam::IPC_EAM_ACTOR_NAME;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use std::collections::HashMap;
/// Actors that must be present in every manifest; `Manifest::new` fails otherwise.
pub const REQUIRED_ACTORS: &[&str] = &[
    CHAINMETADATA_ACTOR_NAME,
    IPC_EAM_ACTOR_NAME,
    CETF_ACTOR_NAME,
];
/// A mapping of internal actor CIDs to their respective types.
pub struct Manifest {
    // Actor name => CID of its Wasm code.
    code_by_name: HashMap<String, Cid>,
}
impl Manifest {
/// Load a manifest from the blockstore.
pub fn load<B: Blockstore>(bs: &B, root_cid: &Cid, ver: u32) -> anyhow::Result<Manifest> {
if ver != 1 {
return Err(anyhow!("unsupported manifest version {}", ver));
}
let vec: Vec<(String, Cid)> = match bs.get_cbor(root_cid)? {
Some(vec) => vec,
None => {
return Err(anyhow!("cannot find manifest root cid {}", root_cid));
}
};
Manifest::new(vec)
}
/// Construct a new manifest from actor name/cid tuples.
pub fn new(iter: impl IntoIterator<Item = (impl Into<String>, Cid)>) -> anyhow::Result<Self> {
let mut code_by_name = HashMap::new();
for (name, code_cid) in iter.into_iter() {
code_by_name.insert(name.into(), code_cid);
}
// loop over required actors and ensure they are present
for &name in REQUIRED_ACTORS.iter() {
let _ = code_by_name
.get(name)
.with_context(|| format!("manifest missing required actor {}", name))?;
}
Ok(Self { code_by_name })
}
pub fn code_by_name(&self, str: &str) -> Option<&Cid> {
self.code_by_name.get(str)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/cetf/src/lib.rs | fendermint/actors/cetf/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2021-2023 BadBoi Labs
// SPDX-License-Identifier: Apache-2.0, MIT
#[cfg(feature = "fil-actor")]
mod actor;
mod shared;
pub mod state;
pub use shared::*;
pub use state::State;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/cetf/src/actor.rs | fendermint/actors/cetf/src/actor.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2021-2023 BadBoi Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use core::hash;
use crate::state::State;
use crate::AddSignedBlockHeightTagParams;
use crate::AddSignedTagParams;
use crate::AddValidatorParams;
use crate::{EnqueueTagParams, GetTagParams};
use crate::{Method, CETF_ACTOR_NAME};
use fil_actors_runtime::actor_dispatch;
use fil_actors_runtime::actor_error;
use fil_actors_runtime::builtin::singletons::SYSTEM_ACTOR_ADDR;
use fil_actors_runtime::runtime::{ActorCode, Runtime};
use fil_actors_runtime::ActorError;
use sha3::{Digest, Keccak256};
// NOTE: the trampoline initializes a logger when debug mode is enabled.
// It generates the Wasm `invoke` entry point that dispatches into `Actor`.
fil_actors_runtime::wasm_trampoline!(Actor);
/// The CETF actor: queues tags for validators to sign and records the
/// resulting BLS signatures (see the method impls below).
pub struct Actor;
impl Actor {
    /// Initialize the HAMT store for tags in the actor state
    /// Callable only by the system actor at genesis
    pub fn constructor(rt: &impl Runtime) -> Result<(), ActorError> {
        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;
        log::info!("cetf actor constructor called");
        let st = State::new(rt.store())?;
        rt.create(&st)?;
        Ok(())
    }
    /// Diagnostic no-op: logs the caller and origin, callable by anyone.
    pub fn echo(rt: &impl Runtime, _params: ()) -> Result<(), ActorError> {
        rt.validate_immediate_caller_accept_any()?;
        log::info!(
            "echo called by {} from origin {}",
            rt.message().caller(),
            rt.message().origin()
        );
        Ok(())
    }
    /// Add a new tag to the state to be signed by the validators
    /// Callable by anyone and designed to be called from Solidity contracts
    ///
    /// Returns the epoch (block height) at which the signed tag is scheduled
    /// to become available.
    pub fn enqueue_tag(rt: &impl Runtime, tag: EnqueueTagParams) -> Result<u64, ActorError> {
        rt.validate_immediate_caller_accept_any()?;
        let calling_contract = rt
            .lookup_delegated_address(rt.message().caller().id().unwrap())
            .ok_or(ActorError::assertion_failed(
                "No delegated address for caller".to_string(),
            ))?;
        // Skips the first payload byte (presumably the delegated-address
        // namespace tag) to get the raw 20-byte eth address — the assert
        // below guards the assumption.
        let calling_eth_address = &calling_contract.payload_bytes()[1..];
        assert!(calling_eth_address.len() == 20, "Invalid eth address length");
        // hash together the calling address and the tag to create a unique identifier for the tag
        let mut hashdata = Vec::new();
        hashdata.extend_from_slice(&calling_eth_address);
        hashdata.extend_from_slice(&tag.tag.0);
        let mut signing_tag = [0x0_u8; 32];
        signing_tag.copy_from_slice(&Keccak256::digest(hashdata));
        log::info!(
            "cetf actor enqueue_tag called by {} with tag {:?}. Resulting signing tag is {:?}",
            hex::encode(calling_eth_address),
            tag,
            &signing_tag,
        );
        let scheduled_epoch = rt.transaction(|st: &mut State, rt| {
            // +2 because the Validators sign the tag in the next epoch
            // then it gets included into the block one more epoch after that
            // Then in 1 MORE epoch, it should be available to be queried in VM
            let scheduled_epoch = rt.curr_epoch() + 2;
            if st.enabled {
                // NOTE: use of epoch is intentional here. In fendermint the epoch is the block height
                log::info!(
                    "Cetf actor enqueue_tag called by {} with tag {:?} for height: {}",
                    rt.message().caller(),
                    &signing_tag,
                    scheduled_epoch
                );
                st.add_tag_at_height(rt, &(scheduled_epoch as u64), &signing_tag.into())?;
            } else {
                // The scheduled epoch is still returned even though nothing was enqueued.
                log::info!("CETF actor is disabled. Not all validators have added their keys. No tag was enqueued.");
            }
            Ok(scheduled_epoch)
        })?;
        Ok(scheduled_epoch as u64)
    }
    /// Look up the tag at the given height.
    ///
    /// NOTE(review): the looked-up value is discarded — callers only learn
    /// whether the lookup itself errored. Confirm this is intended.
    pub fn get_tag(rt: &impl Runtime, params: GetTagParams) -> Result<(), ActorError> {
        log::info!("get_tag called");
        rt.validate_immediate_caller_accept_any()?;
        let state: State = rt.state()?;
        state.get_tag_at_height(rt.store(), &params.height)?;
        Ok(())
    }
    /// Enable tag enqueueing. Callable only by the system actor.
    pub fn enable(rt: &impl Runtime) -> Result<(), ActorError> {
        log::info!("enable called");
        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;
        rt.transaction(|st: &mut State, _rt| {
            st.enabled = true;
            Ok(())
        })?;
        Ok(())
    }
    // TODO: Should be unused for now. Need to figure out the mechanics for validator set changes. Assume static.
    pub fn disable(rt: &impl Runtime) -> Result<(), ActorError> {
        log::info!("disable called");
        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;
        rt.transaction(|st: &mut State, _rt| {
            st.enabled = false;
            Ok(())
        })?;
        Ok(())
    }
    // TODO: We should use message.sender instead of having the address as a parameter.
    // Leaving this as is for now because its just easier to write scripts for testing because we can send from the same sender.
    pub fn add_validator(rt: &impl Runtime, params: AddValidatorParams) -> Result<(), ActorError> {
        log::info!(
            "add_validator called with caller: {}",
            rt.message().caller()
        );
        rt.validate_immediate_caller_accept_any()?;
        rt.transaction(|st: &mut State, rt| {
            st.add_validator(rt.store(), &params.address, &params.public_key)?;
            Ok(())
        })?;
        Ok(())
    }
    /// Record the validators' signature over a previously enqueued tag.
    /// Callable only by the system actor.
    pub fn add_signed_tag(rt: &impl Runtime, params: AddSignedTagParams) -> Result<(), ActorError> {
        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;
        rt.transaction(|st: &mut State, rt| {
            st.add_signed_tag_at_height(rt, &params.height, &params.signature)?;
            Ok(())
        })?;
        Ok(())
    }
    // pub fn add_signed_blockheight_tag(
    //     rt: &impl Runtime,
    //     params: AddSignedBlockHeightTagParams,
    // ) -> Result<(), ActorError> {
    //     rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;
    //     rt.transaction(|st: &mut State, rt| {
    //         st.add_signed_blockheight_tag_at_height(rt, &params.height, &params.signature)?;
    //         Ok(())
    //     })?;
    //     Ok(())
    // }
}
impl ActorCode for Actor {
    type Methods = Method;
    fn name() -> &'static str {
        CETF_ACTOR_NAME
    }
    // Maps each exported method number to its handler; the macro generates
    // the actor's dispatch logic.
    actor_dispatch! {
        Constructor => constructor,
        Echo => echo,
        EnqueueTag => enqueue_tag,
        GetTag => get_tag,
        Enable => enable,
        AddValidator => add_validator,
        Disable => disable,
        AddSignedTag => add_signed_tag,
        // AddSignedBlockHeightTag => add_signed_blockheight_tag,
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/cetf/src/state.rs | fendermint/actors/cetf/src/state.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::{BlockHeight, Tag};
use crate::{BlsPublicKey, BlsSignature};
use cid::Cid;
use fil_actors_runtime::actor_error;
use fil_actors_runtime::{runtime::Runtime, ActorError, Map2};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::tuple::*;
use fvm_sdk::crypto::hash_into;
use fvm_shared::address::Address;
use fvm_shared::crypto::hash::SupportedHashes;
// Root types of the three HAMTs referenced from `State` (key => value noted there).
pub type TagMap<BS> = Map2<BS, BlockHeight, Tag>;
pub type ValidatorBlsPublicKeyMap<BS> = Map2<BS, Address, BlsPublicKey>;
pub type SignedHashedTagMap<BS> = Map2<BS, Tag, BlsSignature>;
pub use fil_actors_runtime::DEFAULT_HAMT_CONFIG;
/// On-chain state of the CETF actor. Each `Cid` field is the root of a HAMT
/// stored in the blockstore.
#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)]
pub struct State {
    pub tag_map: Cid, // HAMT[BlockHeight] => Tag
    pub validators: Cid, // HAMT[Address] => BlsPublicKey (Assumes static validator set)
    // When false, `enqueue_tag` in actor.rs skips adding tags.
    pub enabled: bool,
    pub signed_hashed_tags: Cid, // HAMT[HashedTag] => BlsSignature(bytes 96)
}
impl State {
    /// Create a fresh state with three empty HAMTs; starts disabled.
    pub fn new<BS: Blockstore>(store: &BS) -> Result<State, ActorError> {
        let tag_map = TagMap::empty(store, DEFAULT_HAMT_CONFIG, "empty tag_map").flush()?;
        let validators =
            ValidatorBlsPublicKeyMap::empty(store, DEFAULT_HAMT_CONFIG, "empty validators")
                .flush()?;
        let signed_hashed_tags =
            SignedHashedTagMap::empty(store, DEFAULT_HAMT_CONFIG, "empty signed_hashed_tags")
                .flush()?;
        Ok(State {
            tag_map,
            validators,
            enabled: false,
            signed_hashed_tags,
        })
    }
    /// Register (or overwrite) a validator's BLS public key.
    pub fn add_validator<BS: Blockstore>(
        &mut self,
        store: &BS,
        address: &Address,
        public_key: &BlsPublicKey,
    ) -> Result<(), ActorError> {
        let mut validators = ValidatorBlsPublicKeyMap::load(
            store,
            &self.validators,
            DEFAULT_HAMT_CONFIG,
            "reading validators",
        )?;
        validators.set(address, public_key.clone())?;
        // Flush and store the new HAMT root.
        self.validators = validators.flush()?;
        Ok(())
    }
    /// Schedule a tag to be signed at the given block height (overwrites any
    /// existing tag at that height).
    pub fn add_tag_at_height(
        &mut self,
        rt: &impl Runtime,
        height: &BlockHeight,
        tag: &Tag,
    ) -> Result<(), ActorError> {
        let mut tag_map = TagMap::load(
            rt.store(),
            &self.tag_map,
            DEFAULT_HAMT_CONFIG,
            "writing tag_map",
        )?;
        tag_map.set(&height, tag.clone())?;
        self.tag_map = tag_map.flush()?;
        log::info!(
            "Scheduled Cetf Tag for height {}. Current FVM epoch: {}. Tag: {:?}",
            height,
            rt.curr_epoch(),
            tag.0,
        );
        Ok(())
    }
    /// Look up the tag scheduled at the given height, if any.
    pub fn get_tag_at_height<BS: Blockstore>(
        &self,
        store: &BS,
        height: &BlockHeight,
    ) -> Result<Option<Tag>, ActorError> {
        let tag_map = TagMap::load(store, &self.tag_map, DEFAULT_HAMT_CONFIG, "reading tag_map")?;
        Ok(tag_map.get(&height)?.copied())
    }
    /// Load the validator => BLS public key HAMT for reading.
    pub fn get_validators_keymap<BS: Blockstore>(
        &self,
        store: BS,
    ) -> Result<ValidatorBlsPublicKeyMap<BS>, ActorError> {
        ValidatorBlsPublicKeyMap::load(
            store,
            &self.validators,
            DEFAULT_HAMT_CONFIG,
            "reading validators",
        )
    }
    /// Record the signature for the tag scheduled at `height`; fails if no
    /// tag was scheduled there.
    pub fn add_signed_tag_at_height(
        &mut self,
        rt: &impl Runtime,
        height: &BlockHeight,
        signature: &BlsSignature,
    ) -> Result<(), ActorError> {
        let tag = self
            .get_tag_at_height(rt.store(), height)?
            .ok_or_else(|| actor_error!(illegal_state, "Tag not found at height {}", height))?;
        self.add_signed_and_hashed_tag(rt, tag, signature)?;
        log::info!(
            "Added Signed Cetf Tag into map at height {}. FVM epoch: {}.",
            height,
            rt.curr_epoch(),
        );
        log::trace!(
            r#"Tag: {:?}
            Signature: {:?}"#,
            tag.0,
            signature,
        );
        Ok(())
    }
    /// Record a signature keyed by the SHA2-256 hash of the big-endian block
    /// height (used when signing heights rather than enqueued tags).
    pub fn add_signed_blockheight_tag_at_height(
        &mut self,
        rt: &impl Runtime,
        height: &BlockHeight,
        signature: &BlsSignature,
    ) -> Result<(), ActorError> {
        let pre = height.to_be_bytes().to_vec();
        let mut digest = [0u8; 32];
        hash_into(SupportedHashes::Sha2_256, &pre, &mut digest);
        self.add_signed_and_hashed_tag(rt, digest.into(), signature)?;
        log::info!(
            "Added Signed BlockHeight into map at reported height {}. FVM epoch: {}.",
            height,
            rt.curr_epoch(),
        );
        log::trace!(
            r#"Height: {:?}
            Hashed Blockheight (tag): {:?}
            Signature: {:?}"#,
            pre,
            digest,
            signature,
        );
        Ok(())
    }
    /// Store a signature under its (already hashed) tag in the HAMT.
    pub fn add_signed_and_hashed_tag(
        &mut self,
        rt: &impl Runtime,
        tag: Tag,
        signature: &BlsSignature,
    ) -> Result<(), ActorError> {
        let mut signed_hashed_tags = SignedHashedTagMap::load(
            rt.store(),
            &self.signed_hashed_tags,
            DEFAULT_HAMT_CONFIG,
            "writing signed_hashed_tags",
        )?;
        signed_hashed_tags.set(&tag, signature.clone())?;
        self.signed_hashed_tags = signed_hashed_tags.flush()?;
        Ok(())
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/cetf/src/shared.rs | fendermint/actors/cetf/src/shared.rs | use std::ops::{Deref, DerefMut};
use fil_actors_runtime::MapKey;
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_ipld_encoding::{
strict_bytes,
tuple::{Deserialize_tuple, Serialize_tuple},
};
use fvm_shared::address::Address;
use num_derive::FromPrimitive;
use serde::{Deserialize, Serialize};
/// Block height at which a tag is scheduled/signed; used as the HAMT key
/// type for the actor's tag map.
pub type BlockHeight = u64;
/// Name under which the cetf actor is registered.
pub const CETF_ACTOR_NAME: &str = "cetf";
/// Tag which will be signed by Validators.
///
/// A fixed 32-byte value; also usable as a HAMT key via the `MapKey` impl.
#[derive(Deserialize, Serialize, Clone, Copy, Eq, PartialEq, Debug)]
#[serde(transparent)]
pub struct Tag(#[serde(with = "strict_bytes")] pub [u8; 32]);

impl Default for Tag {
    /// The all-zero tag.
    fn default() -> Self {
        Tag([0; 32])
    }
}

impl From<[u8; 32]> for Tag {
    fn from(bytes: [u8; 32]) -> Self {
        Tag(bytes)
    }
}

impl From<&[u8; 32]> for Tag {
    fn from(bytes: &[u8; 32]) -> Self {
        Tag(*bytes)
    }
}

impl Deref for Tag {
    type Target = [u8; 32];
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Tag {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl MapKey for Tag {
    fn to_bytes(&self) -> Result<Vec<u8>, String> {
        Ok(self.0.to_vec())
    }

    /// Decode a `Tag` key from raw HAMT bytes.
    ///
    /// Returns an `Err` when `bytes` is not exactly 32 bytes long instead of
    /// panicking (the previous unchecked `copy_from_slice` would abort the
    /// actor on a malformed key).
    fn from_bytes(bytes: &[u8]) -> Result<Self, String> {
        if bytes.len() != 32 {
            return Err(format!(
                "invalid Tag key length: expected 32 bytes, got {}",
                bytes.len()
            ));
        }
        let mut buf = [0u8; 32];
        buf.copy_from_slice(bytes);
        Ok(Self(buf))
    }
}
/// A 48-byte BLS public key belonging to a validator that signs tags.
#[derive(Deserialize, Serialize, Clone, Copy, Eq, PartialEq, Debug)]
#[serde(transparent)]
pub struct BlsPublicKey(#[serde(with = "strict_bytes")] pub [u8; 48]);

impl Default for BlsPublicKey {
    /// All-zero placeholder key.
    fn default() -> Self {
        Self([0u8; 48])
    }
}

impl From<[u8; 48]> for BlsPublicKey {
    fn from(raw: [u8; 48]) -> Self {
        Self(raw)
    }
}

impl From<&[u8; 48]> for BlsPublicKey {
    fn from(raw: &[u8; 48]) -> Self {
        Self(*raw)
    }
}
/// A 96-byte BLS signature produced by validators over a tag.
/// (The original comment mislabelled this as a public key.)
#[derive(Deserialize, Serialize, Clone, Copy, Eq, PartialEq, Debug)]
#[serde(transparent)]
pub struct BlsSignature(#[serde(with = "strict_bytes")] pub [u8; 96]);

impl Default for BlsSignature {
    /// All-zero placeholder signature.
    fn default() -> Self {
        Self([0u8; 96])
    }
}

impl From<[u8; 96]> for BlsSignature {
    fn from(raw: [u8; 96]) -> Self {
        Self(raw)
    }
}

impl From<&[u8; 96]> for BlsSignature {
    fn from(raw: &[u8; 96]) -> Self {
        Self(*raw)
    }
}
/// Parameters for `Method::EnqueueTag`: the tag to schedule for signing.
#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)]
pub struct EnqueueTagParams {
    pub tag: Tag,
}
/// Parameters for `Method::GetTag`: look up the tag scheduled at `height`.
#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)]
pub struct GetTagParams {
    pub height: BlockHeight,
}
/// Parameters for `Method::AddValidator`: register `public_key` as the BLS
/// key for the validator at `address`.
#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
pub struct AddValidatorParams {
    pub address: Address,
    pub public_key: BlsPublicKey,
}
/// Parameters for `Method::AddSignedTag`: submit `signature` over the tag
/// scheduled at `height`.
#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
pub struct AddSignedTagParams {
    pub height: BlockHeight,
    pub signature: BlsSignature,
}
/// A 32-byte hash value, usable as a HAMT key via the `MapKey` impl.
#[derive(Deserialize, Serialize, Clone, Copy, Eq, PartialEq, Debug, Default)]
#[serde(transparent)]
pub struct Hash32(#[serde(with = "strict_bytes")] pub [u8; 32]);

impl MapKey for Hash32 {
    fn to_bytes(&self) -> Result<Vec<u8>, String> {
        Ok(self.0.to_vec())
    }

    /// Decode a `Hash32` key from raw HAMT bytes.
    ///
    /// Returns an `Err` when `bytes` is not exactly 32 bytes long instead of
    /// panicking (the previous unchecked `copy_from_slice` would abort the
    /// actor on a malformed key).
    fn from_bytes(bytes: &[u8]) -> Result<Self, String> {
        if bytes.len() != 32 {
            return Err(format!(
                "invalid Hash32 key length: expected 32 bytes, got {}",
                bytes.len()
            ));
        }
        let mut buf = [0u8; 32];
        buf.copy_from_slice(bytes);
        Ok(Hash32(buf))
    }
}

impl From<[u8; 32]> for Hash32 {
    fn from(bytes: [u8; 32]) -> Self {
        Hash32(bytes)
    }
}
/// Parameters for submitting a signature over the hashed block height itself
/// (see `State::add_signed_blockheight_tag_at_height`): `signature` covers the
/// SHA2-256 hash of `height`'s big-endian bytes.
#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)]
pub struct AddSignedBlockHeightTagParams {
    pub height: BlockHeight,
    pub signature: BlsSignature,
}
/// Dispatch method numbers for the cetf actor, derived from the method name
/// via the FRC-0042 hashing convention (`frc42_dispatch::method_hash!`).
#[derive(FromPrimitive)]
#[repr(u64)]
pub enum Method {
    Constructor = frc42_dispatch::method_hash!("Constructor"),
    Echo = frc42_dispatch::method_hash!("Echo"),
    EnqueueTag = frc42_dispatch::method_hash!("EnqueueTag"),
    GetTag = frc42_dispatch::method_hash!("GetTag"),
    AddValidator = frc42_dispatch::method_hash!("AddValidator"),
    Enable = frc42_dispatch::method_hash!("Enable"),
    Disable = frc42_dispatch::method_hash!("Disable"),
    AddSignedTag = frc42_dispatch::method_hash!("AddSignedTag"),
    // NOTE(review): disabled export — confirm whether AddSignedBlockHeightTag
    // is meant to be dispatchable before re-enabling this variant.
    // AddSignedBlockHeightTag = frc42_dispatch::method_hash!("AddSignedBlockHeightTag"),
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.