repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/chainmetadata/src/lib.rs | fendermint/actors/chainmetadata/src/lib.rs | // Copyright 2021-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
#[cfg(feature = "fil-actor")]
mod actor;
mod shared;
pub use shared::*;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/chainmetadata/src/actor.rs | fendermint/actors/chainmetadata/src/actor.rs | // Copyright 2021-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fil_actors_runtime::actor_dispatch;
use fil_actors_runtime::actor_error;
use fil_actors_runtime::builtin::singletons::SYSTEM_ACTOR_ADDR;
use fil_actors_runtime::runtime::{ActorCode, Runtime};
use fil_actors_runtime::ActorDowncast;
use fil_actors_runtime::ActorError;
use fil_actors_runtime::Array;
use fvm_shared::clock::ChainEpoch;
use fvm_shared::error::ExitCode;
use crate::{
BlockHash, ConstructorParams, Method, PushBlockParams, State, CHAINMETADATA_ACTOR_NAME,
};
// Generate the WASM entry point that dispatches incoming calls to `Actor`.
fil_actors_runtime::wasm_trampoline!(Actor);

/// The chainmetadata actor: records the block hashes of recent epochs on chain.
pub struct Actor;
impl Actor {
    /// Initializes the actor state with an empty blockhashes AMT and the
    /// configured lookback length. Only callable by the system actor.
    fn constructor(rt: &impl Runtime, params: ConstructorParams) -> Result<(), ActorError> {
        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;

        let state = State::new(rt.store(), params.lookback_len).map_err(|e| {
            e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to create empty AMT")
        })?;
        rt.create(&state)?;

        Ok(())
    }

    /// Records the block hash for `params.epoch`, evicting the oldest entry
    /// once more than `lookback_len` hashes are stored. Only callable by the
    /// system actor.
    ///
    /// Fix: the AMT `set`/`for_each_while`/`delete` results were `.unwrap()`ed,
    /// which would abort the actor on a store error; they are now mapped to
    /// `ActorError` like the `load`/`flush` calls already were.
    fn push_block_hash(rt: &impl Runtime, params: PushBlockParams) -> Result<(), ActorError> {
        rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?;

        rt.transaction(|st: &mut State, rt| {
            // load the blockhashes AMT
            let mut blockhashes = Array::load(&st.blockhashes, rt.store()).map_err(|e| {
                e.downcast_default(
                    ExitCode::USR_ILLEGAL_STATE,
                    "failed to load blockhashes states",
                )
            })?;

            // push the block to the AMT
            // NOTE(review): a negative epoch would wrap when cast to u64; only the
            // system actor calls this, so epochs are assumed non-negative.
            blockhashes.set(params.epoch as u64, params.block).map_err(|e| {
                e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set block hash")
            })?;

            // remove the oldest block if the AMT is full (note that this assume the
            // for_each_while iterates in order, which it seems to do)
            if blockhashes.count() > st.lookback_len {
                let mut first_idx = 0;
                blockhashes
                    .for_each_while(|i, _: &BlockHash| {
                        first_idx = i;
                        Ok(false)
                    })
                    .map_err(|e| {
                        e.downcast_default(
                            ExitCode::USR_ILLEGAL_STATE,
                            "failed to find oldest block hash",
                        )
                    })?;
                blockhashes.delete(first_idx).map_err(|e| {
                    e.downcast_default(
                        ExitCode::USR_ILLEGAL_STATE,
                        "failed to delete oldest block hash",
                    )
                })?;
            }

            // save the new blockhashes AMT cid root
            st.blockhashes = blockhashes.flush().map_err(|e| {
                e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save blockhashes")
            })?;

            Ok(())
        })?;

        Ok(())
    }

    /// Returns the configured maximum number of retained block hashes.
    fn lookback_len(rt: &impl Runtime) -> Result<u64, ActorError> {
        let state: State = rt.state()?;
        Ok(state.lookback_len)
    }

    /// Returns the block hash stored at `epoch`, or `None` if it is not
    /// (or no longer) retained.
    fn get_block_hash(
        rt: &impl Runtime,
        epoch: ChainEpoch,
    ) -> Result<Option<BlockHash>, ActorError> {
        let st: State = rt.state()?;

        st.get_block_hash(rt.store(), epoch)
            .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get blockhash"))
    }
}
impl ActorCode for Actor {
    type Methods = Method;

    fn name() -> &'static str {
        CHAINMETADATA_ACTOR_NAME
    }

    // Maps the FRC-42 method numbers declared in `Method` to the handlers above.
    actor_dispatch! {
        Constructor => constructor,
        PushBlockHash => push_block_hash,
        LookbackLen => lookback_len,
        GetBlockHash => get_block_hash,
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/chainmetadata/src/shared.rs | fendermint/actors/chainmetadata/src/shared.rs | // Copyright 2021-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use fvm_ipld_amt::Amt;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple};
use fvm_shared::{clock::ChainEpoch, METHOD_CONSTRUCTOR};
use num_derive::FromPrimitive;
// The state stores the blockhashes of the last `lookback_len` epochs
#[derive(Serialize_tuple, Deserialize_tuple)]
pub struct State {
    // the AMT root cid of blockhashes
    //
    // TODO: consider using kamt instead due to appending larger and
    // larger keys to the AMT makes it unbalanced requiring more space
    // to store (see https://github.com/filecoin-project/go-amt-ipld/issues/17)
    pub blockhashes: Cid,

    // the maximum size of blockhashes before removing the oldest epoch;
    // enforced by the actor's `push_block_hash`, not by this struct itself
    pub lookback_len: u64,
}
impl State {
pub fn new<BS: Blockstore>(store: &BS, lookback_len: u64) -> anyhow::Result<Self> {
let empty_blockhashes_cid =
match Amt::<(), _>::new_with_bit_width(store, BLOCKHASHES_AMT_BITWIDTH).flush() {
Ok(cid) => cid,
Err(e) => {
return Err(anyhow::anyhow!(
"chainmetadata actor failed to create empty Amt: {}",
e
))
}
};
Ok(Self {
blockhashes: empty_blockhashes_cid,
lookback_len,
})
}
// loads the blockhashes array from the AMT root cid and returns the blockhash
// at the given epoch
pub fn get_block_hash<BS: Blockstore>(
&self,
store: &BS,
epoch: ChainEpoch,
) -> anyhow::Result<Option<BlockHash>> {
// load the blockhashes array from the AMT root cid
let blockhashes = match Amt::load(&self.blockhashes, &store) {
Ok(v) => v,
Err(e) => {
return Err(anyhow::anyhow!(
"failed to load blockhashes from AMT cid {}, error: {}",
self.blockhashes,
e
));
}
};
// get the block hash at the given epoch
match blockhashes.get(epoch as u64) {
Ok(Some(v)) => Ok(Some(*v)),
Ok(None) => Ok(None),
Err(err) => Err(anyhow::anyhow!(
"failed to get blockhash at epoch {}, error: {}",
epoch,
err
)),
}
}
}
pub const CHAINMETADATA_ACTOR_NAME: &str = "chainmetadata";

// the default lookback length is 256 epochs
pub const DEFAULT_LOOKBACK_LEN: u64 = 256;

// the default bitwidth of the blockhashes AMT
pub const BLOCKHASHES_AMT_BITWIDTH: u32 = 3;

// Parameters accepted by the actor's constructor.
#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)]
pub struct ConstructorParams {
    // maximum number of epochs to retain block hashes for
    pub lookback_len: u64,
}

// A 32-byte block hash. NOTE(review): the hash's origin (consensus layer?) is
// not visible from this file — confirm with callers before relying on it.
pub type BlockHash = [u8; 32];

// Parameters for recording one block hash at a given epoch.
#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)]
pub struct PushBlockParams {
    // epoch the hash belongs to
    pub epoch: ChainEpoch,
    // hash of the block at `epoch`
    pub block: BlockHash,
}

// Actor methods: the standard constructor plus FRC-42 derived method numbers.
#[derive(FromPrimitive)]
#[repr(u64)]
pub enum Method {
    Constructor = METHOD_CONSTRUCTOR,
    PushBlockHash = frc42_dispatch::method_hash!("PushBlockHash"),
    LookbackLen = frc42_dispatch::method_hash!("LookbackLen"),
    GetBlockHash = frc42_dispatch::method_hash!("GetBlockHash"),
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/eam/src/lib.rs | fendermint/actors/eam/src/lib.rs | // Copyright 2021-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fil_actor_eam::{EamActor, Method};
use fil_actors_runtime::runtime::builtins::Type;
use fil_actors_runtime::runtime::{ActorCode, Runtime};
use fil_actors_runtime::ActorError;
use fil_actors_runtime::EAM_ACTOR_ID;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::ipld_block::IpldBlock;
use fvm_ipld_encoding::tuple::*;
use fvm_shared::address::Address;
use fvm_shared::{ActorID, MethodNum};
use num_derive::FromPrimitive;
use crate::state::PermissionMode;
pub use crate::state::PermissionModeParams;
pub use crate::state::State;
mod state;
// Export the WASM entry point only when compiled as an on-chain actor.
#[cfg(feature = "fil-actor")]
fil_actors_runtime::wasm_trampoline!(IPCEamActor);

pub const IPC_EAM_ACTOR_NAME: &str = "eam";
// Reuses the builtin EAM actor's singleton ID.
pub const IPC_EAM_ACTOR_ID: ActorID = EAM_ACTOR_ID;

/// Wrapper around the builtin EAM actor that adds deployment permissioning.
pub struct IPCEamActor;

// Methods added on top of the builtin EAM's method set (FRC-42 numbered).
#[derive(FromPrimitive)]
#[repr(u64)]
pub enum ExtraMethods {
    UpdateDeployers = frc42_dispatch::method_hash!("UpdateDeployers"),
}
impl IPCEamActor {
    /// Creates the actor. If the `whitelisted_deployers` is empty, that means there is no restriction
    /// for deployment, i.e any address can deploy.
    pub fn constructor(rt: &impl Runtime, args: ConstructorParams) -> Result<(), ActorError> {
        EamActor::constructor(rt)?;

        let initial_state = State::new(rt.store(), args.permission_mode)?;
        rt.create(&initial_state)?;

        Ok(())
    }

    /// Verifies that the immediate caller may deploy contracts, returning a
    /// forbidden error when it may not.
    fn ensure_deployer_allowed(rt: &impl Runtime) -> Result<(), ActorError> {
        // The caller is guaranteed to be an ID address.
        let caller_id = rt.message().caller().id().unwrap();

        // Contracts are always allowed to deploy: in permissioned mode an existing
        // contract was either present at genesis or deployed by a whitelisted
        // account, so whether it creates other contracts was known up front; had
        // that been undesirable it would not have been deployed as it is.
        let code_cid = rt.get_actor_code_cid(&caller_id).expect("caller has code");
        if rt.resolve_builtin_actor_type(&code_cid) == Some(Type::EVM) {
            return Ok(());
        }

        // Non-contract callers must pass the allowlist check (which also covers
        // the unrestricted mode).
        let state: State = rt.state()?;
        if state.can_deploy(rt, caller_id)? {
            Ok(())
        } else {
            Err(ActorError::forbidden(String::from(
                "sender not allowed to deploy contracts",
            )))
        }
    }

    /// Replaces the allowlist of deployers. Only a currently whitelisted sender
    /// may call this, and only when the actor runs in allowlist mode.
    fn update_deployers(rt: &impl Runtime, deployers: Vec<Address>) -> Result<(), ActorError> {
        let state: State = rt.state()?;

        // Updating is meaningless when deployment is unrestricted.
        let in_allowlist_mode = matches!(state.permission_mode, PermissionMode::AllowList(_));
        if !in_allowlist_mode {
            return Err(ActorError::forbidden(String::from(
                "deployers can only be updated in allowlist mode",
            )));
        }

        // Only a caller on the current allowlist may change the list.
        let caller_id = rt.message().caller().id().unwrap();
        if !state.can_deploy(rt, caller_id)? {
            return Err(ActorError::forbidden(String::from(
                "sender not allowed to update deployers",
            )));
        }

        // Swap in a freshly-built allowlist.
        rt.transaction(|st: &mut State, rt| {
            st.permission_mode =
                State::new(rt.store(), PermissionModeParams::AllowList(deployers))?.permission_mode;
            Ok(())
        })?;

        Ok(())
    }
}
impl ActorCode for IPCEamActor {
    type Methods = Method;

    fn name() -> &'static str {
        IPC_EAM_ACTOR_NAME
    }

    /// Dispatches the constructor and the allowlist-update method locally;
    /// everything else is forwarded to the builtin EAM actor after checking
    /// that the caller is allowed to deploy.
    fn invoke_method<RT>(
        rt: &RT,
        method: MethodNum,
        params: Option<IpldBlock>,
    ) -> Result<Option<IpldBlock>, ActorError>
    where
        RT: Runtime,
        RT::Blockstore: Blockstore + Clone,
    {
        if method == Method::Constructor as u64 {
            return fil_actors_runtime::dispatch(rt, method, Self::constructor, params);
        }
        if method == ExtraMethods::UpdateDeployers as u64 {
            return fil_actors_runtime::dispatch(rt, method, Self::update_deployers, params);
        }

        // All remaining methods create contracts; gate them on the allowlist.
        Self::ensure_deployer_allowed(rt)?;
        EamActor::invoke_method(rt, method, params)
    }
}
/// Constructor parameters: the initial deployment permission mode.
#[derive(Debug, Serialize_tuple, Deserialize_tuple)]
pub struct ConstructorParams {
    permission_mode: PermissionModeParams,
}
#[cfg(test)]
mod tests {
    use fil_actor_eam::ext::evm::ConstructorParams;
    use fil_actor_eam::ext::init::{Exec4Params, Exec4Return, EXEC4_METHOD};
    use fil_actor_eam::{compute_address_create, CreateExternalParams, CreateParams, Return};
    use fil_actors_evm_shared::address::EthAddress;
    use fil_actors_runtime::runtime::builtins::Type;
    use fil_actors_runtime::test_utils::{
        expect_empty, MockRuntime, ETHACCOUNT_ACTOR_CODE_ID, EVM_ACTOR_CODE_ID,
        SYSTEM_ACTOR_CODE_ID,
    };
    use fil_actors_runtime::INIT_ACTOR_ADDR;
    use fil_actors_runtime::SYSTEM_ACTOR_ADDR;
    use fvm_ipld_encoding::ipld_block::IpldBlock;
    use fvm_ipld_encoding::RawBytes;
    use fvm_shared::address::Address;
    use fvm_shared::econ::TokenAmount;
    use fvm_shared::error::ExitCode;
    use fvm_shared::MethodNum;

    use crate::state::PermissionModeParams;
    use crate::{ConstructorParams as IPCConstructorParams, ExtraMethods, IPCEamActor, Method};

    // Fix: this module contained HTML-entity mojibake from extraction —
    // `&eth_addr` had become `ð_addr` and `&params` had become `¶ms`;
    // all such references are restored below.

    /// Construct the IPC EAM actor as the system actor; an empty `deployers`
    /// list yields unrestricted mode, otherwise an allowlist.
    pub fn construct_and_verify(deployers: Vec<Address>) -> MockRuntime {
        let rt = MockRuntime {
            receiver: Address::new_id(10),
            ..Default::default()
        };

        // construct EAM singleton actor
        rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR);
        rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]);

        let permission_mode = if deployers.is_empty() {
            PermissionModeParams::Unrestricted
        } else {
            PermissionModeParams::AllowList(deployers)
        };

        let result = rt
            .call::<IPCEamActor>(
                Method::Constructor as u64,
                IpldBlock::serialize_cbor(&IPCConstructorParams { permission_mode }).unwrap(),
            )
            .unwrap();
        expect_empty(result);
        rt.verify();
        rt.reset();
        rt
    }

    /// A non-whitelisted external account must be rejected with USR_FORBIDDEN.
    #[test]
    fn test_create_not_allowed() {
        let deployers = vec![Address::new_id(1000), Address::new_id(2000)];
        let rt = construct_and_verify(deployers);

        let id_addr = Address::new_id(10000);
        let eth_addr = EthAddress(hex_literal::hex!(
            "CAFEB0BA00000000000000000000000000000000"
        ));
        let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap();

        rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr);
        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr);

        let create_params = CreateExternalParams(vec![0xff]);

        let exit_code = rt
            .call::<IPCEamActor>(
                Method::CreateExternal as u64,
                IpldBlock::serialize_cbor(&create_params).unwrap(),
            )
            .unwrap_err()
            .exit_code();

        assert_eq!(exit_code, ExitCode::USR_FORBIDDEN)
    }

    /// With an empty allowlist (unrestricted mode) anyone can deploy.
    #[test]
    fn test_create_no_restriction() {
        let deployers = vec![];
        let rt = construct_and_verify(deployers);

        let id_addr = Address::new_id(110);
        let eth_addr = EthAddress(hex_literal::hex!(
            "CAFEB0BA00000000000000000000000000000000"
        ));
        let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap();

        rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr);
        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr);
        rt.set_origin(id_addr);
        rt.expect_validate_caller_addr(vec![id_addr]);

        let initcode = vec![0xff];
        let create_params = CreateExternalParams(initcode.clone());

        let evm_params = ConstructorParams {
            creator: eth_addr,
            initcode: initcode.into(),
        };

        let new_eth_addr = compute_address_create(&rt, &eth_addr, 0);
        let params = Exec4Params {
            code_cid: *EVM_ACTOR_CODE_ID,
            constructor_params: RawBytes::serialize(evm_params).unwrap(),
            subaddress: new_eth_addr.0[..].to_owned().into(),
        };

        let send_return = IpldBlock::serialize_cbor(&Exec4Return {
            id_address: Address::new_id(111),
            robust_address: Address::new_id(0), // not a robust address but im hacking here and nobody checks
        })
        .unwrap();

        rt.expect_send_simple(
            INIT_ACTOR_ADDR,
            EXEC4_METHOD,
            IpldBlock::serialize_cbor(&params).unwrap(),
            TokenAmount::from_atto(0),
            send_return,
            ExitCode::OK,
        );

        let result = rt
            .call::<IPCEamActor>(
                Method::CreateExternal as u64,
                IpldBlock::serialize_cbor(&create_params).unwrap(),
            )
            .unwrap()
            .unwrap()
            .deserialize::<Return>()
            .unwrap();

        let expected_return = Return {
            actor_id: 111,
            robust_address: Some(Address::new_id(0)),
            eth_address: new_eth_addr,
        };

        assert_eq!(result, expected_return);
        rt.verify();
    }

    /// A whitelisted account (allowlisted via its f410 address) may deploy.
    #[test]
    fn test_create_by_whitelisted_allowed() {
        let eth_addr = EthAddress(hex_literal::hex!(
            "CAFEB0BA00000000000000000000000000000000"
        ));
        let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap();

        let deployers = vec![Address::new_id(2000), f4_eth_addr];
        let rt = construct_and_verify(deployers);

        let id_addr = Address::new_id(1000);
        rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr);
        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr);
        rt.set_origin(id_addr);
        rt.expect_validate_caller_addr(vec![id_addr]);

        let initcode = vec![0xff];
        let create_params = CreateExternalParams(initcode.clone());

        let evm_params = ConstructorParams {
            creator: eth_addr,
            initcode: initcode.into(),
        };

        let new_eth_addr = compute_address_create(&rt, &eth_addr, 0);
        let params = Exec4Params {
            code_cid: *EVM_ACTOR_CODE_ID,
            constructor_params: RawBytes::serialize(evm_params).unwrap(),
            subaddress: new_eth_addr.0[..].to_owned().into(),
        };

        let send_return = IpldBlock::serialize_cbor(&Exec4Return {
            id_address: Address::new_id(111),
            robust_address: Address::new_id(0), // not a robust address but im hacking here and nobody checks
        })
        .unwrap();

        rt.expect_send_simple(
            INIT_ACTOR_ADDR,
            EXEC4_METHOD,
            IpldBlock::serialize_cbor(&params).unwrap(),
            TokenAmount::from_atto(0),
            send_return,
            ExitCode::OK,
        );

        let result = rt
            .call::<IPCEamActor>(
                Method::CreateExternal as u64,
                IpldBlock::serialize_cbor(&create_params).unwrap(),
            )
            .unwrap()
            .unwrap()
            .deserialize::<Return>()
            .unwrap();

        let expected_return = Return {
            actor_id: 111,
            robust_address: Some(Address::new_id(0)),
            eth_address: new_eth_addr,
        };

        assert_eq!(result, expected_return);
        rt.verify();
    }

    /// An EVM contract caller bypasses the allowlist entirely.
    #[test]
    fn test_create_by_contract_allowed() {
        let eth_addr = EthAddress(hex_literal::hex!(
            "CAFEB0BA00000000000000000000000000000000"
        ));
        let f4_eth_addr = Address::new_delegated(10, &eth_addr.0).unwrap();

        let deployers = vec![Address::new_id(2000), f4_eth_addr];
        let rt = construct_and_verify(deployers);

        let id_addr = Address::new_id(1000);
        rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr);
        rt.set_caller(*EVM_ACTOR_CODE_ID, id_addr);
        rt.expect_validate_caller_type(vec![Type::EVM]);

        let initcode = vec![0xff];
        let create_params = CreateParams {
            initcode: initcode.clone(),
            nonce: 0,
        };

        let evm_params = ConstructorParams {
            creator: eth_addr,
            initcode: initcode.into(),
        };

        let new_eth_addr = compute_address_create(&rt, &eth_addr, 0);
        let params = Exec4Params {
            code_cid: *EVM_ACTOR_CODE_ID,
            constructor_params: RawBytes::serialize(evm_params).unwrap(),
            subaddress: new_eth_addr.0[..].to_owned().into(),
        };

        let send_return = IpldBlock::serialize_cbor(&Exec4Return {
            id_address: Address::new_id(111),
            robust_address: Address::new_id(0), // not a robust address but im hacking here and nobody checks
        })
        .unwrap();

        rt.expect_send_simple(
            INIT_ACTOR_ADDR,
            EXEC4_METHOD,
            IpldBlock::serialize_cbor(&params).unwrap(),
            TokenAmount::from_atto(0),
            send_return,
            ExitCode::OK,
        );

        let result = rt
            .call::<IPCEamActor>(
                Method::Create as u64,
                IpldBlock::serialize_cbor(&create_params).unwrap(),
            )
            .unwrap()
            .unwrap()
            .deserialize::<Return>()
            .unwrap();

        let expected_return = Return {
            actor_id: 111,
            robust_address: Some(Address::new_id(0)),
            eth_address: new_eth_addr,
        };

        assert_eq!(result, expected_return);
        rt.verify();
    }

    /// UpdateDeployers replaces (not extends) the allowlist: the updater loses
    /// its own permission while the newly listed deployer gains it.
    #[test]
    fn test_update_deployers() {
        let deployers = vec![Address::new_id(1000)];
        let rt = construct_and_verify(deployers);

        struct AddrTriple {
            eth: EthAddress,
            f410: Address,
            id: Address,
        }

        macro_rules! create_address {
            ($hex_addr:expr, $id:expr) => {{
                let eth = EthAddress(hex_literal::hex!($hex_addr));
                let f410 = Address::new_delegated(10, &eth.0).unwrap();
                rt.set_delegated_address($id, f410);
                AddrTriple {
                    eth,
                    f410,
                    id: Address::new_id($id),
                }
            }};
        }

        let allowed = create_address!("CAFEB0BA00000000000000000000000000000000", 1000);
        let deployer = create_address!("FAAAB0BA00000000000000000000000000000000", 2000);

        let initcode = vec![0xff];
        let create_params = CreateExternalParams(initcode.clone());

        // Deployer is not allowed to create yet.
        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, deployer.id);
        let ret = rt.call::<IPCEamActor>(
            Method::CreateExternal as u64,
            IpldBlock::serialize_cbor(&create_params).unwrap(),
        );
        assert_eq!(ExitCode::USR_FORBIDDEN, ret.err().unwrap().exit_code());

        // Now add permissions for the deployer from the allowed address.
        let update_deployers_params = vec![deployer.f410];
        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, allowed.id);
        let ret = rt.call::<IPCEamActor>(
            ExtraMethods::UpdateDeployers as MethodNum,
            IpldBlock::serialize_cbor(&update_deployers_params).unwrap(),
        );
        assert!(ret.is_ok());

        // Previously allowed deployer no longer allowed.
        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, allowed.id);
        let ret = rt.call::<IPCEamActor>(
            Method::CreateExternal as u64,
            IpldBlock::serialize_cbor(&create_params).unwrap(),
        );
        assert_eq!(ExitCode::USR_FORBIDDEN, ret.err().unwrap().exit_code());

        // New deployer is permissioned.
        rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, deployer.id);
        rt.set_origin(deployer.id);
        rt.expect_validate_caller_addr(vec![deployer.id]);

        let send_return = IpldBlock::serialize_cbor(&Exec4Return {
            id_address: Address::new_id(111),
            robust_address: Address::new_id(0), // nobody cares
        })
        .unwrap();

        let new_eth_addr = compute_address_create(&rt, &deployer.eth, 0);

        let params = {
            let evm_params = ConstructorParams {
                creator: deployer.eth,
                initcode: initcode.clone().into(),
            };
            Exec4Params {
                code_cid: *EVM_ACTOR_CODE_ID,
                constructor_params: RawBytes::serialize(evm_params).unwrap(),
                subaddress: new_eth_addr.0[..].to_owned().into(),
            }
        };

        rt.expect_send_simple(
            INIT_ACTOR_ADDR,
            EXEC4_METHOD,
            IpldBlock::serialize_cbor(&params).unwrap(),
            TokenAmount::from_atto(0),
            send_return,
            ExitCode::OK,
        );

        let ret = rt
            .call::<IPCEamActor>(
                Method::CreateExternal as u64,
                IpldBlock::serialize_cbor(&create_params).unwrap(),
            )
            .unwrap()
            .unwrap()
            .deserialize::<Return>()
            .unwrap();

        let expected_return = Return {
            actor_id: 111,
            robust_address: Some(Address::new_id(0)),
            eth_address: new_eth_addr,
        };

        assert_eq!(ret, expected_return);
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/actors/eam/src/state.rs | fendermint/actors/eam/src/state.rs | // Copyright 2021-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use fil_actors_runtime::runtime::Runtime;
use fil_actors_runtime::{ActorError, Map2, DEFAULT_HAMT_CONFIG};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::tuple::*;
use fvm_shared::address::Address;
use fvm_shared::ActorID;
use serde::{Deserialize, Serialize};
// HAMT mapping allowlisted deployer addresses to unit values.
pub type DeployerMap<BS> = Map2<BS, Address, ()>;

/// The args used to create the permission mode in storage
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum PermissionModeParams {
    /// No restriction, everyone can deploy
    Unrestricted,
    /// Only whitelisted addresses can deploy
    AllowList(Vec<Address>),
}

/// The permission mode for controlling who can deploy contracts
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum PermissionMode {
    /// No restriction, everyone can deploy
    Unrestricted,
    /// Only whitelisted addresses can deploy
    AllowList(Cid), // HAMT[Address]()
}

/// State of the IPC EAM actor: just the deployment permission mode.
#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)]
pub struct State {
    pub permission_mode: PermissionMode,
}
impl State {
    /// Builds the stored permission mode from the constructor params,
    /// materializing an allowlist as a HAMT when one is given.
    pub fn new<BS: Blockstore>(
        store: &BS,
        args: PermissionModeParams,
    ) -> Result<State, ActorError> {
        let permission_mode = match args {
            PermissionModeParams::Unrestricted => PermissionMode::Unrestricted,
            PermissionModeParams::AllowList(addresses) => {
                let mut allowlist = DeployerMap::empty(store, DEFAULT_HAMT_CONFIG, "empty");
                for addr in &addresses {
                    allowlist.set(addr, ())?;
                }
                PermissionMode::AllowList(allowlist.flush()?)
            }
        };
        Ok(State { permission_mode })
    }

    /// Returns whether `deployer` (an ID) may deploy under the current mode.
    pub fn can_deploy(&self, rt: &impl Runtime, deployer: ActorID) -> Result<bool, ActorError> {
        let allowlist_cid = match &self.permission_mode {
            // Unrestricted mode admits everyone.
            PermissionMode::Unrestricted => return Ok(true),
            PermissionMode::AllowList(cid) => cid,
        };

        let deployers = DeployerMap::load(rt.store(), allowlist_cid, DEFAULT_HAMT_CONFIG, "verifiers")?;

        // Normalize allowed addresses to ID addresses, so we can compare any kind of
        // allowlisted address (f1, f2, f3, ...). The allowlist cannot be normalized at
        // construction time because addresses may not be bound to IDs yet
        // (counterfactual usage). Unfortunately Hamt::for_each won't let us stop
        // iterating on a match, so this is more wasteful than we'd like; Hamt has an
        // Iterator now but it isn't exposed through Map2 (see ENG-800).
        let mut allowed = false;
        deployers.for_each(|k, _| {
            allowed = allowed || rt.resolve_address(&k) == Some(deployer);
            Ok(())
        })?;
        Ok(allowed)
    }
}
#[cfg(test)]
mod tests {
    use cid::Cid;

    use crate::state::PermissionMode;

    /// Round-trip both permission-mode variants through CBOR.
    #[test]
    fn test_serialization() {
        for mode in [
            PermissionMode::Unrestricted,
            PermissionMode::AllowList(Cid::default()),
        ] {
            let bytes = fvm_ipld_encoding::to_vec(&mode).unwrap();
            let decoded: PermissionMode = fvm_ipld_encoding::from_slice(&bytes).unwrap();
            assert_eq!(decoded, mode)
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/src/lib.rs | fendermint/testing/contract-test/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context, Result};
use byteorder::{BigEndian, WriteBytesExt};
use cid::Cid;
use fendermint_vm_core::Timestamp;
use fendermint_vm_interpreter::fvm::PowerUpdates;
use fvm_shared::{bigint::Zero, clock::ChainEpoch, econ::TokenAmount, version::NetworkVersion};
use std::{future::Future, sync::Arc};
use fendermint_vm_genesis::Genesis;
use fendermint_vm_interpreter::{
fvm::{
bundle::{bundle_path, contracts_path, custom_actors_bundle_path},
state::{FvmExecState, FvmGenesisState, FvmStateParams, FvmUpdatableParams},
store::memory::MemoryBlockstore,
upgrades::UpgradeScheduler,
FvmApplyRet, FvmGenesisOutput, FvmMessage, FvmMessageInterpreter,
},
ExecInterpreter, GenesisInterpreter,
};
use fvm::engine::MultiEngine;
pub mod ipc;
/// Loads the builtin and custom actor bundles, runs `genesis` through a
/// `FvmMessageInterpreter` backed by a mock Tendermint RPC client, and returns
/// the resulting execution state together with the genesis output.
pub async fn init_exec_state(
    multi_engine: Arc<MultiEngine>,
    genesis: Genesis,
) -> anyhow::Result<(FvmExecState<MemoryBlockstore>, FvmGenesisOutput)> {
    let bundle_path = bundle_path();
    let bundle = std::fs::read(&bundle_path)
        .with_context(|| format!("failed to read bundle: {}", bundle_path.to_string_lossy()))?;

    let custom_actors_bundle_path = custom_actors_bundle_path();
    let custom_actors_bundle = std::fs::read(&custom_actors_bundle_path).with_context(|| {
        format!(
            "failed to read custom actors_bundle: {}",
            custom_actors_bundle_path.to_string_lossy()
        )
    })?;

    let store = MemoryBlockstore::new();

    let state = FvmGenesisState::new(store, multi_engine, &bundle, &custom_actors_bundle)
        .await
        .context("failed to create state")?;

    // The mock client is never actually queried here; it only satisfies the
    // interpreter's constructor.
    let (client, _) =
        tendermint_rpc::MockClient::new(tendermint_rpc::MockRequestMethodMatcher::default());

    // NOTE(review): the two 1.05 values are positional gas-related factors and
    // `false` a flag — confirm their meaning against FvmMessageInterpreter::new.
    let interpreter = FvmMessageInterpreter::new(
        client,
        None,
        contracts_path(),
        1.05,
        1.05,
        false,
        UpgradeScheduler::new(),
    );

    let (state, out) = interpreter
        .init(state, genesis)
        .await
        .context("failed to create actors")?;

    let state = state
        .into_exec_state()
        .map_err(|_| anyhow!("should be in exec stage"))?;

    Ok((state, out))
}
/// Test harness that drives an interpreter through the begin/end/commit
/// block lifecycle over an in-memory blockstore.
pub struct Tester<I> {
    // interpreter executing genesis and block-level transitions
    interpreter: Arc<I>,
    // shared in-memory blockstore underlying all state
    state_store: Arc<MemoryBlockstore>,
    // engine cache used when instantiating the FVM
    multi_engine: Arc<MultiEngine>,
    // execution state of the block in progress (None between blocks)
    exec_state: Arc<tokio::sync::Mutex<Option<FvmExecState<MemoryBlockstore>>>>,
    // parameters carried over from the last committed block
    state_params: FvmStateParams,
}
impl<I> Tester<I>
where
    I: GenesisInterpreter<
        State = FvmGenesisState<MemoryBlockstore>,
        Genesis = Genesis,
        Output = FvmGenesisOutput,
    >,
    I: ExecInterpreter<
        State = FvmExecState<MemoryBlockstore>,
        Message = FvmMessage,
        BeginOutput = FvmApplyRet,
        DeliverOutput = FvmApplyRet,
        EndOutput = PowerUpdates,
    >,
{
    /// Clone the shared in-memory blockstore.
    fn state_store_clone(&self) -> MemoryBlockstore {
        self.state_store.as_ref().clone()
    }

    /// Create a tester with placeholder (pre-genesis) state parameters;
    /// call [`Tester::init`] with a genesis before executing blocks.
    pub fn new(interpreter: I, state_store: MemoryBlockstore) -> Self {
        Self {
            interpreter: Arc::new(interpreter),
            state_store: Arc::new(state_store),
            multi_engine: Arc::new(MultiEngine::new(1)),
            exec_state: Arc::new(tokio::sync::Mutex::new(None)),
            state_params: FvmStateParams {
                timestamp: Timestamp(0),
                state_root: Cid::default(),
                network_version: NetworkVersion::V21,
                base_fee: TokenAmount::zero(),
                circ_supply: TokenAmount::zero(),
                chain_id: 0,
                power_scale: 0,
                app_version: 0,
            },
        }
    }

    /// Load the actor bundles, run genesis through the interpreter and record
    /// the resulting state parameters.
    pub async fn init(&mut self, genesis: Genesis) -> anyhow::Result<()> {
        let bundle_path = bundle_path();
        let bundle = std::fs::read(&bundle_path)
            .with_context(|| format!("failed to read bundle: {}", bundle_path.to_string_lossy()))?;

        let custom_actors_bundle_path = custom_actors_bundle_path();
        let custom_actors_bundle =
            std::fs::read(&custom_actors_bundle_path).with_context(|| {
                format!(
                    "failed to read custom actors_bundle: {}",
                    custom_actors_bundle_path.to_string_lossy()
                )
            })?;

        let state = FvmGenesisState::new(
            self.state_store_clone(),
            self.multi_engine.clone(),
            &bundle,
            &custom_actors_bundle,
        )
        .await
        .context("failed to create genesis state")?;

        let (state, out) = self
            .interpreter
            .init(state, genesis)
            .await
            .context("failed to init from genesis")?;

        let state_root = state.commit().context("failed to commit genesis state")?;

        self.state_params = FvmStateParams {
            state_root,
            timestamp: out.timestamp,
            network_version: out.network_version,
            base_fee: out.base_fee,
            circ_supply: out.circ_supply,
            chain_id: out.chain_id.into(),
            power_scale: out.power_scale,
            app_version: 0,
        };

        Ok(())
    }

    /// Take the execution state, update it, put it back, return the output.
    async fn modify_exec_state<T, F, R>(&self, f: F) -> anyhow::Result<T>
    where
        F: FnOnce(FvmExecState<MemoryBlockstore>) -> R,
        R: Future<Output = Result<(FvmExecState<MemoryBlockstore>, T)>>,
    {
        let mut guard = self.exec_state.lock().await;
        let state = guard.take().expect("exec state empty");

        let (state, ret) = f(state).await?;

        *guard = Some(state);

        Ok(ret)
    }

    /// Put the execution state during block execution. Has to be empty.
    async fn put_exec_state(&self, state: FvmExecState<MemoryBlockstore>) {
        let mut guard = self.exec_state.lock().await;
        assert!(guard.is_none(), "exec state not empty");
        *guard = Some(state);
    }

    /// Take the execution state during block execution. Has to be non-empty.
    async fn take_exec_state(&self) -> FvmExecState<MemoryBlockstore> {
        let mut guard = self.exec_state.lock().await;
        guard.take().expect("exec state empty")
    }

    /// Start a block at `block_height` and run the interpreter's `begin`.
    pub async fn begin_block(&self, block_height: ChainEpoch) -> Result<()> {
        // Derive a deterministic synthetic block hash from the height.
        let mut block_hash: [u8; 32] = [0; 32];
        let _ = block_hash.as_mut().write_i64::<BigEndian>(block_height);

        let db = self.state_store.as_ref().clone();
        let mut state_params = self.state_params.clone();
        state_params.timestamp = Timestamp(block_height as u64);

        let state = FvmExecState::new(db, self.multi_engine.as_ref(), block_height, state_params)
            .context("error creating new state")?
            .with_block_hash(block_hash);

        self.put_exec_state(state).await;

        // Fix: propagate errors with context (like `end_block`) instead of the
        // previous `.unwrap()` panic.
        let _res = self
            .modify_exec_state(|s| self.interpreter.begin(s))
            .await
            .context("begin failed")?;

        Ok(())
    }

    /// Run the interpreter's `end` for the current block.
    pub async fn end_block(&self, _block_height: ChainEpoch) -> Result<()> {
        let _ret = self
            .modify_exec_state(|s| self.interpreter.end(s))
            .await
            .context("end failed")?;

        Ok(())
    }

    /// Commit the current execution state, folding the updatable parameters
    /// back into `state_params`. (A leftover debug `eprintln!` of the state
    /// parameters was removed.)
    pub async fn commit(&mut self) -> Result<()> {
        let exec_state = self.take_exec_state().await;

        let (
            state_root,
            FvmUpdatableParams {
                app_version,
                base_fee,
                circ_supply,
                power_scale,
            },
            _,
        ) = exec_state.commit().context("failed to commit FVM")?;

        self.state_params.state_root = state_root;
        self.state_params.app_version = app_version;
        self.state_params.base_fee = base_fee;
        self.state_params.circ_supply = circ_supply;
        self.state_params.power_scale = power_scale;

        Ok(())
    }

    /// The state parameters as of the last commit.
    pub fn state_params(&self) -> FvmStateParams {
        self.state_params.clone()
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/src/ipc/subnet.rs | fendermint/testing/contract-test/src/ipc/subnet.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_actor_interface::ipc::subnet::SubnetActorErrors;
use fendermint_vm_genesis::{Collateral, Validator};
use fendermint_vm_interpreter::fvm::state::fevm::{
ContractCaller, ContractResult, MockProvider, NoRevert,
};
use fendermint_vm_interpreter::fvm::state::FvmExecState;
use fendermint_vm_message::conv::{from_eth, from_fvm};
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::crypto::signature::SECP_SIG_LEN;
use fvm_shared::econ::TokenAmount;
use ipc_actors_abis::subnet_actor_checkpointing_facet::{
self as checkpointer, SubnetActorCheckpointingFacet,
};
use ipc_actors_abis::subnet_actor_getter_facet::{self as getter, SubnetActorGetterFacet};
use ipc_actors_abis::subnet_actor_manager_facet::SubnetActorManagerFacet;
pub use ipc_actors_abis::register_subnet_facet::ConstructorParams as SubnetConstructorParams;
use ipc_actors_abis::subnet_actor_reward_facet::SubnetActorRewardFacet;
#[derive(Clone)]
pub struct SubnetCaller<DB> {
addr: EthAddress,
getter: ContractCaller<DB, SubnetActorGetterFacet<MockProvider>, NoRevert>,
manager: ContractCaller<DB, SubnetActorManagerFacet<MockProvider>, SubnetActorErrors>,
rewarder: ContractCaller<DB, SubnetActorRewardFacet<MockProvider>, SubnetActorErrors>,
checkpointer:
ContractCaller<DB, SubnetActorCheckpointingFacet<MockProvider>, SubnetActorErrors>,
}
impl<DB> SubnetCaller<DB> {
pub fn new(addr: EthAddress) -> Self {
Self {
addr,
getter: ContractCaller::new(addr, SubnetActorGetterFacet::new),
manager: ContractCaller::new(addr, SubnetActorManagerFacet::new),
rewarder: ContractCaller::new(addr, SubnetActorRewardFacet::new),
checkpointer: ContractCaller::new(addr, SubnetActorCheckpointingFacet::new),
}
}
pub fn addr(&self) -> EthAddress {
self.addr
}
}
type TryCallResult<T> = anyhow::Result<ContractResult<T, SubnetActorErrors>>;
impl<DB: Blockstore + Clone> SubnetCaller<DB> {
/// Join a subnet as a validator.
pub fn join(
&self,
state: &mut FvmExecState<DB>,
validator: &Validator<Collateral>,
) -> anyhow::Result<()> {
let public_key = validator.public_key.0.serialize();
let addr = EthAddress::new_secp256k1(&public_key)?;
let deposit = from_fvm::to_eth_tokens(&validator.power.0)?;
// We need to send in the name of the address as a sender, not the system account.
self.manager.call(state, |c| {
c.join(public_key.into()).from(addr).value(deposit)
})
}
/// Try to join the subnet as a validator.
pub fn try_join(
&self,
state: &mut FvmExecState<DB>,
validator: &Validator<Collateral>,
) -> TryCallResult<()> {
let public_key = validator.public_key.0.serialize();
let addr = EthAddress::new_secp256k1(&public_key)?;
let deposit = from_fvm::to_eth_tokens(&validator.power.0)?;
self.manager.try_call(state, |c| {
c.join(public_key.into()).from(addr).value(deposit)
})
}
/// Try to increase the stake of a validator.
pub fn try_stake(
&self,
state: &mut FvmExecState<DB>,
addr: &EthAddress,
value: &TokenAmount,
) -> TryCallResult<()> {
let deposit = from_fvm::to_eth_tokens(value)?;
self.manager
.try_call(state, |c| c.stake().from(addr).value(deposit))
}
/// Try to decrease the stake of a validator.
pub fn try_unstake(
&self,
state: &mut FvmExecState<DB>,
addr: &EthAddress,
value: &TokenAmount,
) -> TryCallResult<()> {
let withdraw = from_fvm::to_eth_tokens(value)?;
self.manager
.try_call(state, |c| c.unstake(withdraw).from(addr))
}
/// Try to remove all stake of a validator.
pub fn try_leave(&self, state: &mut FvmExecState<DB>, addr: &EthAddress) -> TryCallResult<()> {
self.manager.try_call(state, |c| c.leave().from(addr))
}
/// Claim any refunds.
pub fn try_claim(&self, state: &mut FvmExecState<DB>, addr: &EthAddress) -> TryCallResult<()> {
self.rewarder.try_call(state, |c| c.claim().from(addr))
}
/// Submit a bottom-up checkpoint.
pub fn try_submit_checkpoint(
&self,
state: &mut FvmExecState<DB>,
checkpoint: checkpointer::BottomUpCheckpoint,
_messages: Vec<checkpointer::IpcEnvelope>,
signatures: Vec<(EthAddress, [u8; SECP_SIG_LEN])>,
) -> TryCallResult<()> {
let mut addrs = Vec::new();
let mut sigs = Vec::new();
for (addr, sig) in signatures {
addrs.push(ethers::types::Address::from(addr));
sigs.push(sig.into());
}
self.checkpointer
.try_call(state, |c| c.submit_checkpoint(checkpoint, addrs, sigs))
}
/// Get information about the validator's current and total collateral.
pub fn get_validator(
&self,
state: &mut FvmExecState<DB>,
addr: &EthAddress,
) -> anyhow::Result<getter::ValidatorInfo> {
self.getter.call(state, |c| c.get_validator(addr.into()))
}
/// Get the confirmed collateral of a validator.
pub fn confirmed_collateral(
&self,
state: &mut FvmExecState<DB>,
addr: &EthAddress,
) -> anyhow::Result<TokenAmount> {
self.get_validator(state, addr)
.map(|i| from_eth::to_fvm_tokens(&i.confirmed_collateral))
}
/// Get the total (unconfirmed) collateral of a validator.
pub fn total_collateral(
&self,
state: &mut FvmExecState<DB>,
addr: &EthAddress,
) -> anyhow::Result<TokenAmount> {
self.get_validator(state, addr)
.map(|i| from_eth::to_fvm_tokens(&i.total_collateral))
}
/// Get the `(next, start)` configuration number pair.
///
/// * `next` is the next expected one
/// * `start` is the first unapplied one
pub fn get_configuration_numbers(
&self,
state: &mut FvmExecState<DB>,
) -> anyhow::Result<(u64, u64)> {
self.getter.call(state, |c| c.get_configuration_numbers())
}
/// Check if minimum collateral has been met.
pub fn bootstrapped(&self, state: &mut FvmExecState<DB>) -> anyhow::Result<bool> {
self.getter.call(state, |c| c.bootstrapped())
}
/// Check if a validator is active, ie. they are in the top N.
pub fn is_active(
&self,
state: &mut FvmExecState<DB>,
addr: &EthAddress,
) -> anyhow::Result<bool> {
self.getter
.call(state, |c| c.is_active_validator(addr.into()))
}
/// Check if a validator is wating, ie. they have deposited but are not in the top N.
pub fn is_waiting(
&self,
state: &mut FvmExecState<DB>,
addr: &EthAddress,
) -> anyhow::Result<bool> {
self.getter
.call(state, |c| c.is_waiting_validator(addr.into()))
}
/// This is purely for testing, although we could use it in production to avoid having to match Rust and Solidity semantics.
pub fn cross_msgs_hash(
&self,
state: &mut FvmExecState<DB>,
cross_msgs: Vec<getter::IpcEnvelope>,
) -> anyhow::Result<[u8; 32]> {
self.getter.call(state, |c| c.cross_msgs_hash(cross_msgs))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/src/ipc/registry.rs | fendermint/testing/contract-test/src/ipc/registry.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::Context;
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_actor_interface::init::builtin_actor_eth_addr;
use fendermint_vm_actor_interface::ipc::SUBNETREGISTRY_ACTOR_ID;
use fendermint_vm_interpreter::fvm::state::fevm::{ContractCaller, MockProvider};
use fendermint_vm_interpreter::fvm::state::FvmExecState;
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::ActorID;
use ipc_actors_abis::subnet_registry_diamond::SubnetRegistryDiamondErrors;
pub use ipc_actors_abis::register_subnet_facet::{
ConstructorParams as SubnetConstructorParams, RegisterSubnetFacet,
};
#[derive(Clone)]
pub struct RegistryCaller<DB> {
addr: EthAddress,
register: ContractCaller<DB, RegisterSubnetFacet<MockProvider>, SubnetRegistryDiamondErrors>,
}
impl<DB> Default for RegistryCaller<DB> {
fn default() -> Self {
Self::new(SUBNETREGISTRY_ACTOR_ID)
}
}
impl<DB> RegistryCaller<DB> {
pub fn new(actor_id: ActorID) -> Self {
let addr = builtin_actor_eth_addr(actor_id);
Self {
addr,
register: ContractCaller::new(addr, RegisterSubnetFacet::new),
}
}
pub fn addr(&self) -> EthAddress {
self.addr
}
}
impl<DB: Blockstore + Clone> RegistryCaller<DB> {
/// Create a new instance of the built-in subnet implemetation.
///
/// Returns the address of the deployed contract.
pub fn new_subnet(
&self,
state: &mut FvmExecState<DB>,
params: SubnetConstructorParams,
) -> anyhow::Result<EthAddress> {
let addr = self
.register
.call(state, |c| c.new_subnet_actor(params))
.context("failed to create new subnet")?;
Ok(EthAddress(addr.0))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/src/ipc/mod.rs | fendermint/testing/contract-test/src/ipc/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod registry;
pub mod subnet;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/tests/smt_staking.rs | fendermint/testing/contract-test/tests/smt_staking.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//#![allow(unused)]
//! State Machine Test for the Staking contracts.
//!
//! The test simulates random actions validators can take, such as depositing and withdrawing
//! collateral, and executes these actions on the actual Solidity contracts as well as an
//! idealised model, comparing the results and testing that invariants are maintained.
//!
//! It can be executed the following way:
//!
//! ```text
//! cargo test --release -p fendermint_contract_test --test smt_staking
//! ```
use fendermint_testing::{arb::ArbTokenAmount, smt::StateMachine, state_machine_test};
mod staking;
use fendermint_vm_actor_interface::ipc::{abi_hash, AbiHash};
use fendermint_vm_message::conv::from_fvm;
use ipc_actors_abis::subnet_actor_getter_facet;
use staking::machine::StakingMachine;
state_machine_test!(staking, 30000 ms, 65512 bytes, 100 steps, StakingMachine::default());
//state_machine_test!(staking, 0x2924bbae0000ffe8, 100 steps, StakingMachine::default());
/// Test that the way we hash cross messages is the same as Solidity, without having
/// to construct actually executable cross messages.
#[test]
fn prop_cross_msgs_hash() {
use arbitrary::Arbitrary;
use subnet_actor_getter_facet as getter;
// We need an FVM execution state to interact with the contracts.
let machine = StakingMachine::default();
fendermint_testing::smt::fixed_size_builder(1024 * 1024)
.budget_ms(10000) // Need to set a budget otherwise the default is used up by setup.
.run(|u| {
let state = machine.gen_state(u)?;
let system = machine.new_system(&state);
let mut exec_state = system.exec_state.borrow_mut();
let mut cross_msgs = Vec::<getter::IpcEnvelope>::new();
// Generate a few random messages.
for _ in 0..u.int_in_range(0..=3)? {
cross_msgs.push(getter::IpcEnvelope {
// FIXME: Add different types here?
kind: 0,
from: getter::Ipcaddress {
subnet_id: getter::SubnetID {
root: u.arbitrary()?,
route: Vec::new(),
},
raw_address: getter::FvmAddress {
addr_type: u.arbitrary()?,
payload: <[u8; 20]>::arbitrary(u)?.into(),
},
},
to: getter::Ipcaddress {
subnet_id: getter::SubnetID {
root: u.arbitrary()?,
route: Vec::new(),
},
raw_address: getter::FvmAddress {
addr_type: u.arbitrary()?,
payload: <[u8; 20]>::arbitrary(u)?.into(),
},
},
value: from_fvm::to_eth_tokens(&ArbTokenAmount::arbitrary(u)?.0).unwrap(),
nonce: u.arbitrary()?,
// FIXME: Add arbitrary here?
message: Vec::new().into(),
})
}
// Check so we know we did not generate zero length messages all the time.
fendermint_testing::smt::ensure_has_randomness(u)?;
// It doesn't seem to actually matter whether we pass these as tuples or arrays.
let cross_msgs_hash = cross_msgs.clone().abi_hash();
let cross_msgs_hash_0 = abi_hash(cross_msgs.clone());
let cross_msgs_hash_1 = abi_hash((cross_msgs.clone(),));
let cross_msgs_hash_2 = abi_hash(((cross_msgs.clone(),),));
let hash = system
.subnet
.cross_msgs_hash(&mut exec_state, cross_msgs)
.expect("failed to call cross_msgs_hash");
assert_eq!(cross_msgs_hash, hash, "impl OK");
assert_eq!(cross_msgs_hash_0, hash, "array OK");
assert_eq!(cross_msgs_hash_1, hash, "tuple of array OK");
assert_ne!(cross_msgs_hash_2, hash, "tuple of tuple of array NOT OK");
Ok(())
})
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/tests/run_upgrades.rs | fendermint/testing/contract-test/tests/run_upgrades.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod staking;
use anyhow::{Context, Ok};
use async_trait::async_trait;
use ethers::types::U256;
use fendermint_contract_test::Tester;
use fendermint_rpc::response::decode_fevm_return_data;
use rand::rngs::StdRng;
use rand::SeedableRng;
use std::str::FromStr;
use ethers::contract::abigen;
use fvm_shared::address::Address;
use fvm_shared::bigint::Zero;
use fvm_shared::econ::TokenAmount;
use fvm_shared::version::NetworkVersion;
use tendermint_rpc::Client;
use fendermint_crypto::SecretKey;
use fendermint_vm_actor_interface::eam;
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_core::Timestamp;
use fendermint_vm_genesis::{Account, Actor, ActorMeta, Genesis, PermissionMode, SignerAddr};
use fendermint_vm_interpreter::fvm::store::memory::MemoryBlockstore;
use fendermint_vm_interpreter::fvm::upgrades::{Upgrade, UpgradeScheduler};
use fendermint_vm_interpreter::fvm::{bundle::contracts_path, FvmMessageInterpreter};
// returns a seeded secret key which is guaranteed to be the same every time
fn my_secret_key() -> SecretKey {
SecretKey::random(&mut StdRng::seed_from_u64(123))
}
// this test applies a series of upgrades to the state and checks that the upgrades are applied correctly
#[tokio::test]
async fn test_applying_upgrades() {
use bytes::Bytes;
use fendermint_rpc::message::{GasParams, MessageFactory};
use lazy_static::lazy_static;
lazy_static! {
/// Default gas params based on the testkit.
static ref GAS_PARAMS: GasParams = GasParams {
gas_limit: 10_000_000_000,
gas_fee_cap: TokenAmount::default(),
gas_premium: TokenAmount::default(),
};
static ref ADDR: Address = Address::new_secp256k1(&my_secret_key().public_key().serialize()).unwrap();
}
// this is the contract we want to deploy
const CONTRACT_HEX: &str = include_str!("../../contracts/SimpleCoin.bin");
// generate type safe bindings in rust to this contract
abigen!(SimpleCoin, "../contracts/SimpleCoin.abi");
// once we deploy this contract, this is the address we expect the contract to be deployed to
const CONTRACT_ADDRESS: &str = "f410fnz5jdky3zzcj6pejqkomkggw72pcuvkpihz2rwa";
// the amount we want to send to the contract
const SEND_BALANCE_AMOUNT: u64 = 1000;
const CHAIN_NAME: &str = "mychain";
let mut upgrade_scheduler = UpgradeScheduler::new();
upgrade_scheduler
.add(
Upgrade::new(CHAIN_NAME, 1, Some(1), |state| {
println!(
"[Upgrade at height {}] Deploy simple contract",
state.block_height()
);
// create a message for deploying the contract
let mut mf = MessageFactory::new(*ADDR, 1);
let message = mf
.fevm_create(
Bytes::from(
hex::decode(CONTRACT_HEX)
.context("error parsing contract")
.unwrap(),
),
Bytes::default(),
TokenAmount::default(),
GAS_PARAMS.clone(),
)
.unwrap();
// execute the message
let (res, _) = state.execute_implicit(message).unwrap();
assert!(
res.msg_receipt.exit_code.is_success(),
"{:?}",
res.failure_info
);
// parse the message receipt data and make sure the contract was deployed to the expected address
let res = fvm_ipld_encoding::from_slice::<eam::CreateReturn>(
&res.msg_receipt.return_data,
)
.unwrap();
assert_eq!(
res.delegated_address(),
Address::from_str(CONTRACT_ADDRESS).unwrap()
);
Ok(())
})
.unwrap(),
)
.unwrap();
upgrade_scheduler
.add(
Upgrade::new(CHAIN_NAME, 2, None, |state| {
println!(
"[Upgrade at height {}] Sends a balance",
state.block_height()
);
// build the calldata for the send_coin function
let (client, _mock) = ethers::providers::Provider::mocked();
let simple_coin = SimpleCoin::new(EthAddress::from_id(101), client.into());
let call = simple_coin.send_coin(
// the address we are sending the balance to (which is us in this case)
EthAddress::from(my_secret_key().public_key()).into(),
// the amount we are sending
U256::from(SEND_BALANCE_AMOUNT),
);
// create a message for sending the balance
let mut mf = MessageFactory::new(*ADDR, 1);
let message = mf
.fevm_invoke(
Address::from_str(CONTRACT_ADDRESS).unwrap(),
call.calldata().unwrap().0,
TokenAmount::default(),
GAS_PARAMS.clone(),
)
.unwrap();
// execute the message
let (res, _) = state.execute_implicit(message).unwrap();
assert!(
res.msg_receipt.exit_code.is_success(),
"{:?}",
res.failure_info
);
Ok(())
})
.unwrap(),
)
.unwrap();
upgrade_scheduler
.add(
Upgrade::new(CHAIN_NAME, 3, None, |state| {
println!(
"[Upgrade at height {}] Returns a balance",
state.block_height()
);
// build the calldata for the get_balance function
let (client, _mock) = ethers::providers::Provider::mocked();
let simple_coin = SimpleCoin::new(EthAddress::from_id(0), client.into());
let call =
simple_coin.get_balance(EthAddress::from(my_secret_key().public_key()).into());
let mut mf = MessageFactory::new(*ADDR, 1);
let message = mf
.fevm_invoke(
Address::from_str(CONTRACT_ADDRESS).unwrap(),
call.calldata().unwrap().0,
TokenAmount::default(),
GAS_PARAMS.clone(),
)
.unwrap();
// execute the message
let (res, _) = state.execute_implicit(message).unwrap();
assert!(
res.msg_receipt.exit_code.is_success(),
"{:?}",
res.failure_info
);
// parse the message receipt data and make sure the balance we sent in previous upgrade is returned
let bytes = decode_fevm_return_data(res.msg_receipt.return_data).unwrap();
let balance = U256::from_big_endian(&bytes);
assert_eq!(balance, U256::from(SEND_BALANCE_AMOUNT));
Ok(())
})
.unwrap(),
)
.unwrap();
let interpreter: FvmMessageInterpreter<MemoryBlockstore, _> = FvmMessageInterpreter::new(
NeverCallClient,
None,
contracts_path(),
1.05,
1.05,
false,
upgrade_scheduler,
);
let mut tester = Tester::new(interpreter, MemoryBlockstore::new());
let genesis = Genesis {
chain_name: CHAIN_NAME.to_string(),
timestamp: Timestamp(0),
network_version: NetworkVersion::V21,
base_fee: TokenAmount::zero(),
power_scale: 0,
validators: Vec::new(),
accounts: vec![Actor {
meta: ActorMeta::Account(Account {
owner: SignerAddr(*ADDR),
}),
balance: TokenAmount::from_atto(0),
}],
eam_permission_mode: PermissionMode::Unrestricted,
ipc: None,
};
tester.init(genesis).await.unwrap();
// check that the app version is 0
assert_eq!(tester.state_params().app_version, 0);
// iterate over all the upgrades
for block_height in 1..=3 {
tester.begin_block(block_height).await.unwrap();
tester.end_block(block_height).await.unwrap();
tester.commit().await.unwrap();
// check that the app_version was upgraded to 1
assert_eq!(tester.state_params().app_version, 1);
}
}
#[derive(Clone)]
struct NeverCallClient;
#[async_trait]
impl Client for NeverCallClient {
async fn perform<R>(&self, _request: R) -> Result<R::Output, tendermint_rpc::Error>
where
R: tendermint_rpc::SimpleRequest,
{
todo!()
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/tests/staking/state.rs | fendermint/testing/contract-test/tests/staking/state.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::collections::{BTreeMap, VecDeque};
use arbitrary::Unstructured;
use fendermint_crypto::{PublicKey, SecretKey};
use fendermint_testing::arb::{ArbSubnetAddress, ArbSubnetID, ArbTokenAmount};
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_core::Timestamp;
use fendermint_vm_genesis::ipc::{GatewayParams, IpcParams};
use fendermint_vm_genesis::{
Account, Actor, ActorMeta, Collateral, Genesis, PermissionMode, SignerAddr, Validator,
ValidatorKey,
};
use fvm_shared::address::Address;
use fvm_shared::bigint::BigInt;
use fvm_shared::bigint::Integer;
use fvm_shared::{econ::TokenAmount, version::NetworkVersion};
use ipc_api::subnet_id::SubnetID;
use rand::rngs::StdRng;
use rand::SeedableRng;
use super::choose_amount;
#[derive(Debug, Clone)]
pub enum StakingOp {
Deposit(TokenAmount),
Withdraw(TokenAmount),
}
/// The staking message that goes towards the subnet to increase or decrease power.
#[derive(Debug, Clone)]
pub struct StakingUpdate {
pub configuration_number: u64,
pub addr: EthAddress,
pub op: StakingOp,
}
#[derive(Debug, Clone)]
pub struct StakingAccount {
pub public_key: PublicKey,
pub secret_key: SecretKey,
pub addr: EthAddress,
/// In this test the accounts should never gain more than their initial balance.
pub initial_balance: TokenAmount,
/// Balance after the effects of deposits/withdrawals.
pub current_balance: TokenAmount,
/// Currently it's not possible to specify the locking period, so all claims are immediately available.
pub claim_balance: TokenAmount,
}
#[derive(Debug, Clone, Default)]
pub struct StakingDistribution {
/// The highest configuration number applied.
pub configuration_number: u64,
/// Stake for each account that put down some collateral.
pub collaterals: BTreeMap<EthAddress, Collateral>,
/// Stakers ordered by collateral in descending order.
pub ranking: Vec<(Collateral, EthAddress)>,
/// Total collateral amount, computed because we check it often.
total_collateral: TokenAmount,
}
impl StakingDistribution {
/// Sum of all collaterals from active an inactive validators.
///
/// Do not compare this against signature weights because it contains inactive ones!
pub fn total_collateral(&self) -> TokenAmount {
self.total_collateral.clone()
}
pub fn total_validators(&self) -> usize {
self.collaterals.len()
}
/// Collateral of a validator.
pub fn collateral(&self, addr: &EthAddress) -> TokenAmount {
self.collaterals
.get(addr)
.map(|c| c.0.clone())
.unwrap_or_default()
}
/// Update the staking distribution. Return the actually applied operation, if any.
pub fn update(&mut self, update: StakingUpdate) -> Option<StakingOp> {
self.configuration_number = update.configuration_number;
let updated = match update.op {
StakingOp::Deposit(v) => {
let power = self.collaterals.entry(update.addr).or_default();
power.0 += v.clone();
Some((StakingOp::Deposit(v), power.clone()))
}
StakingOp::Withdraw(v) => {
match self.collaterals.entry(update.addr) {
std::collections::btree_map::Entry::Occupied(mut e) => {
let c = e.get().0.clone();
let v = v.min(c.clone());
let p = Collateral(c - v.clone());
if p.0.is_zero() {
e.remove();
} else {
e.insert(p.clone());
};
Some((StakingOp::Withdraw(v), p))
}
std::collections::btree_map::Entry::Vacant(_) => {
// Tried to withdraw more than put in.
None
}
}
}
};
match updated {
Some((op, power)) => {
match op {
StakingOp::Deposit(ref v) => self.total_collateral += v.clone(),
StakingOp::Withdraw(ref v) => self.total_collateral -= v.clone(),
}
self.adjust_rank(update.addr, power);
Some(op)
}
None => None,
}
}
fn adjust_rank(&mut self, addr: EthAddress, power: Collateral) {
if power.0.is_zero() {
self.ranking.retain(|(_, a)| *a != addr);
} else {
match self.ranking.iter_mut().find(|(_, a)| *a == addr) {
None => self.ranking.push((power, addr)),
Some(rank) => rank.0 = power,
}
// Sort by collateral descending. Use a stable sort so already sorted items are not affected.
// Hopefully this works like the sink/swim of the priority queues.
self.ranking
.sort_by(|a, b| b.0 .0.atto().cmp(a.0 .0.atto()));
}
}
}
/// Reference implementation for staking.
#[derive(Debug, Clone)]
pub struct StakingState {
/// Accounts with secret key of accounts in case the contract wants to validate signatures.
pub accounts: BTreeMap<EthAddress, StakingAccount>,
/// List of account addresses to help pick a random one.
pub addrs: Vec<EthAddress>,
/// The parent genesis should include a bunch of accounts we can use to join a subnet.
pub parent_genesis: Genesis,
/// The child genesis describes the initial validator set to join the subnet.
pub child_genesis: Genesis,
/// Current staking distribution, after the application of checkpoints.
pub current_configuration: StakingDistribution,
/// Next staking distribution, applied immediately without involving checkpoints.
pub next_configuration: StakingDistribution,
/// Flag indicating whether the minimum collateral has been met.
pub activated: bool,
/// Configuration number to be used in the next operation.
pub next_configuration_number: u64,
/// Unconfirmed staking operations.
pub pending_updates: VecDeque<StakingUpdate>,
/// The block height of the last checkpoint.
/// The first checkpoint we expect is `0 + bottom_up_checkpoint_period`.
pub last_checkpoint_height: u64,
}
impl StakingState {
pub fn new(
accounts: Vec<StakingAccount>,
parent_genesis: Genesis,
child_genesis: Genesis,
) -> Self {
let current_configuration = child_genesis
.validators
.iter()
.map(|v| {
let addr = EthAddress::new_secp256k1(&v.public_key.0.serialize()).unwrap();
(addr, v.power.clone())
})
.collect::<Vec<_>>();
let accounts = accounts
.into_iter()
.map(|a| (a.addr, a))
.collect::<BTreeMap<_, _>>();
let mut addrs: Vec<EthAddress> = accounts.keys().cloned().collect();
// It's important to sort the addresses so we always pick the same ones given the same seed.
addrs.sort();
let mut state = Self {
accounts,
addrs,
parent_genesis,
child_genesis,
current_configuration: StakingDistribution::default(),
next_configuration: StakingDistribution::default(),
activated: false,
next_configuration_number: 0,
pending_updates: VecDeque::new(),
last_checkpoint_height: 0,
};
// Joining one by one so the we test the activation logic
for (addr, c) in current_configuration {
state.join(addr, c.0);
}
assert!(
state.activated,
"subnet should be activated by the child genesis"
);
assert_eq!(state.next_configuration_number, 1);
state
}
/// Until the minimum collateral is reached, apply the changes immediately.
fn update<F: FnOnce(&mut Self) -> StakingUpdate>(&mut self, f: F) {
let update = f(self);
let configuration_number = update.configuration_number;
// Apply on the next configuration immediately.
let _ = self.next_configuration.update(update.clone());
// Defer for checkpointing.
self.pending_updates.push_back(update);
if !self.activated {
self.checkpoint(configuration_number, 0);
let total_collateral = self.current_configuration.total_collateral();
let total_validators = self.current_configuration.total_validators();
let min_collateral = self.min_collateral();
let min_validators = self.min_validators();
if total_collateral >= min_collateral && total_validators >= min_validators {
self.activated = true;
self.next_configuration_number = 1;
}
}
}
/// Check if checkpoints can be sent to the system.
pub fn can_checkpoint(&self) -> bool {
// This is a technical thing of how the the state does transitions, it's all done in the checkpoint method.
if !self.activated {
return true;
}
// Now the contract expects to be killed explicitly.
// if self.current_configuration.total_collateral() >= self.min_collateral()
// && self.current_configuration.total_validators() >= self.min_collateral()
// {
// return true;
// }
// This used to be the case when the collateral fell below a threshold,
// but now with explicit kill you can always checkpoint until then.
// return false;
if self.active_validators().next().is_none() {
return false;
}
true
}
/// Apply the changes up to the `next_configuration_number`.
pub fn checkpoint(&mut self, next_configuration_number: u64, height: u64) {
// TODO: The contract allows staking operations even after the deactivation of a subnet.
if self.can_checkpoint() {
loop {
if self.pending_updates.is_empty() {
break;
}
if self.pending_updates[0].configuration_number > next_configuration_number {
break;
}
let update = self.pending_updates.pop_front().expect("checked non-empty");
let addr = update.addr;
if let Some(StakingOp::Withdraw(value)) = self.current_configuration.update(update)
{
self.add_claim(&addr, value);
}
}
self.last_checkpoint_height = height;
}
}
/// Check whether an account has staked before. The stake does not have to be confirmed by a checkpoint.
pub fn has_staked(&self, addr: &EthAddress) -> bool {
self.total_deposit(addr).is_positive()
}
/// Check whether an account has a non-zero claim balance.
pub fn has_claim(&self, addr: &EthAddress) -> bool {
self.account(addr).claim_balance.is_positive()
}
/// Total amount staked by a validator.
pub fn total_deposit(&self, addr: &EthAddress) -> TokenAmount {
self.next_configuration.collateral(addr)
}
/// Maximum number of active validators.
pub fn max_validators(&self) -> u16 {
self.child_genesis
.ipc
.as_ref()
.map(|ipc| ipc.gateway.active_validators_limit)
.unwrap_or_default()
}
/// Minimum number of validators required to activate the subnet.
pub fn min_validators(&self) -> usize {
// For now just make it so that when all genesis validators join, the subnet is activated.
self.child_genesis.validators.len()
}
/// Minimum collateral required to activate the subnet.
pub fn min_collateral(&self) -> TokenAmount {
// For now just make it so that when all genesis validators join, the subnet is activated.
self.child_genesis
.validators
.iter()
.map(|v| v.power.0.clone())
.sum()
}
/// Top N validators ordered by collateral.
pub fn active_validators(&self) -> impl Iterator<Item = &(Collateral, EthAddress)> {
let n = self.max_validators() as usize;
self.current_configuration.ranking.iter().take(n)
}
/// Total collateral of the top N validators.
///
/// This is what we have to achieve quorum over.
pub fn active_collateral(&self) -> TokenAmount {
self.active_validators().map(|(c, _)| c.0.clone()).sum()
}
/// Get and increment the configuration number.
fn next_configuration_number(&mut self) -> u64 {
let n = self.next_configuration_number;
if self.activated {
self.next_configuration_number += 1;
}
n
}
/// Get an account. Panics if it doesn't exist.
pub fn account(&self, addr: &EthAddress) -> &StakingAccount {
self.accounts.get(addr).expect("accounts exist")
}
/// Get an account. Panics if it doesn't exist.
fn account_mut(&mut self, addr: &EthAddress) -> &mut StakingAccount {
self.accounts.get_mut(addr).expect("accounts exist")
}
/// Increase the claim balance.
fn add_claim(&mut self, addr: &EthAddress, value: TokenAmount) {
let a = self.account_mut(addr);
eprintln!(
"> ADD CLAIM addr={} value={} current={}",
addr, value, a.claim_balance
);
a.claim_balance += value;
}
/// Increase the current balance.
fn credit(&mut self, addr: &EthAddress, value: TokenAmount) {
let a = self.account_mut(addr);
eprintln!(
"> CREDIT addr={} value={} current={}",
addr, value, a.current_balance
);
a.current_balance += value;
}
/// Decrease the current balance.
fn debit(&mut self, addr: &EthAddress, value: TokenAmount) {
let a = self.account_mut(addr);
eprintln!(
"> DEBIT addr={} value={} current={}",
addr, value, a.current_balance
);
a.current_balance -= value;
}
/// Join with a validator. Repeated joins are allowed.
///
/// Unlike the contract, the model doesn't require metadata here.
pub fn join(&mut self, addr: EthAddress, value: TokenAmount) {
if value.is_zero() || self.has_staked(&addr) {
return;
}
self.update(|this| {
this.debit(&addr, value.clone());
StakingUpdate {
configuration_number: {
// Add an extra because joining in the model would cause a metadata update as well.
this.next_configuration_number();
this.next_configuration_number()
},
addr,
op: StakingOp::Deposit(value),
}
});
}
/// Enqueue a deposit. Must be one of the current validators to succeed, otherwise ignored.
pub fn stake(&mut self, addr: EthAddress, value: TokenAmount) {
// Simulate the check the contract does to ensure the metadata has been added before.
if value.is_zero() || !self.has_staked(&addr) {
return;
}
self.update(|this| {
this.debit(&addr, value.clone());
StakingUpdate {
configuration_number: this.next_configuration_number(),
addr,
op: StakingOp::Deposit(value),
}
});
}
/// Enqueue a withdrawal.
pub fn unstake(&mut self, addr: EthAddress, value: TokenAmount) {
if value.is_zero() || self.total_deposit(&addr) <= value {
return;
}
self.update(|this| StakingUpdate {
configuration_number: this.next_configuration_number(),
addr,
op: StakingOp::Withdraw(value),
});
}
/// Enqueue a total withdrawal.
pub fn leave(&mut self, addr: EthAddress) {
if !self.has_staked(&addr) {
return;
}
let value = self.total_deposit(&addr);
self.update(|this| StakingUpdate {
configuration_number: this.next_configuration_number(),
addr,
op: StakingOp::Withdraw(value),
});
}
/// Put released collateral back into the account's current balance.
pub fn claim(&mut self, addr: EthAddress) {
let a = self.account_mut(&addr);
if a.claim_balance.is_zero() {
return;
}
let c = a.claim_balance.clone();
a.claim_balance = TokenAmount::from_atto(0);
self.credit(&addr, c);
}
}
impl arbitrary::Arbitrary<'_> for StakingState {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
// Limit the maximum number of *child subnet* validators to what the hypothetical consensus algorithm can scale to.
let num_max_validators = 1 + usize::arbitrary(u)? % 10;
// Create a number of accounts; it's okay if not everyone can become validators, and also okay if all of them can.
let num_accounts = 1 + usize::arbitrary(u)? % 20;
// Choose the size for the initial *child subnet* validator set.
let num_validators = 1 + usize::arbitrary(u)? % num_accounts.min(num_max_validators);
// Limit the amount of balance anyone can have so that the sum total of all of them
// will still be lower than what we can send within Solidity as a value, which is U128.
let max_balance = BigInt::from(u128::MAX) / num_accounts;
// Create the desired number of accounts.
let mut rng = StdRng::seed_from_u64(u64::arbitrary(u)?);
let mut accounts = Vec::new();
for _ in 0..num_accounts {
let sk = SecretKey::random(&mut rng);
let pk = sk.public_key();
// All of them need to be ethereum accounts to interact with IPC.
let addr = EthAddress::new_secp256k1(&pk.serialize()).unwrap();
// Create with a non-zero balance so we can pick anyone to be a validator and deposit some collateral.
let initial_balance = ArbTokenAmount::arbitrary(u)?.0;
let initial_balance = initial_balance.atto();
let initial_balance = initial_balance.mod_floor(&max_balance);
let initial_balance =
TokenAmount::from_atto(initial_balance).max(TokenAmount::from_atto(1).clone());
// The current balance is the same as the initial balance even if the account becomes
// one of the validators on the child subnet, because for that they have to join the
// subnet and that's when their funds are going to be locked up.
let current_balance = initial_balance.clone();
accounts.push(StakingAccount {
public_key: pk,
secret_key: sk,
addr,
initial_balance,
current_balance,
claim_balance: TokenAmount::from_atto(0),
});
}
// Accounts on the parent subnet.
let parent_actors = accounts
.iter()
.map(|s| Actor {
meta: ActorMeta::Account(Account {
owner: SignerAddr(Address::from(s.addr)),
}),
balance: s.initial_balance.clone(),
})
.collect();
// Select one validator to be the parent validator, it doesn't matter who.
let parent_validators = vec![Validator {
public_key: ValidatorKey(accounts[0].public_key),
// All the power in the parent subnet belongs to this single validator.
// We are only interested in the staking of the *child subnet*.
power: Collateral(TokenAmount::from_atto(1)),
}];
// Select some of the accounts to be the initial *child subnet* validators.
let current_configuration = accounts
.iter()
.take(num_validators)
.map(|a| {
// Choose an initial stake committed to the child subnet.
let initial_stake = choose_amount(u, &a.initial_balance)?;
// Make sure it's not zero.
let initial_stake = initial_stake.max(TokenAmount::from_atto(1));
Ok(Validator {
public_key: ValidatorKey(a.public_key),
power: Collateral(initial_stake),
})
})
.collect::<Result<Vec<_>, _>>()?;
// Currently there is a feature flag in the contracts called `FEATURE_SUBNET_DEPTH`
// that restricts the creation of subnets to be L2 only, so the creator has
// to live under the root directly.
let subnet_id = ArbSubnetID::arbitrary(u)?.0;
let subnet_id = SubnetID::new_root(subnet_id.root_id());
// IPC of the parent subnet itself - most are not going to be used.
let parent_ipc = IpcParams {
gateway: GatewayParams {
subnet_id,
bottom_up_check_period: 1 + u.choose_index(100)? as u64,
majority_percentage: 51 + u8::arbitrary(u)? % 50,
active_validators_limit: 1 + u.choose_index(100)? as u16,
},
};
// We cannot actually use this value because the real ID will only be
// apparent once the subnet is deployed.
let child_subnet_id = SubnetID::new_from_parent(
&parent_ipc.gateway.subnet_id,
ArbSubnetAddress::arbitrary(u)?.0,
);
let parent_genesis = Genesis {
chain_name: String::arbitrary(u)?,
timestamp: Timestamp(u64::arbitrary(u)?),
network_version: NetworkVersion::V21,
base_fee: ArbTokenAmount::arbitrary(u)?.0,
power_scale: *u.choose(&[0, 3]).expect("non empty"),
validators: parent_validators,
accounts: parent_actors,
eam_permission_mode: PermissionMode::Unrestricted,
ipc: Some(parent_ipc),
};
let child_ipc = IpcParams {
gateway: GatewayParams {
subnet_id: child_subnet_id,
bottom_up_check_period: 1 + u.choose_index(100)? as u64,
majority_percentage: 51 + u8::arbitrary(u)? % 50,
active_validators_limit: num_max_validators as u16,
},
};
let child_genesis = Genesis {
chain_name: String::arbitrary(u)?,
timestamp: Timestamp(u64::arbitrary(u)?),
network_version: NetworkVersion::V21,
base_fee: ArbTokenAmount::arbitrary(u)?.0,
power_scale: *u.choose(&[0, 3]).expect("non empty"),
validators: current_configuration,
accounts: Vec::new(),
eam_permission_mode: PermissionMode::Unrestricted,
ipc: Some(child_ipc),
};
Ok(StakingState::new(accounts, parent_genesis, child_genesis))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/tests/staking/mod.rs | fendermint/testing/contract-test/tests/staking/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use arbitrary::{Arbitrary, Unstructured};
use fendermint_testing::arb::ArbTokenAmount;
use fvm_shared::{bigint::Integer, econ::TokenAmount};
pub mod machine;
pub mod state;
fn choose_amount(u: &mut Unstructured<'_>, max: &TokenAmount) -> arbitrary::Result<TokenAmount> {
if max.is_zero() {
Ok(TokenAmount::from_atto(0))
} else {
let tokens = ArbTokenAmount::arbitrary(u)?.0;
Ok(TokenAmount::from_atto(tokens.atto().mod_floor(max.atto())))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/contract-test/tests/staking/machine.rs | fendermint/testing/contract-test/tests/staking/machine.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{cell::RefCell, collections::HashSet, sync::Arc};
use arbitrary::{Arbitrary, Unstructured};
use fendermint_contract_test::ipc::{registry::RegistryCaller, subnet::SubnetCaller};
use fendermint_crypto::{PublicKey, SecretKey};
use fendermint_testing::smt::StateMachine;
use fendermint_vm_actor_interface::{
eam::EthAddress,
ipc::{subnet::SubnetActorErrors, subnet_id_to_eth, AbiHash},
};
use fendermint_vm_genesis::{Collateral, Validator, ValidatorKey};
use fendermint_vm_interpreter::fvm::{
state::{fevm::ContractResult, ipc::GatewayCaller, FvmExecState},
store::memory::MemoryBlockstore,
};
use fendermint_vm_message::{
conv::from_fvm::{self, to_eth_tokens},
signed::sign_secp256k1,
};
use fvm::engine::MultiEngine;
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::bigint::Integer;
use fvm_shared::econ::TokenAmount;
use fvm_shared::{address::Address, bigint::BigInt};
use ipc_actors_abis::subnet_actor_checkpointing_facet as checkpointer;
use ipc_api::subnet_id::SubnetID;
use super::{
choose_amount,
state::{StakingAccount, StakingState},
};
use fendermint_contract_test::ipc::registry::SubnetConstructorParams;
/// System Under Test for staking.
pub struct StakingSystem {
/// FVM state initialized with the parent genesis, and a subnet created for the child.
pub exec_state: RefCell<FvmExecState<MemoryBlockstore>>,
_gateway: GatewayCaller<MemoryBlockstore>,
_registry: RegistryCaller<MemoryBlockstore>,
pub subnet: SubnetCaller<MemoryBlockstore>,
pub subnet_id: SubnetID,
}
#[derive(Debug)]
pub enum StakingCommand {
/// Bottom-up checkpoint; confirms all staking operations up to the configuration number.
Checkpoint {
block_height: u64,
block_hash: [u8; 32],
next_configuration_number: u64,
signatories: Vec<(EthAddress, SecretKey)>,
},
/// Join by as a new validator.
Join(EthAddress, TokenAmount, PublicKey),
/// Increase the collateral of an already existing validator.
Stake(EthAddress, TokenAmount),
/// Decrease the collateral of a validator.
Unstake(EthAddress, TokenAmount),
/// Remove all collateral at once.
Leave(EthAddress),
/// Claim released collateral.
Claim(EthAddress),
}
#[derive(Default)]
pub struct StakingMachine {
multi_engine: Arc<MultiEngine>,
}
impl StateMachine for StakingMachine {
type System = StakingSystem;
type State = StakingState;
type Command = StakingCommand;
type Result = ContractResult<(), SubnetActorErrors>;
fn gen_state(&self, u: &mut Unstructured) -> arbitrary::Result<Self::State> {
eprintln!("\nNEW STATE");
StakingState::arbitrary(u)
}
fn new_system(&self, state: &Self::State) -> Self::System {
let rt = tokio::runtime::Runtime::new().expect("create tokio runtime for init");
let (mut exec_state, _) = rt
.block_on(fendermint_contract_test::init_exec_state(
self.multi_engine.clone(),
state.parent_genesis.clone(),
))
.expect("failed to init parent");
let gateway = GatewayCaller::default();
let registry = RegistryCaller::default();
// Deploy a new subnet based on `state.child_genesis`
let parent_ipc = state.parent_genesis.ipc.as_ref().unwrap();
let child_ipc = state.child_genesis.ipc.as_ref().unwrap();
let (root, route) =
subnet_id_to_eth(&parent_ipc.gateway.subnet_id).expect("subnet ID is valid");
// TODO: Need to add field to specify release queue lock time.
let params = SubnetConstructorParams {
parent_id: ipc_actors_abis::register_subnet_facet::SubnetID { root, route },
ipc_gateway_addr: gateway.addr().into(),
consensus: 0, // TODO: What are the options?
bottom_up_check_period: child_ipc.gateway.bottom_up_check_period,
majority_percentage: child_ipc.gateway.majority_percentage,
active_validators_limit: child_ipc.gateway.active_validators_limit,
power_scale: state.child_genesis.power_scale,
min_activation_collateral: to_eth_tokens(&state.min_collateral()).unwrap(),
min_validators: state.min_validators() as u64,
permission_mode: 0, // collateral based
supply_source: ipc_actors_abis::register_subnet_facet::SupplySource {
kind: 0, // native token
token_address: ethers::types::Address::zero(),
},
};
eprintln!("\n> PARENT IPC: {parent_ipc:?}");
eprintln!("\n> CHILD IPC: {child_ipc:?}");
eprintln!("\n> CREATING SUBNET: {params:?}");
let subnet_addr = registry
.new_subnet(&mut exec_state, params)
.expect("failed to create subnet");
let subnet_id =
SubnetID::new_from_parent(&parent_ipc.gateway.subnet_id, subnet_addr.into());
let subnet = SubnetCaller::new(subnet_addr);
// Make all the validators join the subnet by putting down collateral according to their power.
for v in state.child_genesis.validators.iter() {
let _addr = EthAddress::from(v.public_key.0);
eprintln!("\n> JOINING SUBNET: addr={_addr} deposit={}", v.power.0);
subnet
.join(&mut exec_state, v)
.expect("failed to join subnet");
}
let bootstrapped = subnet
.bootstrapped(&mut exec_state)
.expect("failed to call bootstrapped");
assert!(
bootstrapped,
"the genesis joiners should bootstrap the subnet"
);
let (next_configuration_number, _) = subnet
.get_configuration_numbers(&mut exec_state)
.expect("failed to call config numbers");
assert_eq!(
next_configuration_number, 1,
"after initial joiners configuration should be 1"
);
eprintln!("BOOTSTRAPPED");
StakingSystem {
exec_state: RefCell::new(exec_state),
_gateway: gateway,
_registry: registry,
subnet,
subnet_id,
}
}
fn gen_command(
&self,
u: &mut Unstructured,
state: &Self::State,
) -> arbitrary::Result<Self::Command> {
let cmd = u
.choose(&["checkpoint", "join", "stake", "leave", "claim", "unstake"])
.unwrap();
let cmd = match cmd {
&"checkpoint" => {
let next_configuration_number = match state.pending_updates.len() {
0 => 0, // No change
n => {
let idx = u.choose_index(n).expect("non-zero");
state.pending_updates[idx].configuration_number
}
};
let ipc_params = state.child_genesis.ipc.clone().unwrap();
let block_height =
state.last_checkpoint_height + ipc_params.gateway.bottom_up_check_period;
let block_hash = <[u8; 32]>::arbitrary(u)?;
let majority_percentage = ipc_params.gateway.majority_percentage;
let collateral = state.active_collateral();
let collateral = collateral.atto();
let quorum_threshold =
(collateral * majority_percentage).div_ceil(&BigInt::from(100));
let mut signatories = Vec::new();
let mut sign_power = BigInt::from(0);
for (collateral, addr) in state.active_validators() {
let a = state.account(addr);
signatories.push((*addr, a.secret_key.clone()));
sign_power += collateral.0.atto();
if sign_power >= quorum_threshold {
break;
}
}
// Technically we cannot build a proper checkpoint here because we don't know the subnet address.
StakingCommand::Checkpoint {
block_height,
block_hash,
next_configuration_number,
signatories,
}
}
&"join" => {
// Pick any account, doesn't have to be new; the system should handle repeated joins.
let a = choose_account(u, state)?;
let b = choose_amount(u, &a.current_balance)?;
StakingCommand::Join(a.addr, b, a.public_key)
}
&"leave" => {
// Pick any account, doesn't have to be bonded; the system should ignore non-validators and not pay out twice.
let a = choose_account(u, state)?;
StakingCommand::Leave(a.addr)
}
&"stake" => {
let a = choose_account(u, state)?;
// Limit ourselves to the outstanding balance - the user would not be able to send more value to the contract.
let b = choose_amount(u, &a.current_balance)?;
StakingCommand::Stake(a.addr, b)
}
&"unstake" => {
let a = choose_account(u, state)?;
// We can try sending requests to unbond arbitrarily large amounts of collateral - the system should catch any attempt to steal.
// Only limiting it to be under the initial balance so that it's comparable to what the deposits could have been.
let b = choose_amount(u, &a.initial_balance)?;
StakingCommand::Unstake(a.addr, b)
}
&"claim" => {
// Pick any account, even if has nothing to claim; the system should reject those.
let a = choose_account(u, state)?;
StakingCommand::Claim(a.addr)
}
other => unimplemented!("unknown command: {other}"),
};
Ok(cmd)
}
fn run_command(&self, system: &mut Self::System, cmd: &Self::Command) -> Self::Result {
let mut exec_state = system.exec_state.borrow_mut();
match cmd {
StakingCommand::Checkpoint {
block_height,
block_hash,
next_configuration_number,
signatories,
} => {
eprintln!(
"\n> CMD: CHECKPOINT h={} cn={}",
block_height, next_configuration_number
);
// Build the checkpoint payload.
let (root, route) = subnet_id_to_eth(&system.subnet_id).unwrap();
let checkpoint = checkpointer::BottomUpCheckpoint {
subnet_id: checkpointer::SubnetID { root, route },
block_height: ethers::types::U256::from(*block_height),
block_hash: *block_hash,
next_configuration_number: *next_configuration_number,
msgs: Vec::new(),
};
let checkpoint_hash = checkpoint.clone().abi_hash();
let mut signatures = Vec::new();
for (addr, secret_key) in signatories {
let signature = sign_secp256k1(secret_key, &checkpoint_hash);
let signature = from_fvm::to_eth_signature(&signature, false).unwrap();
signatures.push((*addr, signature.into()));
}
system
.subnet
.try_submit_checkpoint(
&mut exec_state,
checkpoint.clone(),
Vec::new(),
signatures.clone(),
)
.expect("failed to call: submit_checkpoint")
}
StakingCommand::Join(_addr, value, public_key) => {
eprintln!("\n> CMD: JOIN addr={_addr} value={value}");
let validator = Validator {
public_key: ValidatorKey(*public_key),
power: Collateral(value.clone()),
};
system
.subnet
.try_join(&mut exec_state, &validator)
.expect("failed to call: join")
}
StakingCommand::Stake(addr, value) => {
eprintln!("\n> CMD: STAKE addr={addr} value={value}");
system
.subnet
.try_stake(&mut exec_state, addr, value)
.expect("failed to call: stake")
}
StakingCommand::Unstake(addr, value) => {
eprintln!("\n> CMD: UNSTAKE addr={addr} value={value}");
system
.subnet
.try_unstake(&mut exec_state, addr, value)
.expect("failed to call: unstake")
}
StakingCommand::Leave(addr) => {
eprintln!("\n> CMD: LEAVE addr={addr}");
system
.subnet
.try_leave(&mut exec_state, addr)
.expect("failed to call: leave")
}
StakingCommand::Claim(addr) => {
eprintln!("\n> CMD: CLAIM addr={addr}");
system
.subnet
.try_claim(&mut exec_state, addr)
.expect("failed to call: claim")
}
}
}
fn check_result(&self, cmd: &Self::Command, pre_state: &Self::State, result: Self::Result) {
let info = match result {
Err(ref e) => format!("error: {:?}", e.error),
Ok(()) => "ok".to_owned(),
};
eprintln!("> RESULT: {info}");
match cmd {
StakingCommand::Checkpoint { .. } => {
if !pre_state.can_checkpoint() {
result.expect_err("the subnet should be inactive");
} else {
result.expect("checkpoint submission should succeed");
}
}
StakingCommand::Join(eth_addr, value, _) => {
if value.is_zero() {
result.expect_err("should not join with 0 value");
} else if pre_state.has_staked(eth_addr) {
result.expect_err("should not join again");
} else {
result.expect("join should succeed");
}
}
StakingCommand::Stake(addr, value) => {
if value.is_zero() {
result.expect_err("should not stake with 0 value");
} else if !pre_state.has_staked(addr) {
result.expect_err("must call join before stake");
} else {
result.expect("stake should succeed");
}
}
StakingCommand::Unstake(addr, value) => {
if value.is_zero() {
result.expect_err("cannot unstake 0");
} else if pre_state.total_deposit(addr) <= *value {
result.expect_err("tried to unstake too much");
} else {
result.expect("unstake should succeed")
}
}
StakingCommand::Leave(addr) => {
if !pre_state.has_staked(addr) {
result.expect_err("must call join before leave");
} else {
result.expect("leave should succeed");
}
}
StakingCommand::Claim(addr) => {
if !pre_state.has_claim(addr) {
result.expect_err("zero claims should fail");
} else {
result.expect("claim should succeed");
}
}
}
}
fn next_state(&self, cmd: &Self::Command, mut state: Self::State) -> Self::State {
match cmd {
StakingCommand::Checkpoint {
next_configuration_number,
block_height,
..
} => state.checkpoint(*next_configuration_number, *block_height),
StakingCommand::Join(addr, value, _) => state.join(*addr, value.clone()),
StakingCommand::Stake(addr, value) => state.stake(*addr, value.clone()),
StakingCommand::Unstake(addr, value) => state.unstake(*addr, value.clone()),
StakingCommand::Leave(addr) => state.leave(*addr),
StakingCommand::Claim(addr) => state.claim(*addr),
}
state
}
fn check_system(
&self,
cmd: &Self::Command,
post_state: &Self::State,
post_system: &Self::System,
) -> bool {
// Queries need mutable reference too.
let mut exec_state = post_system.exec_state.borrow_mut();
// Check configuration numbers
let (next_cn, start_cn) = post_system
.subnet
.get_configuration_numbers(&mut exec_state)
.expect("failed to get config numbers");
assert_eq!(
next_cn, post_state.next_configuration_number,
"next configuration number mismatch"
);
assert_eq!(
start_cn,
post_state.current_configuration.configuration_number + 1,
"start configuration number mismatch"
);
match cmd {
StakingCommand::Checkpoint { .. } => {
// Sanity check the reference state while we have no contract to compare with.
assert!(
post_state
.accounts
.iter()
.all(|(_, a)| a.current_balance <= a.initial_balance),
"no account goes over initial balance"
);
assert!(
post_state
.current_configuration
.collaterals
.iter()
.all(|(_, p)| !p.0.is_zero()),
"all child validators have non-zero collateral"
);
// Collect all account info so we can see the ranking, check if there are edge cases.
let mut obs = Vec::new();
let active_validators = post_state.active_validators().collect::<Vec<_>>();
let active_addresses = active_validators
.iter()
.map(|(_, addr)| addr)
.collect::<HashSet<_>>();
let min_active_collateral = active_validators
.last()
.map(|(c, _)| c.0.clone())
.unwrap_or_default();
for (addr, a) in post_state.accounts.iter() {
// Check balances
let sys_balance = get_actor_balance(&mut exec_state, *addr);
// Check that we agree on who the active validators are.
let sys_collateral = post_system
.subnet
.confirmed_collateral(&mut exec_state, addr)
.expect("failed to get confirmed collateral");
let sys_active = post_system
.subnet
.is_active(&mut exec_state, addr)
.expect("failed to call is_waiting");
let sys_waiting = post_system
.subnet
.is_waiting(&mut exec_state, addr)
.expect("failed to call is_active");
let sys = (sys_balance, sys_collateral, sys_active, sys_waiting);
let st_balance = a.current_balance.clone();
let st_collateral = post_state.current_configuration.collateral(addr);
let st_active = active_addresses.contains(addr);
let st_waiting = !st_active && st_collateral.is_positive();
let st = (st_balance, st_collateral, st_active, st_waiting);
obs.push((addr, sys, st))
}
let mut sys_active_cnt = 0;
let mut st_active_cnt = 0;
for (addr, (_, sys_coll, sys_active, _), (_, _, st_active, _)) in obs.iter() {
if *sys_active || *st_active {
eprintln!(
"> CONFIRMED addr={:?} collateral={} active=({} vs {})",
addr, sys_coll, sys_active, st_active
);
}
if *sys_active {
sys_active_cnt += 1;
}
if *st_active {
st_active_cnt += 1;
}
}
assert!(
sys_active_cnt <= post_state.max_validators(),
"system over max active"
);
assert!(
st_active_cnt <= post_state.max_validators(),
"state over max active"
);
for (
addr,
(sys_bal, sys_coll, sys_active, sys_waiting),
(st_bal, st_coll, st_active, st_waiting),
) in obs.iter()
{
assert_eq!(sys_bal, st_bal, "balance mismatch for {addr}");
assert_eq!(sys_coll, st_coll, "collateral mismatch for {addr}");
if sys_active != st_active && *sys_coll == min_active_collateral {
let cnt = obs
.iter()
.filter(|(_, (_, c, _, _), _)| *c == min_active_collateral)
.count();
if cnt > 1 {
eprintln!(">>> There is a disagreement at the minimum collateral.");
eprintln!(">>> Quitting now because the next checkpoint might get invalid signature");
return false;
}
}
assert_eq!(sys_active, st_active, "active mismatch for {addr}");
assert_eq!(sys_waiting, st_waiting, "waiting mismatch for {addr}");
}
}
StakingCommand::Stake(addr, _)
| StakingCommand::Unstake(addr, _)
| StakingCommand::Join(addr, _, _)
| StakingCommand::Leave(addr)
| StakingCommand::Claim(addr) => {
let a = post_state.accounts.get(addr).unwrap();
assert!(a.current_balance <= a.initial_balance);
// Check collaterals
let total = post_system
.subnet
.total_collateral(&mut exec_state, addr)
.expect("failed to get total collateral");
let confirmed = post_system
.subnet
.confirmed_collateral(&mut exec_state, addr)
.expect("failed to get confirmed collateral");
assert_eq!(
total,
post_state.next_configuration.collateral(addr),
"total collateral mismatch"
);
assert_eq!(
confirmed,
post_state.current_configuration.collateral(addr),
"confirmed collateral mismatch"
);
// Check balance
let balance = get_actor_balance(&mut exec_state, *addr);
assert_eq!(balance, a.current_balance, "current balance mismatch");
}
}
eprintln!(
"> LAST UPDATE CONFIG NUMBER: {}",
post_state.next_configuration.configuration_number
);
true
}
}
fn choose_account<'a>(
u: &mut Unstructured<'_>,
state: &'a StakingState,
) -> arbitrary::Result<&'a StakingAccount> {
let a = u.choose(&state.addrs).expect("accounts not empty");
let a = state.accounts.get(a).expect("account exists");
Ok(a)
}
fn get_actor_balance<DB: Blockstore + Clone>(
exec_state: &mut FvmExecState<DB>,
addr: EthAddress,
) -> TokenAmount {
let actor_id = exec_state
.state_tree_mut()
.lookup_id(&Address::from(addr))
.expect("failed to get actor ID")
.expect("actor exists");
let actor = exec_state
.state_tree_mut()
.get_actor(actor_id)
.expect("failed to get actor")
.expect("actor exists");
actor.balance
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/graph-test/src/lib.rs | fendermint/testing/graph-test/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Run tests against a local Fendermint docker container node:
//! 0. The default `graph-fendermint`, `graph-cometbft` and `graph-ethapi` triplet
//! 1. The Graph docker-compose setup connecting to Fendermint through the Ethereum API
//!
//! Note that CometBFT state sync requires 2 RPC servers, which is why we need 3 nodes.
//!
//! See
//! * <https://github.com/graphprotocol/graph-node/blob/master/docker/README.md>
//! * <https://docs.hedera.com/hedera/tutorials/smart-contracts/deploy-a-subgraph-using-the-graph-and-json-rpc>
//! * <https://github.com/hashgraph/hedera-subgraph-example>
//! * <https://github.com/hashgraph/hedera-hardhat-example-project>
//!
//! Examples:
//!
//! 1. All in one go
//! ```text
//! cd fendermint/testing/graph-test
//! cargo make
//! ```
//!
//! 2. One by one
//! ```text
//! cd fendermint/testing/graph-test
//! cargo make setup
//! cargo make test
//! cargo make teardown
//! ```
//!
//! Make sure you installed cargo-make by running `cargo install cargo-make` first.
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/smoke-test/src/lib.rs | fendermint/testing/smoke-test/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Run some tests against a pair of Fendermint and Tendermint docker containers running locally.
//!
//! Example:
//!
//! ```text
//! cd fendermint/testing/smoke-test
//! cargo make
//! ```
//!
//! Make sure you installed cargo-make by running `cargo install cargo-make` first.
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/lib.rs | fendermint/testing/materializer/src/lib.rs | use ethers::providers::{Http, Provider};
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use multihash::MultihashDigest;
use serde::{Deserialize, Serialize};
use std::fmt::Formatter;
use std::{
fmt::{Debug, Display},
path::{Path, PathBuf},
};
#[allow(unused_variables, dead_code)] // TODO: Remove once implemented
pub mod docker;
pub mod logging;
pub mod manifest;
pub mod materializer;
pub mod materials;
pub mod testnet;
pub mod validation;
#[cfg(feature = "arb")]
mod arb;
/// An ID identifying a resource within its parent.
#[derive(Clone, Serialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct ResourceId(String);
/// Implementing a deserializer which has the logic to sanitise URL-unfriendly characters.
impl<'de> Deserialize<'de> for ResourceId {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
String::deserialize(deserializer).map(Self::from)
}
}
impl Display for ResourceId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "'{}'", self.0)
}
}
impl Debug for ResourceId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self)
}
}
impl From<&str> for ResourceId {
fn from(value: &str) -> Self {
Self::from(value.to_string())
}
}
/// Replace the path separator with a different character when reading strings.
impl From<String> for ResourceId {
fn from(value: String) -> Self {
Self(value.replace('/', "_"))
}
}
impl From<&ResourceId> for ResourceId {
fn from(value: &Self) -> Self {
value.clone()
}
}
impl AsRef<str> for ResourceId {
fn as_ref(&self) -> &str {
&self.0
}
}
/// A human readable name for a testnet.
pub type TestnetId = ResourceId;
/// A human readable name for an account.
pub type AccountId = ResourceId;
/// A human readable name for a subnet.
pub type SubnetId = ResourceId;
/// A human readable name for a node.
pub type NodeId = ResourceId;
/// A human readable name for a relayer.
pub type RelayerId = ResourceId;
/// The name of a resource consists of its ID and all the IDs of its ancestors
/// concatenated into a URL-like path.
///
/// See <https://cloud.google.com/apis/design/resource_names>
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct ResourceName(PathBuf);
impl ResourceName {
fn join(&self, s: &str) -> Self {
Self(self.0.join(s))
}
fn join_id(&self, id: &ResourceId) -> Self {
self.join(&id.0)
}
pub fn is_prefix_of(&self, other: &ResourceName) -> bool {
other.0.starts_with(&self.0)
}
pub fn path_string(&self) -> String {
self.0.to_string_lossy().to_string()
}
pub fn path(&self) -> &Path {
self.0.as_path()
}
pub fn id(&self) -> ResourceId {
ResourceId(
self.0
.file_name()
.expect("resource name has file segment")
.to_string_lossy()
.to_string(),
)
}
}
impl From<&str> for ResourceName {
fn from(value: &str) -> Self {
Self(PathBuf::from(value))
}
}
impl Display for ResourceName {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "'{}'", self.0.to_string_lossy())
}
}
impl Debug for ResourceName {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self)
}
}
pub trait TestnetResource {
fn testnet(&self) -> TestnetName;
}
macro_rules! resource_name {
($name:ident) => {
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct $name(ResourceName);
impl $name {
pub fn path(&self) -> &Path {
&self.0 .0
}
pub fn path_string(&self) -> String {
self.0.path_string()
}
}
impl AsRef<ResourceName> for $name {
fn as_ref(&self) -> &ResourceName {
&self.0
}
}
impl AsRef<Path> for $name {
fn as_ref(&self) -> &Path {
self.path()
}
}
impl Display for $name {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}({})",
stringify!($name).trim_end_matches("Name"),
self.0
)
}
}
impl Debug for $name {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Display::fmt(&self, f)
}
}
};
($name:ident: Testnet) => {
resource_name!($name);
impl TestnetResource for $name {
fn testnet(&self) -> TestnetName {
TestnetName::from_prefix(&self.0)
}
}
};
}
resource_name!(TestnetName);
resource_name!(AccountName: Testnet);
resource_name!(SubnetName: Testnet);
resource_name!(NodeName: Testnet);
resource_name!(RelayerName: Testnet);
resource_name!(CliName: Testnet);
impl TestnetName {
pub fn new<T: Into<TestnetId>>(id: T) -> Self {
// Not including a leadign slash (ie. "/testnets") so that we can join with directory paths.
Self(ResourceName::from("testnets").join_id(&id.into()))
}
pub fn account<T: Into<AccountId>>(&self, id: T) -> AccountName {
AccountName(self.0.join("accounts").join_id(&id.into()))
}
pub fn root(&self) -> SubnetName {
SubnetName(self.0.join("root"))
}
/// Check that the testnet contains a certain resource name, ie. it's a prefix of it.
pub fn contains<T: AsRef<ResourceName>>(&self, name: T) -> bool {
self.0.is_prefix_of(name.as_ref())
}
/// Assuming correct contstruction of resources, get the testnet prefix.
fn from_prefix(name: &ResourceName) -> Self {
name.0
.components()
.nth(1)
.map(|c| c.as_os_str().to_string_lossy().to_string())
.map(Self::new)
.unwrap_or_else(|| Self(name.clone()))
}
}
impl SubnetName {
    /// Name of a child subnet, ie. `<self>/subnets/<id>`.
    pub fn subnet<T: Into<SubnetId>>(&self, id: T) -> Self {
        Self(self.0.join("subnets").join_id(&id.into()))
    }
    /// Name of a node in this subnet, ie. `<self>/nodes/<id>`.
    pub fn node<T: Into<NodeId>>(&self, id: T) -> NodeName {
        NodeName(self.0.join("nodes").join_id(&id.into()))
    }
    /// Name of a relayer in this subnet, ie. `<self>/relayers/<id>`.
    pub fn relayer<T: Into<RelayerId>>(&self, id: T) -> RelayerName {
        RelayerName(self.0.join("relayers").join_id(&id.into()))
    }
    /// Name of a CLI invocation in this subnet, ie. `<self>/cli/<id>`.
    pub fn cli(&self, id: &str) -> CliName {
        CliName(self.0.join("cli").join(id))
    }
    /// Check if this is the root subnet, ie. it ends with `root` and its parent is a testnet
    pub fn is_root(&self) -> bool {
        // A root path looks like `testnets/<id>/root`, so the grandparent is `testnets`.
        self.path().ends_with("root")
            && self
                .path()
                .parent()
                .and_then(|p| p.parent())
                .filter(|p| p.ends_with("testnets"))
                .is_some()
    }
    /// The parent subnet, or `None` if this is the root.
    ///
    /// Strips the `<id>` and `subnets` path components, hence the double `parent()`.
    pub fn parent(&self) -> Option<SubnetName> {
        if self.is_root() {
            None
        } else {
            let path = self
                .path()
                .parent()
                .and_then(|p| p.parent())
                .expect("invalid subnet path");
            Some(Self(ResourceName(path.into())))
        }
    }
    /// All the subnet names from the root to the parent of the subnet,
    /// excluding the subnet itself.
    pub fn ancestors(&self) -> Vec<SubnetName> {
        let mut ss = Vec::new();
        let mut p = self.parent();
        while let Some(s) = p {
            p = s.parent();
            ss.push(s);
        }
        // Collected child-to-root; reverse so the root comes first.
        ss.reverse();
        ss
    }
    /// parent->child hop pairs from the root to the current subnet.
    pub fn ancestor_hops(&self, include_self: bool) -> Vec<(SubnetName, SubnetName)> {
        // `ss0` is root..=parent; `ss1` is the same list shifted by one with `self` appended,
        // so zipping them yields consecutive (parent, child) pairs.
        let ss0 = self.ancestors();
        let ss1 = ss0
            .iter()
            .skip(1)
            .chain(std::iter::once(self))
            .cloned()
            .collect::<Vec<_>>();
        let mut hops = ss0.into_iter().zip(ss1).collect::<Vec<_>>();
        if !include_self {
            // Drop the final (parent, self) hop.
            hops.pop();
        }
        hops
    }
    /// Check that the subnet contains a certain resource name, ie. it's a prefix of it.
    pub fn contains<T: AsRef<ResourceName>>(&self, name: T) -> bool {
        self.0.is_prefix_of(name.as_ref())
    }
}
/// Unique identifier for certain things that we want to keep unique.
///
/// Stored as a fixed 32-byte digest; see [`ResourceHash::digest`].
#[derive(Clone, Debug, Hash, PartialEq, PartialOrd, Eq, Ord)]
pub struct ResourceHash([u8; 32]);
impl ResourceHash {
    /// Digest some general unique but unwieldy label for a more compact form.
    ///
    /// Uses Blake2b-256, whose output always fills the fixed 32-byte array.
    pub fn digest<T: AsRef<[u8]>>(value: T) -> Self {
        let hash = multihash::Code::Blake2b256.digest(value.as_ref());
        let bytes: [u8; 32] = hash
            .digest()
            .try_into()
            .expect("Blake2b256 digest is exactly 32 bytes");
        Self(bytes)
    }
}
impl Display for ResourceHash {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Render the digest as lowercase hex.
        f.write_str(&hex::encode(self.0))
    }
}
/// Access to the Ethereum API facade of a node, if it runs one.
pub trait HasEthApi {
    /// URL of the HTTP endpoint *on the host*, if it's enabled.
    fn ethapi_http_endpoint(&self) -> Option<url::Url>;
    /// Create an ethers HTTP provider for the endpoint, if it's enabled.
    fn ethapi_http_provider(&self) -> anyhow::Result<Option<Provider<Http>>> {
        match self.ethapi_http_endpoint() {
            Some(url) => Ok(Some(Provider::<Http>::try_from(url.to_string())?)),
            None => Ok(None),
        }
    }
}
/// Access to the CometBFT RPC API of a node.
pub trait HasCometBftApi {
    /// URL of the HTTP endpoint *on the host*.
    fn cometbft_http_endpoint(&self) -> tendermint_rpc::Url;
    /// Create a tendermint RPC client for the endpoint.
    fn cometbft_http_provider(&self) -> anyhow::Result<tendermint_rpc::HttpClient> {
        Ok(tendermint_rpc::HttpClient::new(
            self.cometbft_http_endpoint(),
        )?)
    }
}
#[cfg(test)]
mod tests {
    use std::path::PathBuf;
    use crate::{TestnetName, TestnetResource};
    /// Resource names should join seamlessly with filesystem paths.
    #[test]
    fn test_path_join() {
        let root = PathBuf::from("/tmp/foo");
        let net = TestnetName::new("bar");
        let acc = net.account("spam");
        let dir = root.join(acc);
        assert_eq!(dir, PathBuf::from("/tmp/foo/testnets/bar/accounts/spam"));
    }
    /// Root has no parent; a child's parent is the root; the testnet is recoverable.
    #[test]
    fn test_subnet_parent() {
        let tn = TestnetName::new("example");
        let rn = tn.root();
        let sn = rn.subnet("foo");
        assert_eq!(rn.parent(), None, "root shouldn't have a parent");
        assert_eq!(sn.parent(), Some(rn), "parent should be the root");
        assert_eq!(sn.testnet(), tn, "testnet is the prefix");
    }
    /// Ancestors list runs from the root down to the parent, excluding self.
    #[test]
    fn test_subnet_ancestors() {
        let tn = TestnetName::new("example");
        let sn = tn.root().subnet("foo").subnet("bar");
        assert_eq!(sn.ancestors(), vec![tn.root(), tn.root().subnet("foo")]);
    }
    /// `ancestor_hops` yields (parent, child) pairs, optionally ending at self.
    #[test]
    fn test_subnet_ancestor_hops() {
        let tn = TestnetName::new("example");
        let rn = tn.root();
        let foo = rn.subnet("foo");
        let bar = foo.subnet("bar");
        let hops0 = bar.ancestor_hops(false);
        let hops1 = bar.ancestor_hops(true);
        let hops = [(rn, foo.clone()), (foo, bar)];
        assert_eq!(hops0[..], hops[..1]);
        assert_eq!(hops1[..], hops[..]);
    }
    /// Node names are contained by their subnet and keep the testnet prefix.
    #[test]
    fn test_node_subnet() {
        let tn = TestnetName::new("example");
        let sn = tn.root().subnet("foo");
        let node = sn.node("node-1");
        assert!(sn.contains(&node));
        assert_eq!(node.testnet(), tn, "testnet is the prefix");
    }
    /// Display and Debug render the same human-readable form.
    #[test]
    fn test_resource_name_display() {
        let tn = TestnetName::new("display-test");
        assert_eq!(format!("{tn}"), "Testnet('testnets/display-test')");
        assert_eq!(format!("{tn:?}"), "Testnet('testnets/display-test')");
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/validation.rs | fendermint/testing/materializer/src/validation.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, bail, Ok};
use async_trait::async_trait;
use either::Either;
use ethers::types::H160;
use fendermint_vm_genesis::Collateral;
use fvm_shared::{chainid::ChainID, econ::TokenAmount};
use std::{
collections::{BTreeMap, HashSet},
fmt::Debug,
ops::{Add, Sub},
};
use url::Url;
use crate::{
logging::LoggingMaterializer,
manifest::{Balance, Manifest},
materializer::{Materializer, NodeConfig, RelayerConfig, SubmitConfig, SubnetConfig},
materials::Materials,
testnet::Testnet,
AccountName, NodeName, RelayerName, ResourceHash, ResourceName, SubnetName, TestnetName,
};
/// Whole FIL credited per faucet request during validation; mirrors the Calibration faucet's 100 tFIL.
const DEFAULT_FAUCET_FIL: u64 = 100;
/// Do simple sanity checks on the manifest, e.g.:
/// * we are not over allocating the balances
/// * relayers have balances on the parent to submit transactions
/// * subnet creators have balances on the parent to submit transactions
pub async fn validate_manifest(name: &TestnetName, manifest: &Manifest) -> anyhow::Result<()> {
    // Wrap the validating materializer with logging so the tests are easier to debug.
    let mut materializer =
        LoggingMaterializer::new(ValidatingMaterializer::default(), "validation".to_string());
    // Setting up the testnet performs all the checks; the testnet itself is discarded.
    let _ = Testnet::setup(&mut materializer, name, manifest).await?;
    // We could check here that all subnets have enough validators for a quorum.
    Ok(())
}
/// Materials used during validation: every resource is represented purely by
/// its name, because nothing physical is actually provisioned.
pub struct ValidationMaterials;
impl Materials for ValidationMaterials {
    type Network = TestnetName;
    type Deployment = SubnetName;
    type Account = AccountName;
    type Genesis = SubnetName;
    type Subnet = SubnetName;
    type Node = NodeName;
    type Relayer = RelayerName;
}
// Shorthand aliases for the associated types of `ValidationMaterials`.
type VNetwork = <ValidationMaterials as Materials>::Network;
type VDeployment = <ValidationMaterials as Materials>::Deployment;
type VAccount = <ValidationMaterials as Materials>::Account;
type VGenesis = <ValidationMaterials as Materials>::Genesis;
type VSubnet = <ValidationMaterials as Materials>::Subnet;
type VNode = <ValidationMaterials as Materials>::Node;
type VRelayer = <ValidationMaterials as Materials>::Relayer;
/// A materializer that only validates the manifest: it tracks per-subnet
/// account balances and reference uniqueness without creating any resources.
#[derive(Clone, Debug, Default)]
pub struct ValidatingMaterializer {
    // The testnet name; set by `create_network`, required by most other operations.
    network: Option<TestnetName>,
    // Account balances per subnet, debited/credited as funding operations are validated.
    balances: BTreeMap<SubnetName, BTreeMap<AccountName, TokenAmount>>,
    // References already seen per subnet, used to detect duplicates.
    references: BTreeMap<SubnetName, HashSet<ResourceHash>>,
}
impl ValidatingMaterializer {
    /// The current testnet name; fails if `create_network` hasn't been called yet.
    fn network(&self) -> anyhow::Result<TestnetName> {
        self.network
            .as_ref()
            .cloned()
            .ok_or_else(|| anyhow!("network isn't set"))
    }
    /// Check that a name is within the testnet. This should trivially be true by construction, but still.
    fn ensure_contains<T: AsRef<ResourceName> + Debug>(&self, name: &T) -> anyhow::Result<()> {
        let tn = self.network()?;
        if !tn.contains(name) {
            bail!("{tn:?} does not contain {name:?}");
        }
        Ok(())
    }
    /// Ensure we aren't reusing references.
    fn ensure_unique(
        &mut self,
        subnet: &SubnetName,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()> {
        if let Some(r) = reference {
            // `insert` returns false if the reference was already present.
            let rs = self.references.entry(subnet.clone()).or_default();
            if !rs.insert(r) {
                bail!("a reference is reused in {subnet:?}");
            }
        }
        Ok(())
    }
    /// Check that an account has a positive balance on a subnet
    fn ensure_balance(&self, subnet: &SubnetName, account: &AccountName) -> anyhow::Result<()> {
        match self.balances.get(subnet) {
            None => bail!("{subnet:?} has not been created"),
            Some(bs) => match bs.get(account) {
                None => bail!("{account:?} has no balance on {subnet:?}"),
                Some(b) if b.is_zero() => bail!("{account:?} has zero balance on {subnet:?}"),
                Some(_) => Ok(()),
            },
        }
    }
    /// Check that the subnet has been created already.
    fn ensure_subnet_exists(&self, subnet: &SubnetName) -> anyhow::Result<()> {
        if !self.balances.contains_key(subnet) {
            bail!("{subnet:?} has not been created");
        }
        Ok(())
    }
    /// Move funds of an account from the parent to the child subnet.
    ///
    /// Fails if either:
    /// * the parent doesn't exist
    /// * the child doesn't exist
    /// * the account doesn't have the funds
    fn fund_from_parent(
        &mut self,
        subnet: &SubnetName,
        account: &AccountName,
        amount: TokenAmount,
        credit_child: bool,
    ) -> anyhow::Result<()> {
        let parent = subnet
            .parent()
            .ok_or_else(|| anyhow!("{subnet} must have a parent to fund from"))?;
        // Existence is checked even for zero transfers, before short-circuiting below.
        self.ensure_subnet_exists(&parent)?;
        self.ensure_subnet_exists(subnet)?;
        if amount.is_zero() {
            return Ok(());
        }
        self.ensure_balance(&parent, account)?;
        // The `unwrap`s are safe: `ensure_balance` above guarantees both entries exist.
        let pbs = self.balances.get_mut(&parent).unwrap();
        let pb = pbs.get_mut(account).unwrap();
        if *pb < amount {
            bail!("{account:?} has less than {amount} on {parent:?}, cannot fund {subnet:?}");
        }
        // Debit the parent balance.
        *pb = pb.clone().sub(amount.clone());
        if credit_child {
            // Credit the child balance (e.g. for pre-funding, but not for collateral).
            let cbs = self.balances.get_mut(subnet).unwrap();
            let cb = cbs.entry(account.clone()).or_default();
            *cb = cb.clone().add(amount);
        }
        Ok(())
    }
}
#[async_trait]
impl Materializer<ValidationMaterials> for ValidatingMaterializer {
    /// Record the testnet name; all later names are validated against this prefix.
    async fn create_network(&mut self, testnet_name: &TestnetName) -> anyhow::Result<VNetwork> {
        self.network = Some(testnet_name.clone());
        Ok(testnet_name.clone())
    }
    /// Accounts are pure names during validation; just check the prefix.
    fn create_account(&mut self, account_name: &AccountName) -> anyhow::Result<VAccount> {
        self.ensure_contains(account_name)?;
        Ok(account_name.clone())
    }
    /// Credit the account on the rootnet with the default faucet amount.
    async fn fund_from_faucet<'s, 'a>(
        &'s mut self,
        account: &'a VAccount,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        let tn = self.network()?;
        self.ensure_unique(&tn.root(), reference)?;
        let balances = self.balances.entry(tn.root()).or_default();
        let balance = balances.entry(account.clone()).or_default();
        *balance = balance
            .clone()
            .add(TokenAmount::from_whole(DEFAULT_FAUCET_FIL));
        Ok(())
    }
    /// A fresh deployment requires the deployer to have funds for the transactions.
    async fn new_deployment<'s, 'a>(
        &'s mut self,
        subnet_name: &SubnetName,
        deployer: &'a VAccount,
        _urls: Vec<Url>,
    ) -> anyhow::Result<VDeployment>
    where
        's: 'a,
    {
        self.ensure_contains(subnet_name)?;
        self.ensure_balance(subnet_name, deployer)?;
        Ok(subnet_name.clone())
    }
    /// Sanity check that the two contract addresses are distinct.
    fn existing_deployment(
        &mut self,
        subnet_name: &SubnetName,
        gateway: H160,
        registry: H160,
    ) -> anyhow::Result<VDeployment> {
        self.ensure_contains(subnet_name)?;
        if gateway == registry {
            bail!("gateway and registry addresses are the same in {subnet_name:?}: {gateway} == {registry}");
        }
        Ok(subnet_name.clone())
    }
    fn default_deployment(&mut self, subnet_name: &SubnetName) -> anyhow::Result<VDeployment> {
        self.ensure_contains(subnet_name)?;
        Ok(subnet_name.clone())
    }
    /// Seed the rootnet balances from the genesis allocation.
    fn create_root_genesis<'a>(
        &mut self,
        subnet_name: &SubnetName,
        validators: BTreeMap<&'a VAccount, Collateral>,
        balances: BTreeMap<&'a VAccount, Balance>,
    ) -> anyhow::Result<VGenesis> {
        self.ensure_contains(subnet_name)?;
        let tn = self.network()?;
        if validators.is_empty() {
            bail!("validators of {subnet_name:?} cannot be empty");
        }
        // Genesis balances overwrite whatever was there (e.g. faucet credits).
        let root_balances = self.balances.entry(tn.root()).or_default();
        for (n, b) in balances {
            let balance = root_balances.entry(n.clone()).or_default();
            *balance = b.0;
        }
        Ok(subnet_name.clone())
    }
    fn create_root_subnet(
        &mut self,
        subnet_name: &SubnetName,
        _params: Either<ChainID, &VGenesis>,
    ) -> anyhow::Result<VSubnet> {
        Ok(subnet_name.clone())
    }
    /// Nodes are names only; just check the prefix.
    async fn create_node<'s, 'a>(
        &'s mut self,
        node_name: &NodeName,
        _node_config: &NodeConfig<'a, ValidationMaterials>,
    ) -> anyhow::Result<VNode>
    where
        's: 'a,
    {
        self.ensure_contains(node_name)?;
        Ok(node_name.clone())
    }
    /// Nothing to start during validation.
    async fn start_node<'s, 'a>(
        &'s mut self,
        _node: &'a VNode,
        _seed_nodes: &'a [&'a VNode],
    ) -> anyhow::Result<()> {
        Ok(())
    }
    async fn create_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, ValidationMaterials>,
        subnet_name: &SubnetName,
        subnet_config: &SubnetConfig<'a, ValidationMaterials>,
    ) -> anyhow::Result<VSubnet>
    where
        's: 'a,
    {
        self.ensure_contains(subnet_name)?;
        // Check that the submitter has balance on the parent subnet to create the child.
        let parent = parent_submit_config.subnet;
        self.ensure_balance(parent, subnet_config.creator)?;
        // Insert child subnet balances entry.
        self.balances
            .insert(subnet_name.clone(), Default::default());
        Ok(subnet_name.clone())
    }
    async fn fund_subnet<'s, 'a>(
        &'s mut self,
        _parent_submit_config: &SubmitConfig<'a, ValidationMaterials>,
        account: &'a VAccount,
        subnet: &'a VSubnet,
        amount: TokenAmount,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        // Debit parent balance; Credit child balance
        self.fund_from_parent(subnet, account, amount, true)?;
        // `fund_from_parent` succeeded, so the parent is known to exist.
        self.ensure_unique(&subnet.parent().unwrap(), reference)?;
        Ok(())
    }
    async fn join_subnet<'s, 'a>(
        &'s mut self,
        _parent_submit_config: &SubmitConfig<'a, ValidationMaterials>,
        account: &'a VAccount,
        subnet: &'a VSubnet,
        collateral: Collateral,
        balance: Balance,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        // Debit parent balance, but do not make the funds available in the child
        self.fund_from_parent(subnet, account, collateral.0, false)?;
        // Debit parent balance; Credit child balance
        self.fund_from_parent(subnet, account, balance.0, true)?;
        self.ensure_unique(&subnet.parent().unwrap(), reference)?;
        Ok(())
    }
    async fn create_subnet_genesis<'s, 'a>(
        &'s mut self,
        _parent_submit_config: &SubmitConfig<'a, ValidationMaterials>,
        subnet: &'a VSubnet,
    ) -> anyhow::Result<VGenesis>
    where
        's: 'a,
    {
        // We're supposed to fetch the data from the parent, there's nothing to check.
        Ok(subnet.clone())
    }
    async fn create_relayer<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, ValidationMaterials>,
        relayer_name: &RelayerName,
        relayer_config: RelayerConfig<'a, ValidationMaterials>,
    ) -> anyhow::Result<VRelayer>
    where
        's: 'a,
    {
        self.ensure_contains(relayer_name)?;
        // Check that submitter has balance on the parent.
        let parent = parent_submit_config.subnet;
        self.ensure_balance(parent, relayer_config.submitter)?;
        Ok(relayer_name.clone())
    }
}
#[cfg(test)]
mod tests {
    use crate::{manifest::Manifest, validation::validate_manifest, TestnetId, TestnetName};
    // Unfortunately doesn't seem to work with quickcheck_async
    // /// Run the tests with `RUST_LOG=info` to see the logs, for example:
    // ///
    // /// ```text
    // /// RUST_LOG=info cargo test -p fendermint_testing_materializer prop_validation -- --nocapture
    // /// ```
    // fn init_log() {
    //     let _ = env_logger::builder().is_test(true).try_init();
    // }
    /// Check that the random manifests we generate would pass validation.
    ///
    /// This keeps the `Arbitrary` instances honest: generated manifests must not
    /// over-allocate balances or skip funding required by relayers and creators.
    #[quickcheck_async::tokio]
    async fn prop_validation(id: TestnetId, manifest: Manifest) -> anyhow::Result<()> {
        let name = TestnetName::new(id);
        validate_manifest(&name, &manifest).await
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/manifest.rs | fendermint/testing/materializer/src/manifest.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
// See https://github.com/cometbft/cometbft/blob/v0.38.5/test/e2e/pkg/manifest.go for inspiration.
use anyhow::{bail, Context};
use fvm_shared::econ::TokenAmount;
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use std::{collections::BTreeMap, path::Path};
use url::Url;
use fendermint_vm_encoding::IsHumanReadable;
use fendermint_vm_genesis::Collateral;
use crate::{validation::validate_manifest, AccountId, NodeId, RelayerId, SubnetId, TestnetName};
// Convenience map aliases keyed by the various manifest resource IDs.
pub type SubnetMap = BTreeMap<SubnetId, Subnet>;
pub type BalanceMap = BTreeMap<AccountId, Balance>;
pub type CollateralMap = BTreeMap<AccountId, Collateral>;
pub type NodeMap = BTreeMap<NodeId, Node>;
pub type RelayerMap = BTreeMap<RelayerId, Relayer>;
// Arbitrary env vars passed on to node containers/processes.
pub type EnvMap = BTreeMap<String, String>;
/// The manifest is a static description of a testnet.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Manifest {
    /// All the accounts we want to act with across the entire subnet hierarchy.
    ///
    /// Each account will have its pair of private and public keys.
    ///
    /// In the rootnet, if we are dealing with Calibration, they will get their
    /// initial balance from the Faucet, which should give 100 tFIL ("testnet" FIL),
    /// which is why there is no definition for the account balances for the root.
    ///
    /// This would be different if we deployed a root in the test, e.g. using
    /// Fendermint itself, in which case we could set whatever balance we wanted.
    pub accounts: BTreeMap<AccountId, Account>,
    /// Whether we use an existing L1 or create our own.
    pub rootnet: Rootnet,
    /// Subnets created on the rootnet.
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub subnets: SubnetMap,
}
impl Manifest {
/// Read a manifest from file. It chooses the format based on the extension.
pub fn from_file(path: &Path) -> anyhow::Result<Self> {
let Some(ext) = path
.extension()
.map(|e| e.to_string_lossy().to_ascii_lowercase())
else {
bail!("manifest file has no extension, cannot determine format");
};
let manifest = std::fs::read_to_string(path)
.with_context(|| format!("failed to read manifest from {}", path.to_string_lossy()))?;
match ext.as_str() {
"yaml" => serde_yaml::from_str(&manifest).context("failed to parse manifest YAML"),
"json" => serde_json::from_str(&manifest).context("failed to parse manifest JSON"),
"toml" => toml::from_str(&manifest).context("failed to parse manifest TOML"),
other => bail!("unknown manifest format: {other}"),
}
}
/// Perform sanity checks.
pub async fn validate(&self, name: &TestnetName) -> anyhow::Result<()> {
validate_manifest(name, self).await
}
}
/// Any potential attributes of an account.
///
/// Currently empty; it exists so accounts can gain attributes later without
/// changing the shape of the manifest.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Account {}
/// Account balance.
///
/// Serialized in human-readable form (e.g. decimal strings) via `IsHumanReadable`.
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct Balance(#[serde_as(as = "IsHumanReadable")] pub TokenAmount);
/// Ways we can hook up with IPC contracts on the rootnet.
///
/// The rootnet is generally expected to be Calibration net,
/// where IPC contracts are deployed from Hardhat, and multiple
/// instances of the gateway exist, each with a different version
/// and an address we learn after deployment.
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(tag = "type")]
pub enum IpcDeployment {
    /// Deploy a new IPC contract stack using one of the accounts.
    /// This can take a long time, but ensures we are testing with
    /// contracts that have the same version as the client.
    New { deployer: AccountId },
    /// Use one of the existing deployments, given by the delegated address of
    /// the Gateway and Registry contracts.
    Existing {
        gateway: ethers::core::types::Address,
        registry: ethers::core::types::Address,
    },
}
/// The rootnet, ie. the L1 chain, can already exist and be outside our control
/// if we are deploying to Calibration, or it might be a chain we provision
/// with CometBFT and Fendermint.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(tag = "type")]
pub enum Rootnet {
    /// Existing L1 running outside our control.
    ///
    /// This implies using some sort of Faucet to get balances for the accounts.
    External {
        /// We need to know the ID of the chain to be able to create a `SubnetID` for it.
        chain_id: u64,
        /// Indicate whether we have to (re)deploy the IPC contract or we can use an existing one.
        deployment: IpcDeployment,
        /// Addresses of JSON-RPC endpoints on the external L1.
        urls: Vec<Url>,
    },
    /// Provision a new chain to run the L1.
    ///
    /// It is assumed that a newly provisioned chain will have built-in support for IPC,
    /// e.g. the way Fendermint deploys IPC actors at well-known addresses.
    New {
        /// Collateral of the initial validator set.
        validators: CollateralMap,
        /// Balances of the accounts in the rootnet.
        ///
        /// These balances will go in the genesis file.
        balances: BalanceMap,
        /// Nodes that participate in running the root chain.
        nodes: NodeMap,
        /// Custom env vars to pass on to the nodes.
        #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
        env: EnvMap,
    },
}
/// An IPC subnet.
///
/// The balance of the account on the parent subnet, as declared in this manifest,
/// _does not_ have to account for the collateral/balance we have to take from it to join/fund the subnet.
/// When we create the testnet configuration from the manifest we will account for this with a rollup,
/// so we don't have to do that much mental arithmetic and run into frustrating errors during setup.
/// If we want to test trying to join with more than what we have, we can do so in the integration test.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Subnet {
    /// The account we use to create the subnet.
    ///
    /// It needs a positive balance on the parent to pay for the transaction.
    pub creator: AccountId,
    /// Collateral of the initial validator set.
    ///
    /// These validators will join the subnet with these collaterals after the subnet is created.
    pub validators: CollateralMap,
    /// Balances of the accounts at the creation of the subnet.
    ///
    /// These accounts will pre-fund the subnet after it's created.
    pub balances: BalanceMap,
    /// Nodes that participate in running the chain of this subnet.
    pub nodes: NodeMap,
    /// Relayers that submit bottom-up checkpoints to the parent subnet.
    pub relayers: RelayerMap,
    /// Bottom-up checkpoint configuration.
    pub bottom_up_checkpoint: CheckpointConfig,
    /// Custom env vars to pass on to the nodes.
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub env: EnvMap,
    /// Child subnets under this parent.
    ///
    /// The subnet ID exists so we can find the outcome of existing deployments in the log.
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub subnets: SubnetMap,
}
/// A CometBFT+Fendermint node participating in one of the chains.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Node {
    /// Indicate whether this is a validator node or a full node.
    pub mode: NodeMode,
    /// Indicate whether to run the Ethereum API.
    pub ethapi: bool,
    /// The nodes from which CometBFT should bootstrap itself.
    ///
    /// We can leave it empty for standalone nodes and in cases
    /// where we don't want mutual seeding, however it's best to
    /// still show the field in the manifest explicitly, to make
    /// sure it's not forgotten, which would prevent the nodes
    /// discovering each other.
    pub seed_nodes: Vec<NodeId>,
    /// The parent node that the top-down syncer follows;
    /// or leave it empty if node is on the rootnet.
    ///
    /// We can skip this field if it's empty because validation
    /// will tell us that all subnet nodes need a parent.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parent_node: Option<ParentNode>,
}
/// The mode in which CometBFT is running.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(tag = "type")]
pub enum NodeMode {
    /// A node able to create and sign blocks.
    ///
    /// The account provides the validator key used for signing.
    Validator { validator: AccountId },
    /// A node which executes blocks and checks their content, but doesn't have a validator key.
    Full,
    // TODO: We can expand this to include seed nodes.
}
/// A node on the parent subnet.
///
/// Serialized untagged: a URL and a node ID are distinguished purely by shape.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(untagged)]
pub enum ParentNode {
    /// An external node such as one on Calibnet, given by its JSON-RPC URL.
    External(Url),
    /// A node defined in the manifest.
    Internal(NodeId),
}
/// A relayer process submitting bottom-up checkpoints from a subnet to its parent.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Relayer {
    /// The account which will pay for the submission on the parent subnet.
    pub submitter: AccountId,
    /// The node which the relayer is following on the subnet.
    pub follow_node: NodeId,
    /// The node where the relayer submits the checkpoints;
    /// or leave it empty if the parent is CalibrationNet.
    pub submit_node: ParentNode,
}
/// Bottom-up checkpointing settings of a subnet.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct CheckpointConfig {
    /// Number of blocks between checkpoints.
    pub period: u64,
}
#[cfg(test)]
mod tests {
    use quickcheck_macros::quickcheck;
    use super::Manifest;
    /// Round-trip property: JSON serialization must losslessly encode any manifest.
    #[quickcheck]
    fn manifest_json(value0: Manifest) {
        let repr = serde_json::to_string(&value0).expect("failed to encode");
        let value1: Manifest = serde_json::from_str(&repr)
            .map_err(|e| format!("{e}; {repr}"))
            .expect("failed to decode JSON");
        assert_eq!(value1, value0)
    }
    /// Round-trip property: YAML serialization must losslessly encode any manifest.
    #[quickcheck]
    fn manifest_yaml(value0: Manifest) {
        let repr = serde_yaml::to_string(&value0).expect("failed to encode");
        let value1: Manifest = serde_yaml::from_str(&repr)
            .map_err(|e| format!("{e}; {repr}"))
            .expect("failed to decode");
        assert_eq!(value1, value0)
    }
    /// Round-trip property: TOML serialization must losslessly encode any manifest.
    #[quickcheck]
    fn manifest_toml(value0: Manifest) {
        let repr = toml::to_string(&value0).expect("failed to encode");
        let value1: Manifest = toml::from_str(&repr)
            .map_err(|e| format!("{e}; {repr}"))
            .expect("failed to decode");
        assert_eq!(value1, value0)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/materializer.rs | fendermint/testing/materializer/src/materializer.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use async_trait::async_trait;
use either::Either;
use ethers::types::H160;
use fvm_shared::{chainid::ChainID, econ::TokenAmount};
use std::collections::BTreeMap;
use url::Url;
use fendermint_vm_genesis::Collateral;
use crate::{
manifest::{Balance, CheckpointConfig, EnvMap},
materials::Materials,
AccountName, NodeName, RelayerName, ResourceHash, SubnetName, TestnetName,
};
/// The materializer is a component to provision resources of a testnet, and
/// to carry out subsequent commands on them, e.g. to restart nodes.
///
/// By contrast, the role of the [Testnet] is to keep related items organised
/// and accessible for the integration tests, carrying out the operations with
/// the help of the materializer, which should keep the [Testnet] itself testable.
///
/// The materializer might not actually instantiate the resources. By returning
/// abstract types instead of concrete values, it is possible to just collect the
/// operations and use them to validate the behaviour of whatever is driving
/// the materializer. We can use this for dry-runs as well.
///
/// A live materializer should persist its logs, so that it can be resumed.
/// For example we can create and run a testnet externally, then parse the manifest
/// and the materializer logs inside a test to talk to one of the nodes, and the
/// materializer should be able to return to the test correct JSON-RPC endpoints.
///
/// Some of the operations of the materializer should be idempotent, e.g. the
/// creation of a wallet or a node should only happen once.
///
/// The types returned might have their own logic to execute when dropped, to free
/// resources. This might happen only if the resource is not an externally managed
/// one, e.g. a testnet set up before tests are run, which the materializer should
/// know.
#[async_trait]
pub trait Materializer<M: Materials> {
    /// Create the physical network group.
    ///
    /// The return value should be able to able to represent settings that allow nodes
    /// to connect to each other, as well as perhaps to be labelled as a group
    /// (although for that we can use the common name prefixes as well).
    async fn create_network(&mut self, testnet_name: &TestnetName) -> anyhow::Result<M::Network>;
    /// Create a Secp256k1 keypair for signing transactions or creating blocks.
    fn create_account(&mut self, account_name: &AccountName) -> anyhow::Result<M::Account>;
    /// Fund an account on the rootnet from the faucet.
    ///
    /// The `reference` can be used to deduplicate repeated funding attempts.
    async fn fund_from_faucet<'s, 'a>(
        &'s mut self,
        account: &'a M::Account,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a;
    /// Deploy the IPC contracts onto the rootnet.
    ///
    /// This is assumed to be used with external subnets, with the API address
    /// being known to the materializer, but not being part of the manifest,
    /// as there can be multiple endpoints to choose from, some better than others.
    ///
    /// The return value should contain at the addresses of the contracts.
    async fn new_deployment<'s, 'a>(
        &'s mut self,
        subnet_name: &SubnetName,
        deployer: &'a M::Account,
        urls: Vec<Url>,
    ) -> anyhow::Result<M::Deployment>
    where
        's: 'a;
    /// Set the IPC contracts onto the rootnet.
    ///
    /// This is assumed to be used with external subnets, with the API address
    /// being known to the materializer, but not being part of the manifest,
    /// as there can be multiple endpoints to choose from, some better than others.
    fn existing_deployment(
        &mut self,
        subnet_name: &SubnetName,
        gateway: H160,
        registry: H160,
    ) -> anyhow::Result<M::Deployment>;
    /// Return the well-known IPC contract deployments.
    fn default_deployment(&mut self, subnet_name: &SubnetName) -> anyhow::Result<M::Deployment>;
    /// Construct the genesis for the rootnet.
    ///
    /// The genesis time and the chain name (which should determine the chain ID and
    /// thus the subnet ID as well) can be chosen by the materializer, or we could make
    /// it part of the manifest.
    fn create_root_genesis<'a>(
        &mut self,
        subnet_name: &SubnetName,
        validators: BTreeMap<&'a M::Account, Collateral>,
        balances: BTreeMap<&'a M::Account, Balance>,
    ) -> anyhow::Result<M::Genesis>;
    /// Create a subnet to represent the root.
    fn create_root_subnet(
        &mut self,
        subnet_name: &SubnetName,
        params: Either<ChainID, &M::Genesis>,
    ) -> anyhow::Result<M::Subnet>;
    /// Construct the configuration for a node.
    ///
    /// This should create keys, configurations, but hold on from starting so that we can
    /// first learn about the dynamic properties of other nodes in the cluster we depend on,
    /// such as their network identities which are a function of their keys.
    ///
    /// The method is async in case we have to provision some resources remotely.
    async fn create_node<'s, 'a>(
        &'s mut self,
        node_name: &NodeName,
        node_config: &NodeConfig<'a, M>,
    ) -> anyhow::Result<M::Node>
    where
        's: 'a;
    /// Start a node.
    ///
    /// At this point the identities of any dependency nodes should be known.
    async fn start_node<'s, 'a>(
        &'s mut self,
        node: &'a M::Node,
        seed_nodes: &'a [&'a M::Node],
    ) -> anyhow::Result<()>
    where
        's: 'a;
    /// Create a subnet on the parent subnet ledger.
    ///
    /// The parent nodes are the ones where subnet-creating transactions
    /// can be sent, or it can be empty if it's an external rootnet.
    ///
    /// The result should contain the address of the subnet.
    async fn create_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        subnet_name: &SubnetName,
        subnet_config: &SubnetConfig<'a, M>,
    ) -> anyhow::Result<M::Subnet>
    where
        's: 'a;
    /// Fund an account on a target subnet by transferring tokens from the source subnet.
    ///
    /// Only works if the target subnet has been bootstrapped.
    ///
    /// The `reference` can be used to deduplicate repeated transfer attempts.
    async fn fund_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        account: &'a M::Account,
        subnet: &'a M::Subnet,
        amount: TokenAmount,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a;
    /// Join a target subnet as a validator.
    ///
    /// The `reference` can be used to deduplicate repeated transfer attempts.
    async fn join_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        account: &'a M::Account,
        subnet: &'a M::Subnet,
        collateral: Collateral,
        balance: Balance,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a;
    /// Construct the genesis for a subnet, which involves fetching details from the parent.
    ///
    /// The method is async to allow for network operations.
    async fn create_subnet_genesis<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        subnet: &'a M::Subnet,
    ) -> anyhow::Result<M::Genesis>
    where
        's: 'a;
    /// Create and start a relayer.
    async fn create_relayer<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        relayer_name: &RelayerName,
        relayer_config: RelayerConfig<'a, M>,
    ) -> anyhow::Result<M::Relayer>
    where
        's: 'a;
}
/// Options regarding node configuration, e.g. which services to start.
pub struct NodeConfig<'a, M: Materials> {
    /// The physical network to join.
    pub network: &'a M::Network,
    /// The genesis of this subnet; it should indicate whether this is a rootnet or a deeper level.
    pub genesis: &'a M::Genesis,
    /// The validator keys if this is a validator node; none if just a full node.
    pub validator: Option<&'a M::Account>,
    /// The node for the top-down syncer to follow; none if this is a root node.
    ///
    /// This can potentially also be used to configure the IPLD Resolver seeds, to connect across subnets.
    pub parent_node: Option<ParentConfig<'a, M>>,
    /// Run the Ethereum API facade or not.
    pub ethapi: bool,
    /// Arbitrary env vars, e.g. to regulate block production rates.
    pub env: &'a EnvMap,
    /// Number of nodes to be expected in the subnet, including this node, or 0 if unknown.
    pub peer_count: usize,
}
/// Options regarding relayer configuration.
///
/// Passed to [`Materializer::create_relayer`].
pub struct RelayerConfig<'a, M: Materials> {
    /// Where to send queries on the child subnet.
    pub follow_config: &'a SubmitConfig<'a, M>,
    /// The account to use to submit transactions on the parent subnet.
    pub submitter: &'a M::Account,
    /// Arbitrary env vars, e.g. to set the logging level.
    pub env: &'a EnvMap,
}
/// Options regarding subnet configuration, e.g. how many validators are required.
pub struct SubnetConfig<'a, M: Materials> {
    /// Which account to use on the parent to create the subnet.
    ///
    /// This account has to have the necessary balance on the parent.
    pub creator: &'a M::Account,
    /// Number of validators required for bootstrapping a subnet.
    pub min_validators: usize,
    /// Bottom-up checkpointing configuration for the subnet.
    pub bottom_up_checkpoint: &'a CheckpointConfig,
}
/// Options for how to submit IPC transactions to a subnet.
///
/// See [`SubmitConfig::find_node`] for picking a usable target.
pub struct SubmitConfig<'a, M: Materials> {
    /// The nodes to which we can send transactions or queries, ie. any of the parent nodes.
    pub nodes: Vec<TargetConfig<'a, M>>,
    /// The identity of the subnet to which we submit the transaction, ie. the parent subnet.
    pub subnet: &'a M::Subnet,
    /// The location of the IPC contracts on the (generally parent) subnet.
    pub deployment: &'a M::Deployment,
}
/// Options for how to follow the parent consensus and sync IPC changes.
///
/// Passed to nodes via [`NodeConfig::parent_node`] so the top-down syncer knows whom to follow.
pub struct ParentConfig<'a, M: Materials> {
    /// The trusted parent node to follow.
    pub node: TargetConfig<'a, M>,
    /// The location of the IPC contracts on the parent subnet.
    pub deployment: &'a M::Deployment,
}
/// Where to submit a transaction or a query.
pub enum TargetConfig<'a, M: Materials> {
    /// A node reachable only by URL, e.g. on an external rootnet.
    External(Url),
    /// A node materialized within this testnet.
    Internal(&'a M::Node),
}
impl<'a, M: Materials> SubmitConfig<'a, M> {
    /// Apply `f` to internal and `g` to external targets, in order,
    /// returning the first non-empty result, if any.
    pub fn find_node<F, G, T>(&self, f: F, g: G) -> Option<T>
    where
        F: Fn(&M::Node) -> Option<T>,
        G: Fn(&Url) -> Option<T>,
    {
        // Walk the candidate targets in order and stop at the first hit.
        for target in &self.nodes {
            let found = match target {
                TargetConfig::Internal(node) => f(node),
                TargetConfig::External(url) => g(url),
            };
            if found.is_some() {
                return found;
            }
        }
        None
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/logging.rs | fendermint/testing/materializer/src/logging.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use async_trait::async_trait;
use either::Either;
use ethers::types::H160;
use fendermint_vm_genesis::Collateral;
use fvm_shared::{chainid::ChainID, econ::TokenAmount};
use std::{collections::BTreeMap, fmt::Display};
use url::Url;
use crate::{
manifest::Balance,
materializer::{Materializer, NodeConfig, RelayerConfig, SubmitConfig, SubnetConfig},
materials::Materials,
AccountName, NodeName, RelayerName, ResourceHash, SubnetName, TestnetName,
};
/// Materializer wrapper that logs every operation (via `tracing`) before
/// delegating to the wrapped materializer; helps debug manifests.
pub struct LoggingMaterializer<R> {
    // Context string included in every log line, to tell materializers apart.
    ctx: String,
    // The wrapped materializer that performs the actual work.
    inner: R,
}
impl<R> LoggingMaterializer<R> {
pub fn new(inner: R, ctx: String) -> Self {
Self { inner, ctx }
}
}
// Pure delegation: every method logs the operation (tagged with the
// materializer's context string) at INFO level, then forwards the call
// unchanged to the wrapped materializer.
#[async_trait]
impl<M, R> Materializer<M> for LoggingMaterializer<R>
where
    M: Materials + Send + Sync + 'static,
    R: Materializer<M> + Send + Sync,
    // Display bounds are only needed so resources can appear in log lines.
    M::Network: Display,
    M::Deployment: Display,
    M::Account: Display,
    M::Genesis: Display,
    M::Subnet: Display,
    M::Node: Display,
    M::Relayer: Display,
{
    async fn create_network(&mut self, testnet_name: &TestnetName) -> anyhow::Result<M::Network> {
        tracing::info!(%testnet_name, ctx=self.ctx, "create_network");
        self.inner.create_network(testnet_name).await
    }
    fn create_account(&mut self, account_name: &AccountName) -> anyhow::Result<M::Account> {
        tracing::info!(%account_name, ctx=self.ctx, "create_account");
        self.inner.create_account(account_name)
    }
    async fn fund_from_faucet<'s, 'a>(
        &'s mut self,
        account: &'a M::Account,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        tracing::info!(%account, ctx=self.ctx, "fund_from_faucet");
        self.inner.fund_from_faucet(account, reference).await
    }
    async fn new_deployment<'s, 'a>(
        &'s mut self,
        subnet_name: &SubnetName,
        deployer: &'a M::Account,
        urls: Vec<Url>,
    ) -> anyhow::Result<M::Deployment>
    where
        's: 'a,
    {
        tracing::info!(%subnet_name, ctx=self.ctx, %deployer, "new_deployment");
        self.inner.new_deployment(subnet_name, deployer, urls).await
    }
    fn existing_deployment(
        &mut self,
        subnet_name: &SubnetName,
        gateway: H160,
        registry: H160,
    ) -> anyhow::Result<M::Deployment> {
        tracing::info!(%subnet_name, ctx=self.ctx, "existing_deployment");
        self.inner
            .existing_deployment(subnet_name, gateway, registry)
    }
    fn default_deployment(&mut self, subnet_name: &SubnetName) -> anyhow::Result<M::Deployment> {
        tracing::info!(%subnet_name, ctx=self.ctx, "default_deployment");
        self.inner.default_deployment(subnet_name)
    }
    fn create_root_genesis<'a>(
        &mut self,
        subnet_name: &SubnetName,
        validators: BTreeMap<&'a M::Account, Collateral>,
        balances: BTreeMap<&'a M::Account, Balance>,
    ) -> anyhow::Result<M::Genesis> {
        tracing::info!(%subnet_name, ctx=self.ctx, "create_root_genesis");
        self.inner
            .create_root_genesis(subnet_name, validators, balances)
    }
    fn create_root_subnet(
        &mut self,
        subnet_name: &SubnetName,
        params: Either<ChainID, &M::Genesis>,
    ) -> anyhow::Result<M::Subnet> {
        tracing::info!(%subnet_name, ctx=self.ctx, "create_root_subnet");
        self.inner.create_root_subnet(subnet_name, params)
    }
    async fn create_node<'s, 'a>(
        &'s mut self,
        node_name: &NodeName,
        node_config: &NodeConfig<'a, M>,
    ) -> anyhow::Result<M::Node>
    where
        's: 'a,
    {
        tracing::info!(%node_name, ctx=self.ctx, "create_node");
        self.inner.create_node(node_name, node_config).await
    }
    async fn start_node<'s, 'a>(
        &'s mut self,
        node: &'a M::Node,
        seed_nodes: &'a [&'a M::Node],
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        tracing::info!(%node, ctx=self.ctx, "start_node");
        self.inner.start_node(node, seed_nodes).await
    }
    async fn create_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        subnet_name: &SubnetName,
        subnet_config: &SubnetConfig<'a, M>,
    ) -> anyhow::Result<M::Subnet>
    where
        's: 'a,
    {
        tracing::info!(%subnet_name, ctx=self.ctx, "create_subnet");
        self.inner
            .create_subnet(parent_submit_config, subnet_name, subnet_config)
            .await
    }
    async fn fund_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        account: &'a M::Account,
        subnet: &'a M::Subnet,
        amount: TokenAmount,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        tracing::info!(%subnet, %account, ctx=self.ctx, "fund_subnet");
        self.inner
            .fund_subnet(parent_submit_config, account, subnet, amount, reference)
            .await
    }
    async fn join_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        account: &'a M::Account,
        subnet: &'a M::Subnet,
        collateral: Collateral,
        balance: Balance,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        tracing::info!(%subnet, %account, ctx=self.ctx, "join_subnet");
        self.inner
            .join_subnet(
                parent_submit_config,
                account,
                subnet,
                collateral,
                balance,
                reference,
            )
            .await
    }
    async fn create_subnet_genesis<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        subnet: &'a M::Subnet,
    ) -> anyhow::Result<M::Genesis>
    where
        's: 'a,
    {
        tracing::info!(%subnet, ctx=self.ctx, "create_subnet_genesis");
        self.inner
            .create_subnet_genesis(parent_submit_config, subnet)
            .await
    }
    async fn create_relayer<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, M>,
        relayer_name: &RelayerName,
        relayer_config: RelayerConfig<'a, M>,
    ) -> anyhow::Result<M::Relayer>
    where
        's: 'a,
    {
        tracing::info!(%relayer_name, ctx=self.ctx, "create_relayer");
        self.inner
            .create_relayer(parent_submit_config, relayer_name, relayer_config)
            .await
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/testnet.rs | fendermint/testing/materializer/src/testnet.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, bail, Context};
use async_recursion::async_recursion;
use either::Either;
use fvm_shared::chainid::ChainID;
use std::{
collections::{BTreeMap, BTreeSet},
fmt::Display,
marker::PhantomData,
};
use url::Url;
use crate::{
manifest::{
BalanceMap, CollateralMap, EnvMap, IpcDeployment, Manifest, Node, NodeMode, ParentNode,
Rootnet, Subnet,
},
materializer::{
Materializer, NodeConfig, ParentConfig, RelayerConfig, SubmitConfig, SubnetConfig,
TargetConfig,
},
materials::Materials,
AccountId, NodeId, NodeName, RelayerName, ResourceHash, SubnetId, SubnetName, TestnetName,
};
/// The `Testnet` parses a [Manifest] and is able to derive the steps
/// necessary to instantiate it with the help of the [Materializer].
///
/// The `Testnet` data structure itself acts as an indexer over the
/// resources created by the [Materializer]. It owns them, and by
/// doing so controls their life cycle. By dropping the `Testnet`
/// or various components from it we are able to free resources.
///
/// Arguably the same could be achieved by keeping the created
/// resources inside the [Materializer] and discarding that as
/// a whole, keeping the `Testnet` completely stateless, but
/// perhaps this way writing a [Materializer] is just a tiny
/// bit simpler.
pub struct Testnet<M: Materials, R> {
    /// Name of the testnet; all resource names are derived from it.
    name: TestnetName,
    /// The physical network created by the materializer.
    network: M::Network,
    /// URLs of external rootnet nodes; only populated for an external rootnet.
    externals: Vec<Url>,
    /// Cryptographic keypairs created for the manifest accounts, by ID.
    accounts: BTreeMap<AccountId, M::Account>,
    /// IPC contract deployments, per subnet.
    deployments: BTreeMap<SubnetName, M::Deployment>,
    /// Genesis of each subnet.
    genesis: BTreeMap<SubnetName, M::Genesis>,
    /// Subnets created on their parent (or the root itself).
    subnets: BTreeMap<SubnetName, M::Subnet>,
    /// Nodes running in the various subnets.
    nodes: BTreeMap<NodeName, M::Node>,
    /// Relayers created for bottom-up checkpointing.
    relayers: BTreeMap<RelayerName, M::Relayer>,
    // The materializer type only appears in method signatures, not in fields.
    _phantom_materializer: PhantomData<R>,
}
impl<M: Materials, R> Drop for Testnet<M, R> {
    fn drop(&mut self) {
        // Relayers and nodes may hold on to the shared network, so tear
        // them down explicitly before the remaining fields are dropped.
        let relayers = std::mem::take(&mut self.relayers);
        drop(relayers);
        let nodes = std::mem::take(&mut self.nodes);
        drop(nodes);
    }
}
impl<M, R> Testnet<M, R>
where
    M: Materials,
    R: Materializer<M> + Sync + Send,
{
    /// Create an empty testnet, materializing only its physical network.
    pub async fn new(m: &mut R, name: &TestnetName) -> anyhow::Result<Self> {
        let network = m
            .create_network(name)
            .await
            .context("failed to create the network")?;
        Ok(Self {
            name: name.clone(),
            network,
            externals: Default::default(),
            accounts: Default::default(),
            deployments: Default::default(),
            genesis: Default::default(),
            subnets: Default::default(),
            nodes: Default::default(),
            relayers: Default::default(),
            _phantom_materializer: PhantomData,
        })
    }
    /// The name of the testnet.
    pub fn name(&self) -> &TestnetName {
        &self.name
    }
    /// The name of the rootnet of this testnet.
    pub fn root(&self) -> SubnetName {
        self.name.root()
    }
    /// Set up a testnet from scratch.
    ///
    /// To validate a manifest, we can first create a testnet with a [Materializer]
    /// that only creates symbolic resources.
    pub async fn setup(m: &mut R, name: &TestnetName, manifest: &Manifest) -> anyhow::Result<Self> {
        let mut t = Self::new(m, name).await?;
        let root_name = t.root();
        // Create keys for accounts.
        for account_id in manifest.accounts.keys() {
            t.create_account(m, account_id)?;
        }
        // Create the rootnet.
        t.create_and_start_rootnet(m, &root_name, &manifest.rootnet)
            .await
            .context("failed to create and start rootnet")?;
        // Recursively create and start all subnet nodes.
        for (subnet_id, subnet) in &manifest.subnets {
            t.create_and_start_subnet(m, &root_name, subnet_id, subnet)
                .await
                .with_context(|| format!("failed to create and start subnet {subnet_id}"))?;
        }
        Ok(t)
    }
    /// Return a reference to the physical network.
    fn network(&self) -> &M::Network {
        &self.network
    }
    /// Create a cryptographic keypair for an account ID.
    pub fn create_account(&mut self, m: &mut R, id: &AccountId) -> anyhow::Result<()> {
        let n = self.name.account(id);
        let a = m.create_account(&n).context("failed to create account")?;
        self.accounts.insert(id.clone(), a);
        Ok(())
    }
    /// Get an account by ID.
    pub fn account(&self, id: impl Into<AccountId>) -> anyhow::Result<&M::Account> {
        let id: AccountId = id.into();
        self.accounts
            .get(&id)
            .ok_or_else(|| anyhow!("account {id} does not exist"))
    }
    /// Get a node by name.
    pub fn node(&self, name: &NodeName) -> anyhow::Result<&M::Node> {
        self.nodes
            .get(name)
            .ok_or_else(|| anyhow!("node {name:?} does not exist"))
    }
    /// Get a subnet by name.
    pub fn subnet(&self, name: &SubnetName) -> anyhow::Result<&M::Subnet> {
        self.subnets
            .get(name)
            .ok_or_else(|| anyhow!("subnet {name:?} does not exist"))
    }
    /// Get a genesis by subnet.
    pub fn genesis(&self, name: &SubnetName) -> anyhow::Result<&M::Genesis> {
        self.genesis
            .get(name)
            .ok_or_else(|| anyhow!("genesis for {name:?} does not exist"))
    }
    /// Get a deployment by subnet.
    pub fn deployment(&self, name: &SubnetName) -> anyhow::Result<&M::Deployment> {
        self.deployments
            .get(name)
            .ok_or_else(|| anyhow!("deployment for {name:?} does not exist"))
    }
    /// List all the nodes in a subnet.
    pub fn nodes_by_subnet(&self, subnet_name: &SubnetName) -> Vec<&M::Node> {
        self.nodes
            .iter()
            .filter(|(node_name, _)| subnet_name.contains(node_name))
            .map(|(_, n)| n)
            .collect()
    }
    /// Iterate all the nodes in the testnet.
    pub fn nodes(&self) -> impl Iterator<Item = (&NodeName, &M::Node)> {
        self.nodes.iter()
    }
    /// Where can we send transactions and queries on a subnet.
    pub fn submit_config(&self, subnet_name: &SubnetName) -> anyhow::Result<SubmitConfig<M>> {
        let deployment = self.deployment(subnet_name)?;
        let subnet = self.subnet(subnet_name)?;
        let mut nodes = self
            .nodes_by_subnet(subnet_name)
            .into_iter()
            .map(TargetConfig::Internal)
            .collect::<Vec<_>>();
        // External URLs are only recorded for the root (external rootnet case).
        if subnet_name.is_root() {
            nodes.extend(self.externals.iter().cloned().map(TargetConfig::External));
        }
        Ok(SubmitConfig {
            subnet,
            deployment,
            nodes,
        })
    }
    /// Resolve account IDs in a map to account references.
    fn account_map<T>(
        &self,
        m: BTreeMap<AccountId, T>,
    ) -> anyhow::Result<BTreeMap<&M::Account, T>> {
        m.into_iter()
            .map(|(id, x)| self.account(&id).map(|a| (a, x)))
            .collect()
    }
    /// Create a genesis for the rootnet nodes.
    ///
    /// On the rootnet the validator power comes out of thin air,
    /// ie. the balances don't have to cover it. On subnets this
    /// will be different, the collateral has to be funded.
    fn create_root_genesis(
        &mut self,
        m: &mut R,
        subnet_name: &SubnetName,
        validators: CollateralMap,
        balances: BalanceMap,
    ) -> anyhow::Result<()> {
        let validators = self
            .account_map(validators)
            .context("invalid root collaterals")?;
        let balances = self
            .account_map(balances)
            .context("invalid root balances")?;
        // Remember the genesis so we can potentially create more nodes later.
        let genesis = m.create_root_genesis(subnet_name, validators, balances)?;
        self.genesis.insert(subnet_name.clone(), genesis);
        Ok(())
    }
    /// Configure and start the nodes of a subnet.
    ///
    /// Fails if the genesis of this subnet hasn't been created yet.
    async fn create_and_start_nodes(
        &mut self,
        m: &mut R,
        subnet_name: &SubnetName,
        nodes: &BTreeMap<NodeId, Node>,
        env: &EnvMap,
    ) -> anyhow::Result<()> {
        // Create all nodes first (in seed order), then start them, so that
        // every node can learn the address of its seeds before starting.
        let node_ids = sort_by_seeds(nodes).context("invalid root subnet topology")?;
        for (node_id, node) in node_ids.iter() {
            self.create_node(m, subnet_name, node_id, node, env, node_ids.len())
                .await
                .with_context(|| format!("failed to create node {node_id} in {subnet_name}"))?;
        }
        for (node_id, node) in node_ids.iter() {
            self.start_node(m, subnet_name, node_id, node)
                .await
                .with_context(|| format!("failed to start node {node_id} in {subnet_name}"))?;
        }
        Ok(())
    }
    /// Create the configuration of a node.
    ///
    /// Fails if the genesis hasn't been created yet.
    async fn create_node(
        &mut self,
        m: &mut R,
        subnet_name: &SubnetName,
        node_id: &NodeId,
        node: &Node,
        env: &EnvMap,
        peer_count: usize,
    ) -> anyhow::Result<()> {
        let genesis = self.genesis(subnet_name)?;
        let network = self.network();
        let node_name = subnet_name.node(node_id);
        // A parent node must be configured iff the subnet has a parent;
        // external parent URLs are only allowed directly under the root.
        let parent_node = match (subnet_name.parent(), &node.parent_node) {
            (Some(ps), Some(ParentNode::Internal(id))) => {
                let tc = TargetConfig::<M>::Internal(
                    self.node(&ps.node(id))
                        .with_context(|| format!("invalid parent node in {node_name:?}"))?,
                );
                let deployment = self.deployment(&ps)?;
                Some(ParentConfig {
                    node: tc,
                    deployment,
                })
            }
            (Some(ps), Some(ParentNode::External(url))) if ps.is_root() => {
                let tc = TargetConfig::External(url.clone());
                let deployment = self.deployment(&ps)?;
                Some(ParentConfig {
                    node: tc,
                    deployment,
                })
            }
            (Some(_), Some(ParentNode::External(_))) => {
                bail!("node {node_name:?} specifies external URL for parent, but it's on a non-root subnet")
            }
            (None, Some(_)) => {
                bail!("node {node_name:?} specifies parent node, but there is no parent subnet")
            }
            (Some(_), None) => {
                bail!("node {node_name:?} is on a subnet, but doesn't specify a parent node")
            }
            _ => None,
        };
        let node_config = NodeConfig {
            network,
            genesis,
            validator: match &node.mode {
                NodeMode::Full => None,
                NodeMode::Validator { validator } => {
                    let validator = self
                        .account(validator)
                        .with_context(|| format!("invalid validator in {node_name:?}"))?;
                    Some(validator)
                }
            },
            parent_node,
            ethapi: node.ethapi,
            env,
            peer_count,
        };
        let node = m
            .create_node(&node_name, &node_config)
            .await
            .context("failed to create node")?;
        self.nodes.insert(node_name, node);
        Ok(())
    }
    /// Start a node.
    ///
    /// Fails if the node hasn't been created yet.
    async fn start_node(
        &mut self,
        m: &mut R,
        subnet_name: &SubnetName,
        node_id: &NodeId,
        node: &Node,
    ) -> anyhow::Result<()> {
        let node_name = subnet_name.node(node_id);
        let seeds = node
            .seed_nodes
            .iter()
            .map(|s| self.node(&subnet_name.node(s)))
            .collect::<Result<Vec<_>, _>>()
            .with_context(|| format!("failed to collect seeds for {node_name:?}"))?;
        let node = self.node(&node_name)?;
        m.start_node(node, &seeds)
            .await
            .with_context(|| format!("failed to start {node_name:?}"))?;
        Ok(())
    }
    /// Create the rootnet resources: either record an existing external
    /// rootnet (funding accounts and locating contracts), or create a fresh
    /// one with its own genesis and nodes.
    async fn create_and_start_rootnet(
        &mut self,
        m: &mut R,
        root_name: &SubnetName,
        rootnet: &Rootnet,
    ) -> anyhow::Result<()> {
        match rootnet {
            Rootnet::External {
                chain_id,
                deployment,
                urls,
            } => {
                // Establish balances.
                for (id, a) in self.accounts.iter() {
                    let reference = ResourceHash::digest(format!("funding {id} from faucet"));
                    m.fund_from_faucet(a, Some(reference))
                        .await
                        .context("faucet failed")?;
                }
                // Establish root contract locations.
                let deployment = match deployment {
                    IpcDeployment::New { deployer } => {
                        let deployer = self.account(deployer).context("invalid deployer")?;
                        m.new_deployment(root_name, deployer, urls.clone())
                            .await
                            .context("failed to deploy IPC contracts")?
                    }
                    IpcDeployment::Existing { gateway, registry } => {
                        m.existing_deployment(root_name, *gateway, *registry)?
                    }
                };
                let subnet = m
                    .create_root_subnet(root_name, Either::Left(ChainID::from(*chain_id)))
                    .context("failed to create root subnet")?;
                self.subnets.insert(root_name.clone(), subnet);
                self.deployments.insert(root_name.clone(), deployment);
                self.externals.clone_from(urls);
            }
            Rootnet::New {
                validators,
                balances,
                nodes,
                env,
            } => {
                self.create_root_genesis(m, root_name, validators.clone(), balances.clone())
                    .context("failed to create root genesis")?;
                let genesis = self.genesis(root_name)?;
                let subnet = m
                    .create_root_subnet(root_name, Either::Right(genesis))
                    .context("failed to create root subnet")?;
                let deployment = m.default_deployment(root_name)?;
                self.subnets.insert(root_name.clone(), subnet);
                self.deployments.insert(root_name.clone(), deployment);
                self.create_and_start_nodes(m, root_name, nodes, env)
                    .await
                    .context("failed to start root nodes")?;
            }
        }
        Ok(())
    }
    /// Recursively create a subnet under its parent: register it, fund and
    /// join the validators, create its genesis and nodes, start relayers,
    /// then descend into its child subnets.
    #[async_recursion]
    async fn create_and_start_subnet(
        &mut self,
        m: &mut R,
        parent_subnet_name: &SubnetName,
        subnet_id: &SubnetId,
        subnet: &Subnet,
    ) -> anyhow::Result<()> {
        let subnet_name = parent_subnet_name.subnet(subnet_id);
        // Create the subnet
        {
            // Assume that all subnets are deployed with the default contracts.
            self.deployments
                .insert(subnet_name.clone(), m.default_deployment(&subnet_name)?);
            // Where can we reach the gateway and the registry.
            let parent_submit_config = self.submit_config(parent_subnet_name)?;
            // Create the subnet on the parent.
            let created_subnet = m
                .create_subnet(
                    &parent_submit_config,
                    &subnet_name,
                    &SubnetConfig {
                        creator: self.account(&subnet.creator).context("invalid creator")?,
                        // Make the number such that the last validator to join activates the subnet.
                        min_validators: subnet.validators.len(),
                        bottom_up_checkpoint: &subnet.bottom_up_checkpoint,
                    },
                )
                .await
                .with_context(|| format!("failed to create {subnet_name}"))?;
            self.subnets.insert(subnet_name.clone(), created_subnet);
        };
        // Fund the accounts, join the subnet, start the nodes
        {
            let parent_submit_config = self.submit_config(parent_subnet_name)?;
            let created_subnet = self.subnet(&subnet_name)?;
            // Fund validator and balances collateral all the way from the root down to the parent.
            for (fund_source, fund_target) in subnet_name.ancestor_hops(false) {
                // Where can we send the subnet request.
                let fund_submit_config = self.submit_config(&fund_source)?;
                // Which subnet are we funding.
                let fund_subnet = self.subnet(&fund_target)?;
                let cs = subnet
                    .validators
                    .iter()
                    .map(|(id, c)| ("validator", id, c.0.clone()));
                let bs = subnet
                    .balances
                    .iter()
                    .map(|(id, b)| ("balance", id, b.0.clone()));
                for (label, id, amount) in cs.chain(bs) {
                    let account = self
                        .account(id)
                        .with_context(|| format!("invalid {label} in {subnet_name}"))?;
                    // Assign a reference so we can remember that we did it, within each subnet,
                    // which can turn this into an idempotent operation.
                    let reference = ResourceHash::digest(format!(
                        "funds from the top for {label} {id} for {subnet_name}"
                    ));
                    m.fund_subnet(
                        &fund_submit_config,
                        account,
                        fund_subnet,
                        amount,
                        Some(reference),
                    )
                    .await
                    .with_context(|| format!("failed to fund {id} in {fund_target:?}"))?;
                }
            }
            // Join with the validators on the subnet.
            for (id, c) in &subnet.validators {
                let account = self
                    .account(id)
                    .with_context(|| format!("invalid validator {id} in {subnet_name}"))?;
                let b = subnet.balances.get(id).cloned().unwrap_or_default();
                let reference =
                    ResourceHash::digest(format!("initial join by {id} for {subnet_name}"));
                m.join_subnet(
                    &parent_submit_config,
                    account,
                    created_subnet,
                    c.clone(),
                    b,
                    Some(reference),
                )
                .await
                .with_context(|| format!("failed to join with validator {id} in {subnet_name}"))?;
            }
            // Create genesis by fetching from the parent.
            let genesis = m
                .create_subnet_genesis(&parent_submit_config, created_subnet)
                .await
                .with_context(|| format!("failed to create subnet genesis in {subnet_name}"))?;
            self.genesis.insert(subnet_name.clone(), genesis);
            // Create and start nodes.
            self.create_and_start_nodes(m, &subnet_name, &subnet.nodes, &subnet.env)
                .await
                .with_context(|| format!("failed to start subnet nodes in {subnet_name}"))?;
        }
        // Interact with the running subnet.
        {
            let created_subnet = self.subnet(&subnet_name)?;
            let created_deployment = self.deployment(&subnet_name)?;
            // Where can we reach the gateway and the registry.
            let parent_submit_config = self.submit_config(parent_subnet_name)?;
            // Fund all non-validator balances (which have been passed to join_validator as a pre-fund request).
            // These could be done as pre-funds if the command is available on its own.
            for (id, b) in &subnet.balances {
                let account = self
                    .account(id)
                    .with_context(|| format!("invalid balance in {subnet_name}"))?;
                if subnet.validators.contains_key(id) {
                    continue;
                }
                let reference = ResourceHash::digest(format!("fund {id} in {subnet_name}"));
                m.fund_subnet(
                    &parent_submit_config,
                    account,
                    created_subnet,
                    b.0.clone(),
                    Some(reference),
                )
                .await
                .with_context(|| format!("failed to fund {id} in {subnet_name}"))?;
            }
            // Create relayers for bottom-up checkpointing.
            let mut relayers = Vec::<(RelayerName, M::Relayer)>::new();
            for (id, relayer) in &subnet.relayers {
                let submitter = self
                    .account(&relayer.submitter)
                    .context("invalid relayer")?;
                let follow_node = self
                    .node(&subnet_name.node(&relayer.follow_node))
                    .context("invalid follow node")?;
                // Like parent nodes, external submit targets are only valid under the root.
                let submit_node = match (subnet_name.parent(), &relayer.submit_node) {
                    (Some(p), ParentNode::Internal(s)) => TargetConfig::Internal(self.node(&p.node(s)).context("invalid submit node")?),
                    (Some(p), ParentNode::External(url)) if p.is_root() => TargetConfig::External(url.clone()),
                    (Some(_), ParentNode::External(_)) => bail!(
                        "invalid relayer {id} in {subnet_name}: parent is not root, but submit node is external"
                    ),
                    (None, _) => bail!(
                        "invalid relayer {id} in {subnet_name}: there is no parent subnet to relay to"
                    ),
                };
                let relayer_name = subnet_name.relayer(id);
                let relayer = m
                    .create_relayer(
                        &SubmitConfig {
                            nodes: vec![submit_node],
                            ..parent_submit_config
                        },
                        &relayer_name,
                        RelayerConfig {
                            follow_config: &SubmitConfig {
                                nodes: vec![TargetConfig::Internal(follow_node)],
                                subnet: created_subnet,
                                deployment: created_deployment,
                            },
                            submitter,
                            env: &subnet.env,
                        },
                    )
                    .await
                    .with_context(|| format!("failed to create relayer {id}"))?;
                relayers.push((relayer_name, relayer));
            }
            self.relayers.extend(relayers.into_iter());
        }
        // Recursively create and start all subnet nodes.
        for (subnet_id, subnet) in &subnet.subnets {
            self.create_and_start_subnet(m, &subnet_name, subnet_id, subnet)
                .await
                .with_context(|| format!("failed to start subnet {subnet_id} in {subnet_name}"))?;
        }
        Ok(())
    }
}
/// Sort some values in a topological order.
///
/// Cycles can be allowed, in which case the function does its best, always
/// emitting the item with the fewest outstanding dependencies next. This is
/// so we can support nodes mutually seeding from each other.
fn topo_sort<K, V, F, I>(
    items: &BTreeMap<K, V>,
    allow_cycles: bool,
    f: F,
) -> anyhow::Result<Vec<(&K, &V)>>
where
    F: Fn(&V) -> I,
    K: Ord + Display + Clone,
    I: IntoIterator<Item = K>,
{
    // Outstanding dependencies of every item; shrinks as items are emitted.
    let mut pending: BTreeMap<&K, BTreeSet<K>> = items
        .iter()
        .map(|(k, v)| (k, f(v).into_iter().collect::<BTreeSet<_>>()))
        .collect();

    // Validate that all dependencies point at existing items.
    for (k, ds) in &pending {
        for d in ds {
            if !pending.contains_key(d) {
                bail!("non-existing dependency: {d} <- {k}")
            }
        }
    }

    let mut sorted = Vec::new();

    while !pending.is_empty() {
        // Prefer an item with no outstanding dependencies; otherwise, if
        // cycles are tolerated, break one by taking the item with the fewest.
        // NOTE: stable sort + `first` keeps the tie-breaking identical to the
        // original (smallest key among equally-dependent items).
        let next: K = if let Some((k, _)) = pending.iter().find(|(_, ds)| ds.is_empty()) {
            (*k).clone()
        } else if allow_cycles {
            let mut counts = pending
                .iter()
                .map(|(k, ds)| (k, ds.len()))
                .collect::<Vec<_>>();
            counts.sort_by_key(|(_, c)| *c);
            (*counts.first().unwrap().0).clone()
        } else {
            bail!("circular reference in dependencies")
        };

        pending.remove(&next);

        for ds in pending.values_mut() {
            ds.remove(&next);
        }

        if let Some(kv) = items.get_key_value(&next) {
            sorted.push(kv);
        }
    }

    Ok(sorted)
}
/// Sort nodes in a subnet so that seed nodes tend to come before the nodes
/// they bootstrap. Cycles are allowed so that nodes can mutually seed
/// from each other.
fn sort_by_seeds(nodes: &BTreeMap<NodeId, Node>) -> anyhow::Result<Vec<(&NodeId, &Node)>> {
    // Each node depends on the set of its seed nodes.
    let seeds_of = |n: &Node| BTreeSet::from_iter(n.seed_nodes.iter().cloned());
    topo_sort(nodes, true, seeds_of)
}
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;
    use super::topo_sort;
    #[test]
    fn test_topo_sort() {
        // Dependency map: key depends on the listed keys.
        let mut tree = BTreeMap::default();
        tree.insert(1, vec![]);
        tree.insert(2, vec![5]);
        tree.insert(3, vec![1, 5]);
        tree.insert(4, vec![2, 3]);
        tree.insert(5, vec![1]);
        // Acyclic: dependencies must precede their dependents.
        let sorted = topo_sort(&tree, false, |ds| ds.clone())
            .unwrap()
            .into_iter()
            .map(|(k, _)| *k)
            .collect::<Vec<_>>();
        assert_eq!(sorted, vec![1, 5, 2, 3, 4]);
        // Introduce a 1 <-> 5 cycle; only `allow_cycles = true` should succeed.
        tree.insert(1, vec![5]);
        topo_sort(&tree, false, |ds| ds.clone()).expect_err("shouldn't allow cycles");
        let sorted = topo_sort(&tree, true, |ds| ds.clone()).expect("should allow cycles");
        assert_eq!(sorted.len(), tree.len());
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/arb.rs | fendermint/testing/materializer/src/arb.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use ethers::{
core::rand::{rngs::StdRng, SeedableRng},
types::H160,
};
use fendermint_vm_core::chainid;
use lazy_static::lazy_static;
use std::{
cmp::min,
collections::BTreeMap,
ops::{Mul, SubAssign},
};
use url::Url;
use fendermint_vm_genesis::Collateral;
use fvm_shared::{
bigint::{BigInt, Integer, Zero},
econ::TokenAmount,
};
use quickcheck::{Arbitrary, Gen};
use crate::{
manifest::{
Account, Balance, BalanceMap, CheckpointConfig, CollateralMap, EnvMap, IpcDeployment,
Manifest, Node, NodeMap, NodeMode, ParentNode, Relayer, Rootnet, Subnet, SubnetMap,
},
AccountId, NodeId, RelayerId, ResourceId, SubnetId,
};
// Characters allowed in randomly generated resource identifiers.
const RESOURCE_ID_CHARSET: &[u8] =
    b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
lazy_static! {
    /// Assume we have as much tFIL on the root as the faucet would give.
    static ref DEFAULT_BALANCE: Balance = Balance(TokenAmount::from_whole(100));
}
/// Select at least `min_size` items from a slice, without replacement,
/// keeping their original order, in a single pass.
fn choose_at_least<T: Clone>(g: &mut Gen, min_size: usize, xs: &[T]) -> Vec<T> {
    let min_size = min(min_size, xs.len());
    if min_size == xs.len() {
        return Vec::from(xs);
    }
    // Say we have 10 items and we have 3 slots to fill.
    //
    // Imagine a simple algorithm that selects 1 item 3 times without replacement.
    // Initially each item has 1/10 chance to be selected, then 1/9, then 1/8,
    // but we would need to track which item has already been chosen.
    //
    // We want to do a single pass over the data.
    //
    // If we consider the 1st item, the chance that it doesn't get selected for any of the slots is:
    // P_excl(1st) = 9/10 * 8/9 * 7/8 = 7/10
    // P_incl(1st) = 1 - P_excl(1st) = 3/10
    //
    // So we roll the dice and with 30% probability we include the 1st item in the list.
    //
    // Then we have two cases to consider:
    // 1. We included the 1st item, so now we have 2 slots and remaining 9 items to choose from.
    //    P_incl(2nd | incl(1st)) = 1 - 8/9 * 7/8 = 1 - 7/9 = 2/9
    // 2. We excluded the 1st item, so we still have 3 slots to fill and remaining 9 items to choose from.
    //    P_incl(2nd | excl(1st)) = 1 - 8/9 * 7/8 * 6/7 = 1 - 6/9 = 3/9
    //
    // Thus, the probability of including each item is `remaining slots / remaining items`
    let mut remaining_slots = min_size + usize::arbitrary(g) % (xs.len() - min_size);
    let mut remaining_items = xs.len();
    let mut chosen = Vec::new();
    for x in xs {
        if remaining_slots == 0 {
            break;
        }
        if usize::arbitrary(g) % remaining_items < remaining_slots {
            chosen.push(x.clone());
            remaining_slots -= 1;
        }
        remaining_items -= 1;
    }
    chosen
}
/// Pick a single item from a non-empty slice at random; panics on an empty slice.
fn choose_one<T: Clone>(g: &mut Gen, xs: &[T]) -> T {
    let chosen = g.choose(xs).expect("empty slice to choose from");
    chosen.clone()
}
impl Arbitrary for ResourceId {
    /// Generate a random identifier of 3 to 8 characters from the allowed charset.
    fn arbitrary(g: &mut Gen) -> Self {
        let len = 3 + usize::arbitrary(g) % 6;
        let mut id = String::with_capacity(len);
        for _ in 0..len {
            let idx = usize::arbitrary(g) % RESOURCE_ID_CHARSET.len();
            id.push(char::from(RESOURCE_ID_CHARSET[idx]));
        }
        Self(id)
    }
}
impl Arbitrary for Balance {
fn arbitrary(g: &mut Gen) -> Self {
Self(Collateral::arbitrary(g).0)
}
}
impl Arbitrary for Manifest {
    /// Generate a manifest with a bounded tree: at most 3 children per subnet,
    /// at most 3 levels, each account starting with the default balance.
    fn arbitrary(g: &mut Gen) -> Self {
        let starting_balance = DEFAULT_BALANCE.clone();
        gen_manifest(g, 3, 3, starting_balance)
    }
}
/// Generate a random manifest: accounts, a rootnet (external or new), and a
/// recursively generated tree of subnets.
fn gen_manifest(
    g: &mut Gen,
    max_children: usize,
    max_level: usize,
    default_balance: Balance,
) -> Manifest {
    // 3 to 5 accounts, each starting with the default balance.
    let account_ids = (0..3 + usize::arbitrary(g) % 3)
        .map(|_| AccountId::arbitrary(g))
        .collect::<Vec<_>>();
    let accounts = account_ids
        .iter()
        .map(|id| (id.clone(), Account {}))
        .collect();
    // Remaining funds per account; threaded through subnet generation below.
    let mut balances: BalanceMap = account_ids
        .iter()
        .map(|id| (id.clone(), default_balance.clone()))
        .collect();
    let rootnet = if bool::arbitrary(g) {
        Rootnet::External {
            chain_id: chainid::from_str_hashed(&String::arbitrary(g))
                .unwrap_or(12345u64.into())
                .into(),
            deployment: if bool::arbitrary(g) {
                let [gateway, registry] = gen_addresses::<2>(g);
                IpcDeployment::Existing { gateway, registry }
            } else {
                IpcDeployment::New {
                    deployer: choose_one(g, &account_ids),
                }
            },
            urls: gen_urls(g),
        }
    } else {
        // Snapshot balances before subnet generation deducts collaterals.
        let initial_balances = balances.clone();
        let subnet = gen_root_subnet(g, &account_ids, &mut balances);
        Rootnet::New {
            validators: subnet.validators,
            balances: initial_balances,
            nodes: subnet.nodes,
            env: gen_env(g),
        }
    };
    // Collect the parent nodes on the rootnet that subnets can target.
    let parent_nodes: Vec<ParentNode> = match &rootnet {
        Rootnet::External { urls, .. } => urls.iter().cloned().map(ParentNode::External).collect(),
        Rootnet::New { ref nodes, .. } => nodes.keys().cloned().map(ParentNode::Internal).collect(),
    };
    // The rootnet is L1, immediate subnets are L2.
    let subnets = gen_subnets(
        g,
        max_children,
        max_level,
        2,
        &account_ids,
        &account_ids,
        &parent_nodes,
        &mut balances,
    );
    Manifest {
        accounts,
        rootnet,
        subnets,
    }
}
/// Generate `N` random Ethereum addresses.
///
/// The RNG is seeded from the quickcheck generator so results stay
/// reproducible for a given `Gen` state.
fn gen_addresses<const N: usize>(g: &mut Gen) -> [H160; N] {
    let seed = u64::arbitrary(g);
    let mut rng = StdRng::seed_from_u64(seed);
    [(); N].map(|()| H160::random_using(&mut rng))
}
/// Generate something that looks like it could be a JSON-RPC endpoint of an L1.
///
/// Returns between 1 and 3 URLs, as if we had a list of nodes to choose from.
fn gen_urls(g: &mut Gen) -> Vec<Url> {
    let count = 1 + usize::arbitrary(g) % 3;
    (0..count)
        .map(|_| {
            let id = ResourceId::arbitrary(g);
            // The glif.io addresses are load balanced, but let's pretend we can
            // target a specific node. Alternatively we could vary the ports.
            let endpoint = format!("https://{}.api.calibration.node.glif.io/rpc/v1", id.0);
            Url::parse(&endpoint).expect("URL should parse")
        })
        .collect()
}
/// Recursively generate some subnets.
///
/// * `level` - current depth; the rootnet is level 1, its children level 2.
/// * `parent_account_ids` - accounts known on the parent subnet, used to pick
///   creators and relayer submitters who can pay fees there.
/// * `parent_nodes` - nodes on the parent that children can sync from.
/// * `remaining_balances` - mutable pool of funds drawn down by collaterals
///   and balances, so the hierarchy never promises more than accounts have.
#[allow(clippy::too_many_arguments)]
fn gen_subnets(
    g: &mut Gen,
    max_children: usize,
    max_level: usize,
    level: usize,
    account_ids: &[AccountId],
    parent_account_ids: &[AccountId],
    parent_nodes: &[ParentNode],
    remaining_balances: &mut BalanceMap,
) -> SubnetMap {
    let mut subnets = SubnetMap::default();
    if level > max_level {
        return subnets;
    }
    // Let the root have at least 1 child, otherwise it's not interesting.
    let min_children = if level == 2 { 1 } else { 0 };
    let num_children = if max_children <= min_children {
        min_children
    } else {
        min_children + usize::arbitrary(g) % (max_children - min_children)
    };
    for _ in 0..num_children {
        // Pick one of the accounts on the parent subnet as creator.
        // This way they should have some non-zero balance to pay for the fees.
        let creator = choose_one(g, parent_account_ids);
        // Every subnet needs validators, so at least 1 needs to be chosen.
        let validators: CollateralMap = choose_at_least(g, 1, account_ids)
            .into_iter()
            .map(|a| {
                let c = gen_collateral(g, &a, remaining_balances);
                (a, c)
            })
            .collect();
        // It's not necessary to have accounts in a subnet; but let's pick at least one
        // so that we have someone to use on this subnet to pick as a creator or relayer
        // on child subnets.
        let balances: BalanceMap = choose_at_least(g, 1, account_ids)
            .into_iter()
            .map(|a| {
                let b: Balance = gen_balance(g, &a, remaining_balances);
                (a, b)
            })
            .collect();
        // Run at least a quorum of validators.
        let total_weight: TokenAmount = validators.values().map(|c| c.0.clone()).sum();
        let quorum_weight = total_weight.mul(2).div_floor(3);
        let mut node_ids = Vec::new();
        let mut nodes = NodeMap::default();
        let mut running_weight = TokenAmount::zero();
        for (v, w) in validators.iter() {
            // Keep creating validator nodes until more than 2/3 of the total
            // collateral is covered; after that it's a coin flip between a
            // validator and a full node.
            let mode = if running_weight <= quorum_weight || bool::arbitrary(g) {
                NodeMode::Validator {
                    validator: v.clone(),
                }
            } else {
                NodeMode::Full
            };
            // The first node has nobody to bootstrap from; later nodes seed
            // from at least one of the previously created ones.
            let seed_nodes = if node_ids.is_empty() {
                vec![]
            } else {
                choose_at_least(g, 1, &node_ids)
            };
            let node = Node {
                mode,
                ethapi: bool::arbitrary(g),
                seed_nodes,
                parent_node: if parent_nodes.is_empty() {
                    None
                } else {
                    Some(choose_one(g, parent_nodes))
                },
            };
            let id = NodeId::arbitrary(g);
            node_ids.push(id.clone());
            nodes.insert(id, node);
            running_weight += w.0.clone();
        }
        // Relayers only make sense if there is a parent to submit checkpoints to.
        let relayers = if parent_nodes.is_empty() {
            BTreeMap::default()
        } else {
            (0..1 + usize::arbitrary(g) % 3)
                .map(|_| {
                    let r = Relayer {
                        submitter: choose_one(g, parent_account_ids),
                        follow_node: choose_one(g, &node_ids),
                        submit_node: choose_one(g, parent_nodes),
                    };
                    let id = RelayerId::arbitrary(g);
                    (id, r)
                })
                .collect()
        };
        // From the grandchildren's point of view, this subnet's nodes and
        // funded accounts become the parents to build on.
        let parent_nodes = node_ids
            .into_iter()
            .map(ParentNode::Internal)
            .collect::<Vec<_>>();
        let parent_account_ids = balances.keys().cloned().collect::<Vec<_>>();
        let child_subnets = gen_subnets(
            g,
            max_children,
            max_level,
            level + 1,
            account_ids,
            &parent_account_ids,
            &parent_nodes,
            remaining_balances,
        );
        let subnet = Subnet {
            creator,
            validators,
            balances,
            nodes,
            relayers,
            subnets: child_subnets,
            env: gen_env(g),
            bottom_up_checkpoint: CheckpointConfig {
                // Adding 1 because 0 is not accepted by the contracts.
                period: u64::arbitrary(g).mod_floor(&86400u64) + 1,
            },
        };
        let sid = SubnetId::arbitrary(g);
        subnets.insert(sid, subnet);
    }
    subnets
}
/// Generate a random root-like subnet. The motivation for this is just to
/// reuse the validator allocation for a new rootnet.
fn gen_root_subnet(
    g: &mut Gen,
    account_ids: &[AccountId],
    remaining_balances: &mut BalanceMap,
) -> Subnet {
    // With `max_children = 1` at level 2 the generator produces exactly one subnet.
    let generated = gen_subnets(g, 1, 2, 2, account_ids, account_ids, &[], remaining_balances);
    debug_assert_eq!(generated.len(), 1, "should have exactly 1 subnet");
    let (_id, mut subnet) = generated.into_iter().next().unwrap();
    // The root has no parent, so relayers make no sense there.
    subnet.relayers.clear();
    subnet
}
/// Generate up to 4 random environment variables with "CMT_" or "FM_" prefixes.
fn gen_env(g: &mut Gen) -> EnvMap {
    let count = usize::arbitrary(g) % 5;
    let mut env = EnvMap::default();
    for _ in 0..count {
        // Mimic the prefixes used for CometBFT ("CMT") and Fendermint ("FM") settings.
        let prefix = if bool::arbitrary(g) { "CMT" } else { "FM" };
        let key = format!("{prefix}_{}", ResourceId::arbitrary(g).0);
        let value = String::arbitrary(g);
        env.insert(key, value);
    }
    env
}
/// Choose some balance, up to 10% of the remaining balance of the account, minimum 1 atto.
///
/// Decrements the remaining balance so we don't run out.
fn gen_balance(g: &mut Gen, account_id: &AccountId, balances: &mut BalanceMap) -> Balance {
    let remaining = balances
        .get_mut(account_id)
        .expect("account doesn't have balance");
    // Cap at 10% (rounded up) of what the account still has.
    let cap = remaining.0.atto().div_ceil(&BigInt::from(10));
    let atto = BigInt::arbitrary(g).mod_floor(&cap).max(BigInt::from(1));
    let amount = TokenAmount::from_atto(atto);
    remaining.0.sub_assign(amount.clone());
    Balance(amount)
}
fn gen_collateral(g: &mut Gen, account_id: &AccountId, balances: &mut BalanceMap) -> Collateral {
let b = gen_balance(g, account_id, balances);
Collateral(b.0)
}
#[cfg(test)]
mod tests {
    use std::collections::{BTreeSet, HashSet};
    use quickcheck::Arbitrary;
    use quickcheck_macros::quickcheck;
    use super::choose_at_least;
    /// A random sampling scenario: a list of unique items, a minimum sample
    /// size, and the sample that `choose_at_least` produced for them.
    #[derive(Clone, Debug)]
    struct TestSample {
        items: Vec<u8>,
        min_size: usize,
        sample: Vec<u8>,
    }
    impl Arbitrary for TestSample {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            // Use a set so items are unique; insert one extra element so the
            // collection is guaranteed to be non-empty.
            let mut items = HashSet::<u8>::arbitrary(g);
            items.insert(u8::arbitrary(g));
            let items = items.into_iter().collect::<Vec<_>>();
            // Require between 1 and items.len() samples.
            let min_size = 1 + usize::arbitrary(g) % items.len();
            let sample = choose_at_least(g, min_size, &items);
            TestSample {
                items,
                min_size,
                sample,
            }
        }
    }
    /// Properties of `choose_at_least`: honours the minimum, never exceeds
    /// the available items, only returns existing items, and never repeats.
    #[quickcheck]
    fn test_sample_at_least(data: TestSample) {
        let sample_set = BTreeSet::from_iter(&data.sample);
        let item_set = BTreeSet::from_iter(&data.items);
        assert!(
            data.sample.len() >= data.min_size,
            "sampled at least the required amount"
        );
        assert!(
            data.sample.len() <= data.items.len(),
            "didn't sample more than available"
        );
        assert!(
            sample_set.is_subset(&item_set),
            "sample items are taken from the existing ones"
        );
        assert_eq!(data.sample.len(), sample_set.len(), "sample is unique");
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/docker/node.rs | fendermint/testing/materializer/src/docker/node.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
collections::BTreeMap,
fmt::Display,
path::{Path, PathBuf},
str::FromStr,
time::{Duration, Instant},
};
use anyhow::{anyhow, bail, Context};
use bollard::Docker;
use ethers::{providers::Middleware, types::H160};
use fvm_shared::bigint::Zero;
use lazy_static::lazy_static;
use tendermint_rpc::Client;
use url::Url;
use super::{
container::DockerContainer,
dropper::{DropChute, DropPolicy},
network::NetworkName,
runner::DockerRunner,
user_id, DockerMaterials, DockerPortRange, Volumes, COMETBFT_IMAGE, FENDERMINT_IMAGE,
};
use crate::{
docker::DOCKER_ENTRY_FILE_NAME,
env_vars,
manifest::EnvMap,
materializer::{NodeConfig, TargetConfig},
materials::export_file,
HasCometBftApi, HasEthApi, NodeName, ResourceHash,
};
/// The static environment variables are the ones we can assign during node creation,
/// ie. they don't depend on other nodes' values which get determined during their creation.
const STATIC_ENV: &str = "static.env";
/// The dynamic environment variables are ones we can only during the start of the node,
/// by which time all other nodes will have been created. Examples of this are network
/// identities which depend on network keys being created; in order to create a fully
/// connected network, we first need all network keys to be created, then we can look
/// all of them up during the start of each node.
/// These go into a separate file just so it's easy to recreate them.
const DYNAMIC_ENV: &str = "dynamic.env";
/// File name under the node's `keys` directory where the CometBFT node ID is persisted.
const COMETBFT_NODE_ID: &str = "cometbft-node-id";
/// File name under the node's `keys` directory where the libp2p peer ID is persisted.
const FENDERMINT_PEER_ID: &str = "fendermint-peer-id";
// Container-internal ports; the host side ports come from a `DockerPortRange`.
const RESOLVER_P2P_PORT: u32 = 26655;
const COMETBFT_P2P_PORT: u32 = 26656;
const COMETBFT_RPC_PORT: u32 = 26657;
const FENDERMINT_ABCI_PORT: u32 = 26658;
const ETHAPI_RPC_PORT: u32 = 8445;
const METRICS_RPC_PORT: u32 = 9184;
lazy_static! {
    // Paths where the env files and the entry script are mounted inside the containers.
    static ref STATIC_ENV_PATH: String = format!("/opt/docker/{STATIC_ENV}");
    static ref DYNAMIC_ENV_PATH: String = format!("/opt/docker/{DYNAMIC_ENV}");
    static ref DOCKER_ENTRY_PATH: String = format!("/opt/docker/{DOCKER_ENTRY_FILE_NAME}");
}
/// A Node consists of multiple docker containers.
pub struct DockerNode {
    /// Logical name of the node in the subnet hierarchy.
    node_name: NodeName,
    /// Name of the docker network the node's containers are attached to.
    network_name: String,
    /// Container running the Fendermint application.
    fendermint: DockerContainer,
    /// Container running the CometBFT consensus engine.
    cometbft: DockerContainer,
    /// Container running the Ethereum JSON-RPC facade, if enabled for this node.
    ethapi: Option<DockerContainer>,
    /// Host port assignments for the ports published by the containers.
    port_range: DockerPortRange,
    /// This is the file system directory where all the artifacts
    /// regarding this node are stored, such as docker volumes and keys.
    path: PathBuf,
}
impl Display for DockerNode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // A node is displayed by its logical name in the hierarchy.
        write!(f, "{}", self.node_name)
    }
}
impl DockerNode {
pub async fn get_or_create<'a>(
root: impl AsRef<Path>,
docker: Docker,
dropper: DropChute,
drop_policy: &DropPolicy,
node_name: &NodeName,
node_config: &NodeConfig<'a, DockerMaterials>,
port_range: DockerPortRange,
) -> anyhow::Result<Self> {
let fendermint_name = container_name(node_name, "fendermint");
let cometbft_name = container_name(node_name, "cometbft");
let ethapi_name = container_name(node_name, "ethapi");
let fendermint = DockerContainer::get(
docker.clone(),
dropper.clone(),
drop_policy,
fendermint_name.clone(),
)
.await?;
let cometbft = DockerContainer::get(
docker.clone(),
dropper.clone(),
drop_policy,
cometbft_name.clone(),
)
.await?;
let ethapi = DockerContainer::get(
docker.clone(),
dropper.clone(),
drop_policy,
ethapi_name.clone(),
)
.await?;
// Directory for the node's data volumes
let node_dir = root.as_ref().join(node_name);
std::fs::create_dir_all(&node_dir).context("failed to create node dir")?;
// Get the current user ID to use with docker containers.
let user = user_id(&node_dir)?;
let make_runner = |image, volumes| {
DockerRunner::new(
docker.clone(),
dropper.clone(),
drop_policy.clone(),
node_name.clone(),
user,
image,
volumes,
Some(node_config.network.network_name().to_string()),
)
};
// Create a directory for keys
let keys_dir = node_dir.join("keys");
if !keys_dir.exists() {
std::fs::create_dir(&keys_dir)?;
}
// Create a directory for cometbft
let cometbft_dir = node_dir.join("cometbft");
if !cometbft_dir.exists() {
std::fs::create_dir(&cometbft_dir)?;
}
// Create a directory for fendermint
let fendermint_dir = node_dir.join("fendermint");
if !fendermint_dir.exists() {
std::fs::create_dir(&fendermint_dir)?;
std::fs::create_dir(fendermint_dir.join("data"))?;
std::fs::create_dir(fendermint_dir.join("logs"))?;
std::fs::create_dir(fendermint_dir.join("snapshots"))?;
}
// Create a directory for ethapi logs
let ethapi_dir = node_dir.join("ethapi");
if !ethapi_dir.exists() {
std::fs::create_dir_all(ethapi_dir.join("logs"))?;
}
// We'll need to run some cometbft and fendermint commands.
// NOTE: Currently the Fendermint CLI commands live in the
// `app` crate in a way that they can't be imported. We
// could move them to the `lib.rs` from `main.rs` and
// then we wouldn't need docker for some of these steps.
// However, at least this way they are tested.
let cometbft_runner =
make_runner(COMETBFT_IMAGE, vec![(cometbft_dir.clone(), "/cometbft")]);
let fendermint_runner = make_runner(
FENDERMINT_IMAGE,
vec![
(keys_dir.clone(), "/fendermint/keys"),
(cometbft_dir.clone(), "/cometbft"),
(node_config.genesis.path.clone(), "/fendermint/genesis.json"),
],
);
// Only run init once, just in case it would overwrite previous values.
if !cometbft_dir.join("config").exists() {
// Init cometbft to establish the network key.
cometbft_runner
.run_cmd("init")
.await
.context("cannot init cometbft")?;
}
// Capture the cometbft node identity.
let cometbft_node_id = cometbft_runner
.run_cmd("show-node-id")
.await
.context("cannot show node ID")?
.into_iter()
.last()
.ok_or_else(|| anyhow!("empty cometbft node ID"))
.and_then(parse_cometbft_node_id)?;
export_file(keys_dir.join(COMETBFT_NODE_ID), cometbft_node_id)?;
// Convert fendermint genesis to cometbft.
fendermint_runner
.run_cmd(
"genesis \
--genesis-file /fendermint/genesis.json \
into-tendermint \
--out /cometbft/config/genesis.json \
",
)
.await
.context("failed to convert genesis")?;
// Convert validator private key to cometbft.
if let Some(v) = node_config.validator {
let validator_key_path = v.secret_key_path();
std::fs::copy(validator_key_path, keys_dir.join("validator_key.sk"))
.context("failed to copy validator key")?;
fendermint_runner
.run_cmd(
"key into-tendermint \
--secret-key /fendermint/keys/validator_key.sk \
--out /cometbft/config/priv_validator_key.json \
",
)
.await
.context("failed to convert validator key")?;
}
// Create a network key for the resolver.
fendermint_runner
.run_cmd("key gen --out-dir /fendermint/keys --name network_key")
.await
.context("failed to create network key")?;
// Capture the fendermint node identity.
let fendermint_peer_id = fendermint_runner
.run_cmd("key show-peer-id --public-key /fendermint/keys/network_key.pk")
.await
.context("cannot show peer ID")?
.into_iter()
.last()
.ok_or_else(|| anyhow!("empty fendermint peer ID"))
.and_then(parse_fendermint_peer_id)?;
export_file(keys_dir.join(FENDERMINT_PEER_ID), fendermint_peer_id)?;
// If there is no static env var file, create one with all the common variables.
let static_env = node_dir.join(STATIC_ENV);
if !static_env.exists() {
let genesis = &node_config.genesis.genesis;
let ipc = genesis
.ipc
.as_ref()
.ok_or_else(|| anyhow!("ipc config missing"))?;
let resolver_host_port: u32 = port_range.from;
// Start with the subnet level variables.
let mut env: EnvMap = node_config.env.clone();
env.extend(env_vars![
"RUST_BACKTRACE" => 1,
"FM_DATA_DIR" => "/fendermint/data",
"FM_LOG_DIR" => "/fendermint/logs",
"FM_SNAPSHOTS_DIR" => "/fendermint/snapshots",
"FM_CHAIN_NAME" => genesis.chain_name.clone(),
"FM_IPC__SUBNET_ID" => ipc.gateway.subnet_id,
"FM_RESOLVER__NETWORK__LOCAL_KEY" => "/fendermint/keys/network_key.sk",
"FM_RESOLVER__CONNECTION__LISTEN_ADDR" => format!("/ip4/0.0.0.0/tcp/{RESOLVER_P2P_PORT}"),
"FM_TENDERMINT_RPC_URL" => format!("http://{cometbft_name}:{COMETBFT_RPC_PORT}"),
"TENDERMINT_RPC_URL" => format!("http://{cometbft_name}:{COMETBFT_RPC_PORT}"),
"TENDERMINT_WS_URL" => format!("ws://{cometbft_name}:{COMETBFT_RPC_PORT}/websocket"),
"FM_ABCI__LISTEN__PORT" => FENDERMINT_ABCI_PORT,
"FM_ETH__LISTEN__PORT" => ETHAPI_RPC_PORT,
"FM_METRICS__LISTEN__PORT" => METRICS_RPC_PORT,
]);
if node_config.validator.is_some() {
env.extend(env_vars![
"FM_VALIDATOR_KEY__KIND" => "ethereum",
"FM_VALIDATOR_KEY__PATH" => "/fendermint/keys/validator_key.sk",
]);
}
// Configure the outbound peers so once fully connected, CometBFT can stop looking for peers.
if !node_config.peer_count.is_zero() {
env.insert(
"CMT_P2P_MAX_NUM_OUTBOUND_PEERS".into(),
(node_config.peer_count - 1).to_string(),
);
}
if let Some(ref pc) = node_config.parent_node {
let gateway: H160 = pc.deployment.gateway.into();
let registry: H160 = pc.deployment.registry.into();
env.extend(env_vars![
"FM_IPC__TOPDOWN__PARENT_REGISTRY" => format!("{registry:?}"),
"FM_IPC__TOPDOWN__PARENT_GATEWAY" => format!("{gateway:?}"),
]);
let topdown = match pc.node {
// Assume Lotus
TargetConfig::External(ref url) => env_vars![
"FM_IPC__TOPDOWN__CHAIN_HEAD_DELAY" => 20,
"FM_IPC__TOPDOWN__PARENT_HTTP_ENDPOINT" => url,
"FM_IPC__TOPDOWN__EXPONENTIAL_BACK_OFF" => 5,
"FM_IPC__TOPDOWN__EXPONENTIAL_RETRY_LIMIT" => 5 ,
"FM_IPC__TOPDOWN__POLLING_INTERVAL" => 10,
"FM_IPC__TOPDOWN__PROPOSAL_DELAY" => 2,
"FM_IPC__TOPDOWN__MAX_PROPOSAL_RANGE" => 100,
],
// Assume Fendermint
TargetConfig::Internal(node) => {
let parent_ethapi = node.ethapi.as_ref().ok_or_else(|| {
anyhow!(
"{node_name} cannot follow {}; ethapi is not running",
node.node_name
)
})?;
env_vars![
"FM_IPC__TOPDOWN__CHAIN_HEAD_DELAY" => 1,
"FM_IPC__TOPDOWN__PARENT_HTTP_ENDPOINT" => format!("http://{}:{ETHAPI_RPC_PORT}", parent_ethapi.hostname()),
"FM_IPC__TOPDOWN__EXPONENTIAL_BACK_OFF" => 5,
"FM_IPC__TOPDOWN__EXPONENTIAL_RETRY_LIMIT" => 5 ,
"FM_IPC__TOPDOWN__POLLING_INTERVAL" => 1,
"FM_IPC__TOPDOWN__PROPOSAL_DELAY" => 0,
"FM_IPC__TOPDOWN__MAX_PROPOSAL_RANGE" => 10,
]
}
};
env.extend(topdown);
}
env.extend(env_vars![
"CMT_PROXY_APP" => format!("tcp://{fendermint_name}:{FENDERMINT_ABCI_PORT}"),
"CMT_P2P_PEX" => true,
"CMT_RPC_MAX_SUBSCRIPTION_CLIENTS" => 10,
"CMT_RPC_MAX_SUBSCRIPTIONS_PER_CLIENT" => 1000,
]);
// Export the env to a file.
export_env(&static_env, &env).context("failed to export env")?;
}
// If there is no dynamic env var file, create an empty one so it can be mounted.
let dynamic_env = node_dir.join(DYNAMIC_ENV);
if !dynamic_env.exists() {
// The values will be assigned when the node is started.
export_env(&dynamic_env, &Default::default())?;
}
// All containers will be started with the docker entry and all env files.
let volumes = |vs: Volumes| {
let common: Volumes = vec![
(static_env.clone(), STATIC_ENV_PATH.as_str()),
(dynamic_env.clone(), DYNAMIC_ENV_PATH.as_str()),
(
root.as_ref().join("scripts").join(DOCKER_ENTRY_FILE_NAME),
DOCKER_ENTRY_PATH.as_str(),
),
];
[common, vs].concat()
};
// Wrap an entry point with the docker entry script.
let entrypoint = |ep: &str| {
vec![
DOCKER_ENTRY_PATH.to_string(),
ep.to_string(),
STATIC_ENV_PATH.to_string(),
DYNAMIC_ENV_PATH.to_string(),
]
};
// Create a fendermint container mounting:
let fendermint = match fendermint {
Some(c) => c,
None => {
let creator = make_runner(
FENDERMINT_IMAGE,
volumes(vec![
(keys_dir.clone(), "/fendermint/keys"),
(fendermint_dir.join("data"), "/fendermint/data"),
(fendermint_dir.join("logs"), "/fendermint/logs"),
(fendermint_dir.join("snapshots"), "/fendermint/snapshots"),
]),
);
creator
.create(
fendermint_name,
vec![
(port_range.resolver_p2p_host_port(), RESOLVER_P2P_PORT),
(port_range.fendermint_metrics_host_port(), METRICS_RPC_PORT),
],
entrypoint("fendermint run"),
)
.await
.context("failed to create fendermint")?
}
};
// Create a CometBFT container
let cometbft = match cometbft {
Some(c) => c,
None => {
let creator = make_runner(
COMETBFT_IMAGE,
volumes(vec![(cometbft_dir.clone(), "/cometbft")]),
);
creator
.create(
cometbft_name,
vec![
(port_range.cometbft_p2p_host_port(), COMETBFT_P2P_PORT),
(port_range.cometbft_rpc_host_port(), COMETBFT_RPC_PORT),
],
entrypoint("cometbft start"),
)
.await
.context("failed to create fendermint")?
}
};
// Create a ethapi container
let ethapi = match ethapi {
None if node_config.ethapi => {
let creator = make_runner(
FENDERMINT_IMAGE,
volumes(vec![(ethapi_dir.join("logs"), "/fendermint/logs")]),
);
let c = creator
.create(
ethapi_name,
vec![(port_range.ethapi_rpc_host_port(), ETHAPI_RPC_PORT)],
entrypoint("fendermint eth run"),
)
.await
.context("failed to create ethapi")?;
Some(c)
}
other => other,
};
// Construct the DockerNode
Ok(DockerNode {
node_name: node_name.clone(),
network_name: node_config.network.network_name().to_string(),
fendermint,
cometbft,
ethapi,
port_range,
path: node_dir,
})
}
pub async fn start(&self, seed_nodes: &[&Self]) -> anyhow::Result<()> {
let cometbft_seeds = collect_seeds(seed_nodes, |n| {
let host = &n.cometbft.hostname();
let id = n.cometbft_node_id()?;
Ok(format!("{id}@{host}:{COMETBFT_P2P_PORT}"))
})?;
let resolver_seeds = collect_seeds(seed_nodes, |n| {
let host = &n.fendermint.hostname();
let id = n.fendermint_peer_id()?;
Ok(format!("/dns/{host}/tcp/{RESOLVER_P2P_PORT}/p2p/{id}"))
})?;
let env = env_vars! [
"CMT_P2P_SEEDS" => cometbft_seeds,
"FM_RESOLVER__DISCOVERY__STATIC_ADDRESSES" => resolver_seeds,
];
export_env(self.path.join(DYNAMIC_ENV), &env)?;
// Start all three containers.
self.fendermint.start().await?;
self.cometbft.start().await?;
if let Some(ref ethapi) = self.ethapi {
ethapi.start().await?;
}
Ok(())
}
/// Allow time for things to consolidate and APIs to start.
pub async fn wait_for_started(&self, timeout: Duration) -> anyhow::Result<bool> {
let start = Instant::now();
loop {
if start.elapsed() > timeout {
return Ok(false);
}
tokio::time::sleep(Duration::from_secs(1)).await;
let client = self.cometbft_http_provider()?;
if let Err(e) = client.abci_info().await {
continue;
}
if let Some(client) = self.ethapi_http_provider()? {
if let Err(e) = client.get_chainid().await {
continue;
}
}
return Ok(true);
}
}
/// Read the CometBFT node ID (network identity) from the file we persisted during creation.
pub fn cometbft_node_id(&self) -> anyhow::Result<String> {
read_file(self.path.join("keys").join(COMETBFT_NODE_ID))
}
/// Read the libp2p peer ID (network identity) from the file we persisted during creation.
pub fn fendermint_peer_id(&self) -> anyhow::Result<String> {
read_file(self.path.join("keys").join(FENDERMINT_PEER_ID))
}
pub async fn fendermint_logs(&self) -> Vec<String> {
self.fendermint.logs().await
}
pub async fn cometbft_logs(&self) -> Vec<String> {
self.cometbft.logs().await
}
pub async fn ethapi_logs(&self) -> Vec<String> {
match self.ethapi {
None => Vec::new(),
Some(ref c) => c.logs().await,
}
}
/// The HTTP endpoint of the Ethereum API *inside Docker*, if it's enabled.
pub fn internal_ethapi_http_endpoint(&self) -> Option<Url> {
self.ethapi.as_ref().map(|c| {
url::Url::parse(&format!("http://{}:{}", c.hostname(), ETHAPI_RPC_PORT))
.expect("valid url")
})
}
/// Name of the docker network.
pub fn network_name(&self) -> &NetworkName {
&self.network_name
}
}
impl HasEthApi for DockerNode {
    fn ethapi_http_endpoint(&self) -> Option<url::Url> {
        // Only nodes that run the ethapi container expose an endpoint on the host.
        self.ethapi.as_ref()?;
        let endpoint = format!(
            "http://127.0.0.1:{}",
            self.port_range.ethapi_rpc_host_port()
        );
        Some(url::Url::parse(&endpoint).expect("valid url"))
    }
}
impl HasCometBftApi for DockerNode {
    fn cometbft_http_endpoint(&self) -> tendermint_rpc::Url {
        // The RPC port is published on the host loopback interface.
        let endpoint = format!(
            "http://127.0.0.1:{}",
            self.port_range.cometbft_rpc_host_port()
        );
        tendermint_rpc::Url::from_str(&endpoint).unwrap()
    }
}
/// Create a container name from a node name and a logical container name, e.g. "cometbft"
/// in a way that we can use it as a hostname without being too long.
///
/// It consists of `{node-id}-{container}-{hash(node-name)}`,
/// e.g. "node-12-cometbft-a1b2c3"
fn container_name(node_name: &NodeName, container: &str) -> String {
    // The last component of the node's path, e.g. "node-12".
    let node_id = node_name
        .path()
        .file_stem()
        .unwrap_or_default()
        .to_string_lossy()
        .into_owned();
    // Disambiguate nodes with the same ID in different subnets.
    let digest = ResourceHash::digest(node_name.path_string()).to_string();
    let short_hash = &digest[..6];
    format!("{node_id}-{container}-{short_hash}")
}
/// Collect comma separated values from seed nodes.
fn collect_seeds<F>(seed_nodes: &[&DockerNode], f: F) -> anyhow::Result<String>
where
    F: Fn(&DockerNode) -> anyhow::Result<String>,
{
    let mut seeds = Vec::with_capacity(seed_nodes.len());
    for node in seed_nodes {
        let seed = f(node).context("failed to collect seeds")?;
        seeds.push(seed);
    }
    Ok(seeds.join(","))
}
/// Write an env map to a file as dotenv-style `KEY=VALUE` lines.
fn export_env(file_path: impl AsRef<Path>, env: &EnvMap) -> anyhow::Result<()> {
    let lines: Vec<String> = env
        .iter()
        .map(|(key, value)| format!("{key}={value}"))
        .collect();
    export_file(file_path, lines.join("\n"))
}
/// Read a file to a string, annotating errors with the path that failed.
fn read_file(file_path: impl AsRef<Path>) -> anyhow::Result<String> {
    let path = file_path.as_ref();
    std::fs::read_to_string(path)
        .with_context(|| format!("failed to read {}", path.to_string_lossy()))
}
/// Parse the output of `cometbft show-node-id`.
///
/// A CometBFT node ID is the hex encoding of a 20-byte address, i.e. exactly
/// 40 hexadecimal characters (see the valid sample in the tests below). The
/// length check also rejects the empty string, which `hex::decode` alone
/// would accept.
fn parse_cometbft_node_id(value: impl AsRef<str>) -> anyhow::Result<String> {
    let value = value.as_ref().trim().to_string();
    if value.len() != 40 || hex::decode(&value).is_err() {
        bail!("failed to parse CometBFT node ID: {value}");
    }
    Ok(value)
}
/// Parse the output of `fendermint key show-peer-id`.
///
/// A libp2p peer ID is base58 encoded; for the network keys we generate it is
/// 53 characters long. Besides the length, reject any character outside the
/// base58 alphabet (which excludes `0`, `O`, `I` and `l`), so that a stray
/// 53-character log line isn't mistaken for a peer ID.
fn parse_fendermint_peer_id(value: impl AsRef<str>) -> anyhow::Result<String> {
    let value = value.as_ref().trim().to_string();
    let is_base58 = value
        .chars()
        .all(|c| c.is_ascii_alphanumeric() && !matches!(c, '0' | 'O' | 'I' | 'l'));
    if value.len() != 53 || !is_base58 {
        bail!("failed to parse Fendermint peer ID: {value}");
    }
    Ok(value)
}
#[cfg(test)]
mod tests {
    use super::{DockerRunner, COMETBFT_IMAGE};
    use crate::{
        docker::{
            dropper::{self, DropPolicy},
            node::parse_cometbft_node_id,
        },
        NodeName, TestnetName,
    };
    use bollard::Docker;
    /// Construct a throwaway runner connected to the local docker daemon.
    fn make_runner() -> DockerRunner<NodeName> {
        let nn = TestnetName::new("test-network").root().node("test-node");
        let docker = Docker::connect_with_local_defaults().expect("failed to connect to docker");
        let (_drop_handle, drop_chute) = dropper::start(docker.clone());
        let drop_policy = DropPolicy::EPHEMERAL;
        DockerRunner::new(
            docker,
            drop_chute,
            drop_policy,
            nn,
            0,
            COMETBFT_IMAGE,
            Vec::new(),
            None,
        )
    }
    // NOTE: the next two tests require access to a local Docker daemon.
    #[tokio::test]
    async fn test_docker_run_output() {
        let runner = make_runner();
        // Based on my manual testing, this will initialise the config and then show the ID:
        // `docker run --rm cometbft/cometbft:v0.37.x show-node-id`
        let logs = runner
            .run_cmd("show-node-id")
            .await
            .expect("failed to show ID");
        assert!(!logs.is_empty());
        assert!(
            parse_cometbft_node_id(logs.last().unwrap()).is_ok(),
            "last line is a node ID"
        );
    }
    #[tokio::test]
    async fn test_docker_run_error() {
        // `show-peer-id` is not a cometbft subcommand, so the container exits non-zero.
        let runner = make_runner();
        let _err = runner
            .run_cmd("show-peer-id")
            .await
            .expect_err("wrong command should fail");
    }
    #[test]
    fn test_valid_cometbft_id() {
        assert!(
            parse_cometbft_node_id("eb9470dd3bfa7311f1de3f3d3d69a628531adcfe").is_ok(),
            "sample ID is valid"
        );
        assert!(parse_cometbft_node_id("I[2024-02-23|14:20:21.724] Generated genesis file module=main path=/cometbft/config/genesis.json").is_err(), "logs aren't valid");
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/docker/runner.rs | fendermint/testing/materializer/src/docker/runner.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{collections::HashMap, fmt::Display};
use anyhow::{bail, Context};
use bollard::{
container::{
AttachContainerOptions, AttachContainerResults, Config, CreateContainerOptions,
RemoveContainerOptions,
},
network::ConnectNetworkOptions,
secret::{ContainerInspectResponse, HostConfig, PortBinding},
Docker,
};
use futures::StreamExt;
use crate::{docker::current_network, manifest::EnvMap, ResourceName, TestnetResource};
use super::{
container::DockerContainer,
dropper::{DropChute, DropPolicy},
network::NetworkName,
DockerConstruct, Volumes,
};
/// Runs short-lived commands in one-off containers and creates the
/// long-running containers of a testnet resource.
pub struct DockerRunner<N> {
    /// Docker API client.
    docker: Docker,
    /// Channel used to hand containers to the dropper task for removal.
    dropper: DropChute,
    /// Policy deciding whether containers are kept after being dropped.
    drop_policy: DropPolicy,
    /// The testnet resource (e.g. a node) the containers belong to.
    name: N,
    /// User ID to run the container processes as.
    user: u32,
    /// Docker image to run.
    image: String,
    /// Host-to-container directory mounts.
    volumes: Volumes,
    /// Docker network to attach containers to, if any.
    network_name: Option<NetworkName>,
    /// Extra environment variables passed to the containers.
    env: EnvMap,
}
impl<N> DockerRunner<N>
where
    N: AsRef<ResourceName> + TestnetResource + Display,
{
    /// Create a runner; see the field docs on [`DockerRunner`] for the
    /// meaning of each argument.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        docker: Docker,
        dropper: DropChute,
        drop_policy: DropPolicy,
        name: N,
        user: u32,
        image: &str,
        volumes: Volumes,
        network_name: Option<NetworkName>,
    ) -> Self {
        Self {
            docker,
            dropper,
            drop_policy,
            name,
            user,
            image: image.to_string(),
            volumes,
            network_name,
            env: EnvMap::default(),
        }
    }
    /// Builder-style setter for the extra environment variables.
    pub fn with_env(mut self, env: EnvMap) -> Self {
        self.env = env;
        self
    }
    // Tag containers with resource names.
    fn labels(&self) -> HashMap<String, String> {
        [
            ("testnet", self.name.testnet().path()),
            ("resource", self.name.as_ref().path()),
        ]
        .into_iter()
        .map(|(n, p)| (n.to_string(), p.to_string_lossy().to_string()))
        .collect()
    }
    /// Render the environment variables as `KEY=VALUE` strings for the docker API.
    fn env(&self) -> Vec<String> {
        // Set the network otherwise we might not be able to parse addresses we created.
        let network = current_network();
        let mut env = vec![
            format!("FM_NETWORK={}", network),
            format!("IPC_NETWORK={}", network),
            format!("NETWORK={}", network),
        ];
        env.extend(self.env.iter().map(|(k, v)| format!("{k}={v}")));
        env
    }
    /// Run a short lived container.
    ///
    /// Creates the container, attaches to collect its output, starts it,
    /// waits for it to finish, then removes it. Returns the output lines,
    /// or an error if the container exited with a non-zero code.
    pub async fn run_cmd(&self, cmd: &str) -> anyhow::Result<Vec<String>> {
        let cmdv = split_cmd(cmd);
        let config = Config {
            image: Some(self.image.clone()),
            user: Some(self.user.to_string()),
            cmd: Some(cmdv),
            attach_stderr: Some(true),
            attach_stdout: Some(true),
            tty: Some(true),
            labels: Some(self.labels()),
            env: Some(self.env()),
            host_config: Some(HostConfig {
                // We'll remove it explicitly at the end after collecting the output.
                auto_remove: Some(false),
                init: Some(true),
                binds: Some(
                    self.volumes
                        .iter()
                        .map(|(h, c)| format!("{}:{c}", h.to_string_lossy()))
                        .collect(),
                ),
                network_mode: self.network_name.clone(),
                ..Default::default()
            }),
            ..Default::default()
        };
        let id = self
            .docker
            .create_container::<&str, _>(None, config)
            .await
            .context("failed to create container")?
            .id;
        // Attach before starting so no output is missed.
        let AttachContainerResults { mut output, .. } = self
            .docker
            .attach_container::<String>(
                &id,
                Some(AttachContainerOptions {
                    stdout: Some(true),
                    stderr: Some(true),
                    stream: Some(true),
                    ..Default::default()
                }),
            )
            .await
            .context("failed to attach to container")?;
        self.docker
            .start_container::<&str>(&id, None)
            .await
            .context("failed to start container")?;
        // Collect docker attach output
        let mut out = Vec::new();
        while let Some(Ok(output)) = output.next().await {
            out.push(output.to_string());
        }
        // Echo everything for easier debugging of the materializer itself.
        eprintln!("RESOURCE: {} ({id})", self.name);
        eprintln!("CMD: {cmd}");
        for o in out.iter() {
            eprint!("OUT: {o}");
        }
        eprintln!("---");
        // Check the exit code before removing the container.
        let inspect: ContainerInspectResponse = self
            .docker
            .inspect_container(&id, None)
            .await
            .context("failed to inspect container")?;
        self.docker
            .remove_container(
                &id,
                Some(RemoveContainerOptions {
                    force: true,
                    ..Default::default()
                }),
            )
            .await?;
        if let Some(ref state) = inspect.state {
            let exit_code = state.exit_code.unwrap_or_default();
            if exit_code != 0 {
                bail!(
                    "container exited with code {exit_code}: '{}'",
                    state.error.clone().unwrap_or_default()
                );
            }
        }
        Ok(out)
    }
    /// Create a container to be started later.
    pub async fn create(
        &self,
        name: String,
        // Host <-> Container port mappings
        ports: Vec<(u32, u32)>,
        entrypoint: Vec<String>,
    ) -> anyhow::Result<DockerContainer> {
        let config = Config {
            // Use the container name as hostname so other containers can reach it by name.
            hostname: Some(name.clone()),
            image: Some(self.image.clone()),
            user: Some(self.user.to_string()),
            entrypoint: Some(entrypoint),
            labels: Some(self.labels()),
            env: Some(self.env()),
            cmd: None,
            host_config: Some(HostConfig {
                init: Some(true),
                binds: Some(
                    self.volumes
                        .iter()
                        .map(|(h, c)| format!("{}:{c}", h.to_string_lossy()))
                        .collect(),
                ),
                port_bindings: Some(
                    ports
                        .into_iter()
                        .flat_map(|(h, c)| {
                            let binding = PortBinding {
                                host_ip: None,
                                host_port: Some(h.to_string()),
                            };
                            // Emitting both TCP and UDP, just in case.
                            vec![
                                (format!("{c}/tcp"), Some(vec![binding.clone()])),
                                (format!("{c}/udp"), Some(vec![binding])),
                            ]
                        })
                        .collect(),
                ),
                ..Default::default()
            }),
            ..Default::default()
        };
        let id = self
            .docker
            .create_container::<String, _>(
                Some(CreateContainerOptions {
                    name: name.clone(),
                    ..Default::default()
                }),
                config,
            )
            .await
            .context("failed to create container")?
            .id;
        eprintln!("RESOURCE: {}", self.name);
        eprintln!("CREATED CONTAINER: {} ({})", name, id);
        eprintln!("---");
        // host_config.network_mode should work as well.
        if let Some(network_name) = self.network_name.as_ref() {
            self.docker
                .connect_network(
                    network_name,
                    ConnectNetworkOptions {
                        container: id.clone(),
                        ..Default::default()
                    },
                )
                .await
                .context("failed to connect container to network")?;
        }
        Ok(DockerContainer::new(
            self.docker.clone(),
            self.dropper.clone(),
            DockerConstruct {
                id,
                name,
                keep: self.drop_policy.keep(true),
            },
        ))
    }
}
/// Split a command line into its whitespace-separated tokens.
///
/// Runs of ASCII whitespace are collapsed, so the result never contains
/// empty strings; leading and trailing whitespace is ignored.
pub fn split_cmd(cmd: &str) -> Vec<String> {
    let mut parts = Vec::new();
    for token in cmd.split_ascii_whitespace() {
        parts.push(String::from(token));
    }
    parts
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
// ---- file: fendermint/testing/materializer/src/docker/relayer.rs ----
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{fmt::Display, path::Path};
use anyhow::Context;
use bollard::Docker;
use crate::{
docker::{
runner::{split_cmd, DockerRunner},
user_id, FENDERMINT_IMAGE,
},
manifest::EnvMap,
materials::{DefaultAccount, DefaultSubnet},
RelayerName, ResourceHash, TestnetResource,
};
use super::{container::DockerContainer, dropper::DropChute, network::NetworkName, DropPolicy};
/// A relayer running in its own docker container, created from the
/// `ipc-cli checkpoint relayer` entrypoint.
pub struct DockerRelayer {
    // Logical name of the relayer within the testnet.
    relayer_name: RelayerName,
    // The container executing the relayer command.
    relayer: DockerContainer,
}
impl Display for DockerRelayer {
    /// Render the relayer by its logical name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.relayer_name)
    }
}
impl DockerRelayer {
    /// Get or create the relayer container.
    ///
    /// This assumes that the submitter and the involved parent and child
    /// subnets have been added to the `ipc-cli` config.
    ///
    /// The container is created but not started; call [`Self::start`] to
    /// run it. An already existing container is returned as-is.
    #[allow(clippy::too_many_arguments)]
    pub async fn get_or_create(
        root: impl AsRef<Path>,
        docker: Docker,
        dropper: DropChute,
        drop_policy: &DropPolicy,
        relayer_name: &RelayerName,
        subnet: &DefaultSubnet,
        submitter: &DefaultAccount,
        network_name: Option<NetworkName>,
        env: &EnvMap,
    ) -> anyhow::Result<Self> {
        let container_name = container_name(relayer_name);
        // If the container exists, return it.
        if let Some(relayer) = DockerContainer::get(
            docker.clone(),
            dropper.clone(),
            drop_policy,
            container_name.clone(),
        )
        .await?
        {
            return Ok(Self {
                relayer_name: relayer_name.clone(),
                relayer,
            });
        }
        // We'll need to mount the IPC configuration for the relayer.
        let ipc_dir = root.as_ref().join(subnet.name.testnet()).join("ipc");
        // Run as the directory owner so mounted files stay accessible.
        let user = user_id(&ipc_dir)?;
        // The CLI only logs to the output. Its log level can be configured with the general env vars.
        let volumes = vec![(ipc_dir, "/fendermint/.ipc")];
        let creator = DockerRunner::new(
            docker,
            dropper,
            drop_policy.clone(),
            relayer_name.clone(),
            user,
            FENDERMINT_IMAGE,
            volumes,
            network_name,
        )
        .with_env(env.clone());
        // TODO: Do we need to use any env vars with the relayer?
        // NOTE(review): `{:?}` is presumably used so the submitter address is
        // printed in full rather than ellipsized — confirm against `eth_addr()`.
        let entrypoint = split_cmd(&format!(
            "ipc-cli \
            --config-path /fendermint/.ipc/config.toml \
            checkpoint relayer \
            --subnet {} \
            --submitter {:?} \
            ",
            subnet.subnet_id,
            submitter.eth_addr()
        ));
        let relayer = creator
            .create(container_name, Default::default(), entrypoint)
            .await
            .context("failed to create relayer")?;
        Ok(Self {
            relayer_name: relayer_name.clone(),
            relayer,
        })
    }
    /// Start the relayer, unless it's already running.
    pub async fn start(&self) -> anyhow::Result<()> {
        self.relayer.start().await
    }
}
/// Create a container name from the relayer name.
///
/// It consists of `{relayer-id}-relayer-{hash(relayer-name)}`, where the
/// hash is truncated to its first 6 characters so the name stays short
/// while remaining unique per relayer path.
fn container_name(relayer_name: &RelayerName) -> String {
    let relayer_id = relayer_name
        .path()
        .file_stem()
        .unwrap_or_default()
        .to_string_lossy()
        .into_owned();
    let digest = ResourceHash::digest(relayer_name.path_string()).to_string();
    format!("{}-relayer-{}", relayer_id, &digest[..6])
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
// ---- file: fendermint/testing/materializer/src/docker/network.rs ----
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{collections::HashMap, fmt::Display};
use anyhow::{anyhow, Context};
use bollard::{
network::{CreateNetworkOptions, ListNetworksOptions},
service::{Network, NetworkCreateResponse},
Docker,
};
use crate::TestnetName;
use super::{
dropper::{DropChute, DropCommand, DropPolicy},
DockerConstruct,
};
/// Docker networks are identified by a plain string name.
pub type NetworkName = String;
/// A docker network dedicated to one testnet; the testnet's containers
/// attach to it so they can reach each other by hostname.
pub struct DockerNetwork {
    docker: Docker,
    // Channel on which the network schedules its own removal when dropped.
    dropper: DropChute,
    /// There is a single docker network created for the entire testnet.
    testnet_name: TestnetName,
    // The underlying docker resource (id, name, keep flag).
    network: DockerConstruct,
}
impl Display for DockerNetwork {
    /// Render the network by its docker network name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.network_name())
    }
}
impl DockerNetwork {
    /// Name of the testnet this network belongs to.
    pub fn testnet_name(&self) -> &TestnetName {
        &self.testnet_name
    }
    /// The docker network name, derived from the testnet path.
    pub fn network_name(&self) -> &NetworkName {
        &self.network.name
    }
    /// Check if an externally managed network already exists;
    /// if not, create a new docker network for the testnet.
    ///
    /// A pre-existing network is marked as not created by us, so the drop
    /// policy will typically keep it; a freshly created one is removed
    /// when the handle is dropped (subject to `drop_policy`).
    pub async fn get_or_create(
        docker: Docker,
        dropper: DropChute,
        testnet_name: TestnetName,
        drop_policy: &DropPolicy,
    ) -> anyhow::Result<Self> {
        let network_name = testnet_name.path_string();
        // NOTE(review): the docker "name" filter looks like a substring match;
        // presumably testnet names are never prefixes of each other — confirm.
        let mut filters = HashMap::new();
        filters.insert("name".to_string(), vec![network_name.clone()]);
        let networks: Vec<Network> = docker
            .list_networks(Some(ListNetworksOptions { filters }))
            .await
            .context("failed to list docker networks")?;
        // `is_new` records whether this call created the network.
        let (id, is_new) = match networks.first() {
            None => {
                let network: NetworkCreateResponse = docker
                    .create_network(CreateNetworkOptions {
                        name: network_name.clone(),
                        ..Default::default()
                    })
                    .await
                    .context("failed to create docker network")?;
                let id = network
                    .id
                    .clone()
                    .ok_or_else(|| anyhow!("created docker network has no id"))?;
                (id, true)
            }
            Some(network) => {
                let id = network
                    .id
                    .clone()
                    .ok_or_else(|| anyhow!("docker network {network_name} has no id"))?;
                (id, false)
            }
        };
        Ok(Self {
            docker,
            dropper,
            testnet_name,
            network: DockerConstruct {
                id,
                name: network_name,
                keep: drop_policy.keep(is_new),
            },
        })
    }
}
impl Drop for DockerNetwork {
    /// Schedule the docker network for removal, unless it is marked to be kept.
    fn drop(&mut self) {
        if !self.network.keep {
            let cmd = DropCommand::DropNetwork(self.network.name.clone());
            if self.dropper.send(cmd).is_err() {
                tracing::error!(
                    network_name = self.network.name,
                    "dropper no longer listening"
                );
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use bollard::Docker;
    use std::time::Duration;
    use super::DockerNetwork;
    use crate::{
        docker::dropper::{self, DropPolicy},
        TestnetName,
    };
    /// End-to-end check of network creation, reuse and drop-based removal.
    ///
    /// NOTE(review): requires a reachable local docker daemon.
    #[tokio::test]
    async fn test_network() {
        let tn = TestnetName::new("test-network");
        let docker = Docker::connect_with_local_defaults().expect("failed to connect to docker");
        let (drop_handle, drop_chute) = dropper::start(docker.clone());
        let drop_policy = DropPolicy::default();
        // First call creates the network...
        let n1 = DockerNetwork::get_or_create(
            docker.clone(),
            drop_chute.clone(),
            tn.clone(),
            &drop_policy,
        )
        .await
        .expect("failed to create network");
        // ...second call finds the existing one.
        let n2 = DockerNetwork::get_or_create(docker.clone(), drop_chute, tn.clone(), &drop_policy)
            .await
            .expect("failed to get network");
        assert!(
            !n1.network.keep,
            "when created, the network should not be marked to keep"
        );
        assert!(
            n2.network.keep,
            "when already exists, the network should be kept"
        );
        assert_eq!(n1.network.id, n2.network.id);
        assert_eq!(n1.network.name, n2.network.name);
        assert_eq!(n1.network.name, "testnets/test-network");
        let id = n1.network.id.clone();
        // The sleep gives the background dropper time to process removals
        // before we check whether the network still exists.
        let exists = || async {
            tokio::time::sleep(Duration::from_millis(250)).await;
            let ns = docker.list_networks::<String>(None).await.unwrap();
            ns.iter().any(|n| n.id == Some(id.clone()))
        };
        drop(n2);
        assert!(exists().await, "network still exists after n2 dropped");
        drop(n1);
        // Wait for the dropper task to finish all pending removals.
        let _ = drop_handle.await;
        assert!(
            !exists().await,
            "network should be removed when n1 is dropped"
        );
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
// ---- file: fendermint/testing/materializer/src/docker/container.rs ----
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use futures::StreamExt;
use std::collections::HashMap;
use bollard::{
container::{ListContainersOptions, LogsOptions},
secret::{ContainerInspectResponse, ContainerStateStatusEnum},
service::ContainerSummary,
Docker,
};
use super::{
dropper::{DropChute, DropCommand, DropPolicy},
DockerConstruct,
};
/// Time to wait before killing the container if it doesn't want to stop.
const KILL_TIMEOUT_SECS: i64 = 5;
/// Handle to a docker container; schedules the container's removal when
/// dropped (unless marked to be kept).
pub struct DockerContainer {
    docker: Docker,
    // Channel on which the container schedules its own removal when dropped.
    dropper: DropChute,
    // The underlying docker resource (id, name, keep flag).
    container: DockerConstruct,
}
impl DockerContainer {
    /// Wrap an existing docker container in a handle.
    pub fn new(docker: Docker, dropper: DropChute, container: DockerConstruct) -> Self {
        Self {
            docker,
            dropper,
            container,
        }
    }
    /// The container name, which doubles as its hostname on the docker network.
    pub fn hostname(&self) -> &str {
        &self.container.name
    }
    /// Get a container by name, if it exists.
    ///
    /// An existing container was not created by this call, so the drop
    /// policy is consulted with `created = false`.
    pub async fn get(
        docker: Docker,
        dropper: DropChute,
        drop_policy: &DropPolicy,
        name: String,
    ) -> anyhow::Result<Option<Self>> {
        // NOTE(review): the docker "name" filter looks like a substring match;
        // presumably container names are never prefixes of each other — confirm.
        let mut filters = HashMap::new();
        filters.insert("name".to_string(), vec![name.clone()]);
        let containers: Vec<ContainerSummary> = docker
            .list_containers(Some(ListContainersOptions {
                // Include stopped containers, not just running ones.
                all: true,
                filters,
                ..Default::default()
            }))
            .await
            .context("failed to list docker containers")?;
        match containers.first() {
            None => Ok(None),
            Some(container) => {
                let id = container
                    .id
                    .clone()
                    .ok_or_else(|| anyhow!("docker container {name} has no id"))?;
                Ok(Some(Self::new(
                    docker,
                    dropper,
                    DockerConstruct {
                        id,
                        name,
                        keep: drop_policy.keep(false),
                    },
                )))
            }
        }
    }
    /// Start the container, unless it's already running.
    pub async fn start(&self) -> anyhow::Result<()> {
        let inspect: ContainerInspectResponse = self
            .docker
            .inspect_container(&self.container.id, None)
            .await
            .with_context(|| {
                format!(
                    "failed to inspect container: {} ({})",
                    self.container.name, self.container.id,
                )
            })?;
        // Idempotency; we could be re-running the materializer after it failed somewhere along testnet creation.
        if let Some(ContainerStateStatusEnum::RUNNING) = inspect.state.and_then(|s| s.status) {
            return Ok(());
        }
        eprintln!(
            "STARTING CONTAINER: {} ({})",
            self.container.name, self.container.id
        );
        self.docker
            .start_container::<&str>(&self.container.id, None)
            .await
            .with_context(|| {
                format!(
                    "failed to start container: {} ({})",
                    self.container.name, self.container.id
                )
            })?;
        Ok(())
    }
    /// Simplistic way of collecting logs of containers used in the test,
    /// mostly to debug build failures on CI.
    ///
    /// Errors in the log stream end the collection silently; whatever was
    /// gathered so far is returned.
    pub async fn logs(&self) -> Vec<String> {
        let mut log_stream = self.docker.logs::<&str>(
            &self.container.name,
            Some(LogsOptions {
                stdout: true,
                stderr: true,
                // Return what exists now instead of tailing forever.
                follow: false,
                ..Default::default()
            }),
        );
        let mut out = Vec::new();
        while let Some(Ok(log)) = log_stream.next().await {
            out.push(log.to_string().trim().to_string());
        }
        out
    }
}
}
impl Drop for DockerContainer {
    /// Schedule the container for removal, unless it is marked to be kept.
    fn drop(&mut self) {
        if !self.container.keep {
            let cmd = DropCommand::DropContainer(self.container.name.clone());
            if self.dropper.send(cmd).is_err() {
                tracing::error!(
                    container_name = self.container.name,
                    "dropper no longer listening"
                );
            }
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
// ---- file: fendermint/testing/materializer/src/docker/mod.rs ----
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, bail, Context};
use async_trait::async_trait;
use bollard::{
container::{ListContainersOptions, RemoveContainerOptions},
network::ListNetworksOptions,
secret::{ContainerSummary, Network},
Docker,
};
use either::Either;
use ethers::{
core::rand::{rngs::StdRng, SeedableRng},
types::H160,
};
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_core::{chainid, Timestamp};
use fendermint_vm_genesis::{
ipc::{GatewayParams, IpcParams},
Account, Actor, ActorMeta, Collateral, Genesis, SignerAddr, Validator, ValidatorKey,
};
use fvm_shared::{bigint::Zero, chainid::ChainID, econ::TokenAmount, version::NetworkVersion};
use ipc_api::subnet_id::SubnetID;
use ipc_provider::config::subnet::{
EVMSubnet, Subnet as IpcCliSubnet, SubnetConfig as IpcCliSubnetConfig,
};
use ipc_provider::config::Config as IpcCliConfig;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::{
collections::{BTreeMap, HashMap},
os::unix::fs::MetadataExt,
path::{Path, PathBuf},
str::FromStr,
time::Duration,
};
use url::Url;
use crate::{
manifest::Balance,
materializer::{
Materializer, NodeConfig, RelayerConfig, SubmitConfig, SubnetConfig, TargetConfig,
},
materials::{
export_file, export_json, export_script, import_json, DefaultAccount, DefaultDeployment,
DefaultGenesis, DefaultSubnet, Materials,
},
CliName, NodeName, RelayerName, ResourceHash, ResourceName, SubnetName, TestnetName,
TestnetResource,
};
mod container;
mod dropper;
mod network;
mod node;
mod relayer;
mod runner;
pub use dropper::DropPolicy;
pub use network::DockerNetwork;
pub use node::DockerNode;
pub use relayer::DockerRelayer;
use self::{dropper::DropHandle, network::NetworkName, runner::DockerRunner};
// TODO: Add these to the materializer.
/// CometBFT image used for the consensus containers.
const COMETBFT_IMAGE: &str = "cometbft/cometbft:v0.38.x";
/// Fendermint image; also provides the `ipc-cli` binary.
const FENDERMINT_IMAGE: &str = "fendermint:latest";
/// File where the materializer persists its resumable state.
const STATE_JSON_FILE_NAME: &str = "materializer-state.json";
/// Entry script bundled at compile time and exported for mounting into containers.
const DOCKER_ENTRY_SCRIPT: &str = include_str!("../../scripts/docker-entry.sh");
const DOCKER_ENTRY_FILE_NAME: &str = "docker-entry.sh";
/// First host port handed out; each node gets a `PORT_RANGE_SIZE`-wide slice.
const PORT_RANGE_START: u32 = 30000;
const PORT_RANGE_SIZE: u32 = 100;
lazy_static! {
    /// How long to wait for a node to report itself as started.
    static ref STARTUP_TIMEOUT: Duration = Duration::from_secs(30);
}
/// `(host path, container path)` bind mounts.
type Volumes = Vec<(PathBuf, &'static str)>;
/// Build a `BTreeMap<String, String>` of env vars from `"KEY" => value` pairs.
#[macro_export]
macro_rules! env_vars {
    ( $($key:literal => $value:expr),* $(,)? ) => {
        BTreeMap::from([ $( ($key.to_string(), $value.to_string()) ),* ])
    };
}
/// Marker type selecting the docker-backed set of testnet materials.
pub struct DockerMaterials;
impl Materials for DockerMaterials {
    // Plain file/key based materials shared with other materializers.
    type Deployment = DefaultDeployment;
    type Account = DefaultAccount;
    type Genesis = DefaultGenesis;
    type Subnet = DefaultSubnet;
    // Docker-specific materials: these wrap docker networks/containers.
    type Network = DockerNetwork;
    type Node = DockerNode;
    type Relayer = DockerRelayer;
}
/// A thing constructed by docker.
#[derive(Debug, Clone)]
pub struct DockerConstruct {
    /// Unique ID of the thing.
    pub id: String,
    /// The name of the thing that we can use in docker commands.
    pub name: String,
    /// Indicate whether the thing was created outside the test,
    /// or it can be destroyed when it goes out of scope.
    /// `true` means the `Drop` impls must leave it alone.
    pub keep: bool,
}
/// Allocated (inclusive) range we can use to expose containers' ports on the host.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DockerPortRange {
    /// First host port of the range (inclusive).
    pub from: u32,
    /// Last host port of the range (inclusive).
    pub to: u32,
}
/// Mapping ports assuming a 100 size ranges.
///
/// The ports on the host are assigned so that they end with the same number as the internal one,
/// which is hopefully a little bit intuitive for anyone who is familiar with the default values.
impl DockerPortRange {
    /// Host port whose last two digits match the conventional internal port.
    fn host_port(&self, suffix: u32) -> u32 {
        self.from + suffix
    }
    /// Mapping the internal 26655 port to the host.
    pub fn resolver_p2p_host_port(&self) -> u32 {
        self.host_port(55)
    }
    /// Mapping the internal 26656 port to the host.
    pub fn cometbft_p2p_host_port(&self) -> u32 {
        self.host_port(56)
    }
    /// Mapping the internal 26657 port to the host.
    pub fn cometbft_rpc_host_port(&self) -> u32 {
        self.host_port(57)
    }
    /// Mapping the internal 8445 port to the host.
    pub fn ethapi_rpc_host_port(&self) -> u32 {
        self.host_port(45)
    }
    /// Mapping the internal 9184 of fendermint to the host.
    pub fn fendermint_metrics_host_port(&self) -> u32 {
        self.host_port(84)
    }
}
/// State of the materializer that it persists, so that it can resume operations.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct DockerMaterializerState {
    /// Port ranges ever allocated by this materializer.
    ///
    /// Entries are never removed; the map size determines the next free range.
    port_ranges: BTreeMap<NodeName, DockerPortRange>,
}
/// Materializer that realises a testnet as docker networks and containers,
/// persisting enough state on disk to be able to resume.
pub struct DockerMaterializer {
    // Root directory where all testnets live next to each other.
    dir: PathBuf,
    // Seeded RNG so account/key generation is reproducible.
    rng: StdRng,
    docker: bollard::Docker,
    // Handle to the background task that performs docker removals.
    drop_handle: dropper::DropHandle,
    // Sender that constructs use to schedule their own removal.
    drop_chute: dropper::DropChute,
    drop_policy: dropper::DropPolicy,
    // Persisted state, e.g. allocated port ranges.
    state: DockerMaterializerState,
}
impl DockerMaterializer {
/// Create a materializer with a directory where all the
/// testnets can live next to each other.
///
/// Restores any previously saved state from `dir`, writes it back
/// immediately, and exports the helper scripts that get mounted
/// into containers.
pub fn new(dir: &Path, seed: u64) -> anyhow::Result<Self> {
    let docker =
        Docker::connect_with_local_defaults().context("failed to connect to Docker")?;
    // Create a runtime for the execution of drop tasks.
    let (drop_handle, drop_chute) = dropper::start(docker.clone());
    // Read in the state if it exists, otherwise create a default one.
    let state = import_json(dir.join(STATE_JSON_FILE_NAME))
        .context("failed to read state")?
        .unwrap_or_default();
    let m = Self {
        dir: dir.into(),
        rng: StdRng::seed_from_u64(seed),
        docker,
        drop_handle,
        drop_chute,
        state,
        drop_policy: DropPolicy::default(),
    };
    m.save_state().context("failed to save state")?;
    m.export_scripts().context("failed to export scripts")?;
    Ok(m)
}
/// Override the default drop policy (builder style).
pub fn with_policy(mut self, policy: DropPolicy) -> Self {
    self.drop_policy = policy;
    self
}
/// Remove all traces of a testnet.
///
/// Force-removes every docker container labelled with the testnet,
/// removes the matching docker network(s), and deletes the testnet's
/// directory on disk. A missing directory is tolerated.
pub async fn remove(&mut self, testnet_name: &TestnetName) -> anyhow::Result<()> {
    let testnet = testnet_name.path_string();
    // Containers are found by the `testnet=<name>` label.
    let mut filters = HashMap::new();
    filters.insert("label".to_string(), vec![format!("testnet={}", testnet)]);
    let containers: Vec<ContainerSummary> = self
        .docker
        .list_containers(Some(ListContainersOptions {
            // Include stopped containers as well.
            all: true,
            filters,
            ..Default::default()
        }))
        .await
        .context("failed to list docker containers")?;
    let ids = containers.into_iter().filter_map(|c| c.id);
    for id in ids {
        eprintln!("removing docker container {id}");
        self.docker
            .remove_container(
                &id,
                Some(RemoveContainerOptions {
                    // `force` stops a still-running container;
                    // `v` also removes its anonymous volumes.
                    force: true,
                    v: true,
                    ..Default::default()
                }),
            )
            .await
            .with_context(|| format!("failed to remove container {id}"))?;
    }
    let mut filters = HashMap::new();
    filters.insert("name".to_string(), vec![testnet]);
    let networks: Vec<Network> = self
        .docker
        .list_networks(Some(ListNetworksOptions { filters }))
        .await
        .context("failed to list networks")?;
    let ids = networks.into_iter().filter_map(|n| n.id);
    for id in ids {
        eprintln!("removing docker network {id}");
        self.docker
            .remove_network(&id)
            .await
            .context("failed to remove network")?;
    }
    let dir = self.dir.join(testnet_name.path());
    if let Err(e) = std::fs::remove_dir_all(&dir) {
        // Check the error kind instead of matching on the English error
        // message, which is locale- and platform-dependent.
        if e.kind() != std::io::ErrorKind::NotFound {
            bail!(
                "failed to remove testnet directory {}: {e:?}",
                dir.to_string_lossy()
            );
        }
    };
    Ok(())
}
/// Replace the dropper with a new one and return the existing one so that we can await all the drop tasks being completed.
pub fn take_dropper(&mut self) -> DropHandle {
    let (new_handle, new_chute) = dropper::start(self.docker.clone());
    // Swap in the fresh chute first, so only already-existing docker
    // constructs keep a reference to the old one; the caller decides when
    // to await the returned handle, once the testnet has been dropped.
    self.drop_chute = new_chute;
    std::mem::replace(&mut self.drop_handle, new_handle)
}
/// Path to a directory based on a resource name.
fn path<T: AsRef<ResourceName>>(&self, name: T) -> PathBuf {
let name: &ResourceName = name.as_ref();
self.dir.join(&name.0)
}
/// Path where the state of the materializer is saved.
fn state_path(&self) -> PathBuf {
self.dir.join(STATE_JSON_FILE_NAME)
}
/// Directory where scripts are exported, to be mounted into containers.
fn scripts_dir(&self) -> PathBuf {
self.dir.join("scripts")
}
/// Export scripts that need to be mounted.
fn export_scripts(&self) -> anyhow::Result<()> {
let scripts_dir = self.scripts_dir();
export_script(scripts_dir.join("docker-entry.sh"), DOCKER_ENTRY_SCRIPT)?;
Ok(())
}
/// Update the config file of the `ipc-cli` in a given testnet.
///
/// Starts from a default config (with a `~/.ipc` keystore) if the file
/// does not exist yet, applies `f`, then writes the result back as TOML.
fn update_ipc_cli_config<F, T>(&mut self, testnet_name: &TestnetName, f: F) -> anyhow::Result<T>
where
    F: FnOnce(&mut IpcCliConfig) -> T,
{
    let file_name = self.ipc_dir(testnet_name).join("config.toml");
    let mut config = if !file_name.exists() {
        IpcCliConfig {
            keystore_path: Some("~/.ipc".to_string()),
            subnets: Default::default(),
        }
    } else {
        IpcCliConfig::from_file(&file_name).context("failed to read ipc-cli config")?
    };
    let value = f(&mut config);
    let config_toml =
        toml::to_string_pretty(&config).context("failed to serialize ipc-cli config")?;
    export_file(&file_name, config_toml).context("failed to write ipc-cli config")?;
    Ok(value)
}
/// Update the state, save it to JSON, then return whatever value the update returns.
fn update_state<F, T>(&mut self, f: F) -> anyhow::Result<T>
where
    F: FnOnce(&mut DockerMaterializerState) -> T,
{
    let value = f(&mut self.state);
    self.save_state()?;
    Ok(value)
}
/// Write the state to a JSON file.
fn save_state(&self) -> anyhow::Result<()> {
    export_json(self.state_path(), &self.state).context("failed to export state")
}
/// Return an existing genesis by parsing it from the `genesis.json` of the subnet,
/// or create a new one and export it.
///
/// `make_genesis` is only invoked when no genesis file exists yet,
/// which keeps repeated materializer runs idempotent.
fn get_or_create_genesis<F>(
    &self,
    subnet_name: &SubnetName,
    make_genesis: F,
) -> anyhow::Result<DefaultGenesis>
where
    F: FnOnce() -> anyhow::Result<Genesis>,
{
    let subnet_path = self.path(subnet_name);
    let genesis_path = subnet_path.join("genesis.json");
    let genesis = match import_json(&genesis_path).context("failed to read genesis")? {
        Some(genesis) => genesis,
        None => {
            let genesis = make_genesis().context("failed to make genesis")?;
            export_json(&genesis_path, &genesis).context("failed to export genesis")?;
            genesis
        }
    };
    Ok(DefaultGenesis {
        name: subnet_name.clone(),
        genesis,
        path: genesis_path,
    })
}
/// Pick a range for a container. Remember the choice so that we can recreate
/// this materializer in a test and allocate more if needed without clashes.
fn port_range(&mut self, node_name: &NodeName) -> anyhow::Result<DockerPortRange> {
    if let Some(range) = self.state.port_ranges.get(node_name) {
        return Ok(range.clone());
    }
    // Currently the range allocations are not dropped from the materializer,
    // so the length can be used to derive the next available port. Otherwise
    // we could loop through to find an unused slot.
    let node_count = self.state.port_ranges.len() as u32;
    let from = PORT_RANGE_START + PORT_RANGE_SIZE * node_count;
    let to = from + PORT_RANGE_SIZE;
    let range = DockerPortRange { from, to };
    // Persist the allocation so a re-run sees the same mapping.
    self.update_state(|s| s.port_ranges.insert(node_name.clone(), range.clone()))?;
    Ok(range)
}
/// Directory holding the `ipc-cli` configuration of a testnet.
fn ipc_dir(&self, testnet_name: &TestnetName) -> PathBuf {
    self.path(testnet_name).join("ipc")
}
/// Directory holding the exported account keys of a testnet.
fn accounts_dir(&self, testnet_name: &TestnetName) -> PathBuf {
    self.path(testnet_name).join("accounts")
}
/// Create an instance of an `fendermint` command runner.
///
/// The subnet directory is mounted at `/fendermint/subnet` and commands
/// run as the directory's owner.
fn fendermint_cli_runner(
    &self,
    subnet_name: &SubnetName,
    network_name: Option<&NetworkName>,
) -> anyhow::Result<DockerRunner<CliName>> {
    let subnet_dir = self.path(subnet_name);
    // Use the owner of the directory for the container, so we don't get permission issues.
    let user = user_id(&subnet_dir)?;
    // Mount the subnet so we can create files there
    let volumes = vec![(subnet_dir, "/fendermint/subnet")];
    let cli_name = subnet_name.cli("fendermint");
    let runner = DockerRunner::new(
        self.docker.clone(),
        self.drop_chute.clone(),
        self.drop_policy.clone(),
        cli_name,
        user,
        FENDERMINT_IMAGE,
        volumes,
        network_name.cloned(),
    );
    Ok(runner)
}
/// Create an instance of an `ipc-cli` command runner.
///
/// Mounts the testnet's `ipc` directory (the CLI's `~/.ipc`) and the
/// exported account keys, so wallets can be imported.
fn ipc_cli_runner(
    &self,
    testnet_name: &TestnetName,
    network_name: Option<&NetworkName>,
) -> anyhow::Result<DockerRunner<CliName>> {
    // Create a directory to hold the wallet.
    let ipc_dir = self.ipc_dir(testnet_name);
    let accounts_dir = self.accounts_dir(testnet_name);
    // Create a `~/.ipc` directory, as expected by default by the `ipc-cli`.
    std::fs::create_dir_all(&ipc_dir).context("failed to create .ipc dir")?;
    // Use the owner of the directory for the container, so we don't get permission issues.
    let user = user_id(&ipc_dir)?;
    // Mount the `~/.ipc` directory and all the keys to be imported.
    let volumes = vec![
        (ipc_dir, "/fendermint/.ipc"),
        (accounts_dir, "/fendermint/accounts"),
    ];
    let cli_name = testnet_name.root().cli("ipc");
    let runner = DockerRunner::new(
        self.docker.clone(),
        self.drop_chute.clone(),
        self.drop_policy.clone(),
        cli_name,
        user,
        FENDERMINT_IMAGE,
        volumes,
        network_name.cloned(),
    );
    Ok(runner)
}
/// Import the private key of an account into the `ipc-cli` wallet.
///
/// Assumes the account secret was exported under the mounted accounts directory.
async fn ipc_cli_wallet_import(
    runner: &DockerRunner<CliName>,
    account: &DefaultAccount,
) -> anyhow::Result<()> {
    let account_id = account.account_id();
    let account_id: &str = account_id.as_ref();
    let cmd = format!(
        "ipc-cli wallet import \
        --wallet-type evm \
        --path /fendermint/accounts/{account_id}/secret.hex \
        "
    );
    // TODO: It would be nice to skip if already imported, but not crucial.
    runner
        .run_cmd(&cmd)
        .await
        .context("failed to import wallet")?;
    Ok(())
}
/// Add the subnet to the `config.toml` of the `ipc-cli`.
fn ipc_cli_config_add_subnet(
    &mut self,
    submit_config: &SubmitConfig<DockerMaterials>,
) -> anyhow::Result<()> {
    let testnet_name = submit_config.subnet.name.testnet();
    let subnet_id = submit_config.subnet.subnet_id.clone();
    // Find a node to which the `ipc-cli` can connect to create the subnet.
    // Using the internal HTTP address, assuming that the dockerized `ipc-cli`
    // will always mount the config file and talk to the nodes within the docker network.
    let url: Url = submit_config
        .nodes
        .iter()
        .filter_map(|tc| match tc {
            TargetConfig::External(url) => Some(url.clone()),
            TargetConfig::Internal(node) => node.internal_ethapi_http_endpoint(),
        })
        .next()
        .ok_or_else(|| anyhow!("there has to be some nodes with eth API enabled"))?;
    // Create a `config.toml` file for the `ipc-cli` based on the deployment of the parent.
    self.update_ipc_cli_config(&testnet_name, |config| {
        config.add_subnet(IpcCliSubnet {
            id: subnet_id,
            config: IpcCliSubnetConfig::Fevm(EVMSubnet {
                provider_http: url,
                provider_timeout: Some(Duration::from_secs(30)),
                auth_token: None,
                registry_addr: submit_config.deployment.registry.into(),
                gateway_addr: submit_config.deployment.gateway.into(),
            }),
        })
    })
    .context("failed to update CLI config")?;
    Ok(())
}
/// Run some kind of command with the `ipc-cli` that needs to be executed as
/// transaction by an account on a given subnet.
///
/// Ensures the subnet is present in the CLI config and the account's key
/// is imported into the wallet before running `cmd`; returns the captured
/// output lines.
async fn ipc_cli_run_cmd<'a>(
    &mut self,
    submit_config: &SubmitConfig<'a, DockerMaterials>,
    account: &DefaultAccount,
    cmd: String,
) -> anyhow::Result<Vec<String>> {
    // Make sure the config file exists before trying to run any commands.
    self.ipc_cli_config_add_subnet(submit_config)?;
    // Prefer running on the docker network of one of the internal nodes.
    let submit_node = submit_config
        .nodes
        .iter()
        .filter_map(|tc| match tc {
            TargetConfig::Internal(node) => Some(node),
            TargetConfig::External(_) => None,
        })
        .next();
    let runner = self.ipc_cli_runner(
        &submit_config.subnet.name.testnet(),
        submit_node.map(|n| n.network_name()),
    )?;
    // Make sure the account we run the command with exists in the wallet.
    Self::ipc_cli_wallet_import(&runner, account).await?;
    let logs = runner
        .run_cmd(&cmd)
        .await
        .context("failed to run ipc-cli command")?;
    Ok(logs)
}
/// Path of the marker file recording that an operation identified by
/// `rh` has already been performed on a subnet.
fn reference_path(&self, sn: &SubnetName, rh: &ResourceHash) -> PathBuf {
    self.path(sn.testnet()).join("refs").join(hex::encode(rh.0))
}
/// `true` if the referenced operation was already performed, i.e. its
/// marker file exists; `false` when there is no reference at all.
fn has_reference(&self, sn: &SubnetName, reference: &Option<ResourceHash>) -> bool {
    reference
        .as_ref()
        .map(|rh| self.reference_path(sn, rh).exists())
        .unwrap_or_default()
}
/// Record that the referenced operation has been performed by touching
/// an empty marker file; no-op if there is no reference.
fn add_reference(
    &self,
    sn: &SubnetName,
    reference: &Option<ResourceHash>,
) -> anyhow::Result<()> {
    if let Some(ref rh) = reference {
        export_file(self.reference_path(sn, rh), "").context("failed to write reference")
    } else {
        Ok(())
    }
}
}
#[async_trait]
impl Materializer<DockerMaterials> for DockerMaterializer {
/// Get or create the docker network dedicated to this testnet.
async fn create_network(
    &mut self,
    testnet_name: &TestnetName,
) -> anyhow::Result<<DockerMaterials as Materials>::Network> {
    let docker = self.docker.clone();
    let dropper = self.drop_chute.clone();
    DockerNetwork::get_or_create(docker, dropper, testnet_name.clone(), &self.drop_policy).await
}
/// Create a new key-value pair, or return an existing one.
fn create_account(
    &mut self,
    account_name: &crate::AccountName,
) -> anyhow::Result<DefaultAccount> {
    DefaultAccount::get_or_create(&mut self.rng, &self.dir, account_name)
}
/// Fund an account from a faucet; not implemented yet for docker.
async fn fund_from_faucet<'s, 'a>(
    &'s mut self,
    account: &'a DefaultAccount,
    reference: Option<ResourceHash>,
) -> anyhow::Result<()>
where
    's: 'a,
{
    todo!("use curl or something to trigger the faucet")
}
/// Deploy a fresh IPC contract stack; not implemented yet for docker.
async fn new_deployment<'s, 'a>(
    &'s mut self,
    subnet_name: &SubnetName,
    deployer: &'a DefaultAccount,
    urls: Vec<Url>,
) -> anyhow::Result<DefaultDeployment>
where
    's: 'a,
{
    todo!("use the deploy scripts to create a new IPC stack on L1")
}
/// Wrap pre-deployed gateway/registry contract addresses.
fn existing_deployment(
    &mut self,
    subnet_name: &SubnetName,
    gateway: H160,
    registry: H160,
) -> anyhow::Result<DefaultDeployment> {
    Ok(DefaultDeployment {
        name: subnet_name.clone(),
        gateway: EthAddress::from(gateway),
        registry: EthAddress::from(registry),
    })
}
/// Use the built-in gateway/registry deployment addresses.
fn default_deployment(
    &mut self,
    subnet_name: &SubnetName,
) -> anyhow::Result<DefaultDeployment> {
    Ok(DefaultDeployment::builtin(subnet_name.clone()))
}
/// Check if a genesis file already exists. If so, parse it, otherwise
/// create an in-memory representation of a genesis file and export it.
fn create_root_genesis<'a>(
    &mut self,
    subnet_name: &SubnetName,
    validators: BTreeMap<&'a DefaultAccount, Collateral>,
    balances: BTreeMap<&'a DefaultAccount, Balance>,
) -> anyhow::Result<DefaultGenesis> {
    self.get_or_create_genesis(subnet_name, || {
        let chain_name = subnet_name.path_string();
        // Chain ID is derived deterministically by hashing the chain name.
        let chain_id = chainid::from_str_hashed(&chain_name)?;
        // TODO: Some of these hardcoded values can go into the manifest.
        let genesis = Genesis {
            chain_name,
            timestamp: Timestamp::current(),
            network_version: NetworkVersion::V21,
            base_fee: TokenAmount::zero(),
            power_scale: 3,
            validators: validators
                .into_iter()
                .map(|(v, c)| Validator {
                    public_key: ValidatorKey(*v.public_key()),
                    power: c,
                })
                .collect(),
            accounts: balances
                .into_iter()
                .map(|(a, b)| Actor {
                    meta: ActorMeta::Account(Account {
                        owner: SignerAddr(a.fvm_addr()),
                    }),
                    balance: b.0,
                })
                .collect(),
            eam_permission_mode: fendermint_vm_genesis::PermissionMode::Unrestricted,
            ipc: Some(IpcParams {
                gateway: GatewayParams {
                    subnet_id: SubnetID::new_root(chain_id.into()),
                    // TODO: The gateway constructor doesn't allow 0 bottom-up-checkpoint-period even on the rootnet!
                    bottom_up_check_period: 1,
                    majority_percentage: 67,
                    active_validators_limit: 100,
                },
            }),
        };
        Ok(genesis)
    })
}
/// Construct the root subnet handle, deriving the subnet ID either from
/// an explicit chain ID or from the IPC gateway parameters in a genesis.
fn create_root_subnet(
    &mut self,
    subnet_name: &SubnetName,
    params: Either<ChainID, &DefaultGenesis>,
) -> anyhow::Result<DefaultSubnet> {
    let subnet_id = match params {
        Either::Left(chain_id) => SubnetID::new_root(chain_id.into()),
        Either::Right(genesis) => genesis
            .genesis
            .ipc
            .as_ref()
            .map(|ipc| ipc.gateway.subnet_id.clone())
            .ok_or_else(|| anyhow!("IPC configuration missing from genesis"))?,
    };
    Ok(DefaultSubnet {
        name: subnet_name.clone(),
        subnet_id,
    })
}
/// Get or create all docker containers that constitute to a Node.
///
/// A persisted host port range is allocated per node so re-runs of the
/// materializer reuse the same mapping.
async fn create_node<'s, 'a>(
    &'s mut self,
    node_name: &NodeName,
    node_config: &NodeConfig<'a, DockerMaterials>,
) -> anyhow::Result<DockerNode>
where
    's: 'a,
{
    // Pick a port range on the host.
    let port_range = self
        .port_range(node_name)
        .context("failed to pick port range")?;
    // We could write a (shared) docker-compose.yaml file and .env file per node,
    // however the `bollard` library doesn't support docker-compose, so different
    // techniques would need to be used. Alternatively we can just use `Docker`
    // and run three different containers.
    DockerNode::get_or_create(
        &self.dir,
        self.docker.clone(),
        self.drop_chute.clone(),
        &self.drop_policy,
        node_name,
        node_config,
        port_range,
    )
    .await
    .context("failed to create node")
}
    /// Start a node's containers (pointing it at the given seed nodes) and
    /// wait up to `STARTUP_TIMEOUT` for it to report as started.
    async fn start_node<'s, 'a>(
        &'s mut self,
        node: &'a DockerNode,
        seed_nodes: &'a [&'a DockerNode],
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        // Overwrite the env file which has seed addresses, then start the node (unless it's already running).
        node.start(seed_nodes).await?;
        node.wait_for_started(*STARTUP_TIMEOUT).await?;
        // Trying to avoid `Tendermint RPC error: server returned malformatted JSON (no 'result' or 'error')` on first subnet creation attempt.
        tokio::time::sleep(Duration::from_secs(5)).await;
        Ok(())
    }
    /// Create a child subnet under the parent via `ipc-cli subnet create`.
    ///
    /// Idempotent on disk: the allocated subnet ID is exported to a
    /// `subnet-id` file, and if that file already exists the subnet is
    /// reloaded from it instead of being created again.
    async fn create_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, DockerMaterials>,
        subnet_name: &SubnetName,
        subnet_config: &SubnetConfig<'a, DockerMaterials>,
    ) -> anyhow::Result<DefaultSubnet>
    where
        's: 'a,
    {
        let subnet_dir = self.path(subnet_name);
        let subnet_id_file = subnet_dir.join("subnet-id");
        // Check if we have already created the subnet.
        if subnet_id_file.exists() {
            let subnet_id = std::fs::read_to_string(&subnet_id_file)
                .context("failed to read subnet ID from file")?;
            let subnet_id = SubnetID::from_str(&subnet_id).with_context(|| {
                format!(
                    "failed to parse subnet ID in {}: {}",
                    subnet_id_file.to_string_lossy(),
                    subnet_id
                )
            })?;
            let subnet = DefaultSubnet {
                subnet_id,
                name: subnet_name.clone(),
            };
            return Ok(subnet);
        }
        // TODO: Move --permission-mode to the config
        // TODO: Move --supply-source-kind to the config
        let cmd = format!(
            "ipc-cli subnet create \
            --parent {} \
            --from {:?} \
            --min-validators {} \
            --min-validator-stake {} \
            --bottomup-check-period {} \
            --permission-mode collateral \
            --supply-source-kind native \
            ",
            parent_submit_config.subnet.subnet_id,
            subnet_config.creator.eth_addr(),
            subnet_config.min_validators,
            TokenAmount::from_nano(1), // The minimum for native mode that the CLI parses
            subnet_config.bottom_up_checkpoint.period
        );
        // Now run the command and capture the output.
        let logs = self
            .ipc_cli_run_cmd(parent_submit_config, subnet_config.creator, cmd)
            .await
            .context("failed to create subnet")?;
        // Parse the subnet ID from the command output.
        // NOTE: Only the last log line is scanned; presumably the CLI prints it there — confirm if the CLI output format changes.
        let subnet_id = logs
            .last()
            .and_then(find_subnet_id)
            .ok_or_else(|| anyhow!("cannot find a subnet ID in the logs"))?
            .context("failed to parse subnet ID")?;
        export_file(subnet_id_file, subnet_id.to_string()).context("failed to export subnet ID")?;
        Ok(DefaultSubnet {
            name: subnet_name.clone(),
            subnet_id,
        })
    }
async fn fund_subnet<'s, 'a>(
&'s mut self,
parent_submit_config: &SubmitConfig<'a, DockerMaterials>,
account: &'a DefaultAccount,
subnet: &'a DefaultSubnet,
amount: fvm_shared::econ::TokenAmount,
reference: Option<ResourceHash>,
) -> anyhow::Result<()>
where
's: 'a,
{
if self.has_reference(&subnet.name, &reference) {
return Ok(());
}
let cmd = format!(
"ipc-cli cross-msg fund \
--subnet {} \
--from {:?} \
--to {:?} \
{} \
",
subnet.subnet_id,
account.eth_addr(),
account.eth_addr(),
amount
);
let logs = self
.ipc_cli_run_cmd(parent_submit_config, account, cmd)
.await
.context("failed to fund subnet")?;
self.add_reference(&subnet.name, &reference)
}
    /// Join a subnet as a validator via `ipc-cli subnet join`, staking the
    /// given collateral and seeding the account with an initial balance.
    ///
    /// The optional `reference` provides idempotency: if it has already been
    /// recorded for this subnet the join is skipped.
    async fn join_subnet<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, DockerMaterials>,
        account: &'a DefaultAccount,
        subnet: &'a DefaultSubnet,
        collateral: fendermint_vm_genesis::Collateral,
        balance: Balance,
        reference: Option<ResourceHash>,
    ) -> anyhow::Result<()>
    where
        's: 'a,
    {
        if self.has_reference(&subnet.name, &reference) {
            return Ok(());
        }
        let cmd = format!(
            "ipc-cli subnet join \
            --subnet {} \
            --from {:?} \
            --collateral {} \
            --initial-balance {} \
            ",
            subnet.subnet_id,
            account.eth_addr(),
            collateral.0,
            balance.0
        );
        self.ipc_cli_run_cmd(parent_submit_config, account, cmd)
            .await
            .context("failed to join subnet")?;
        self.add_reference(&subnet.name, &reference)
    }
    /// Fetch the genesis of a child subnet from its parent by running the
    /// `fendermint genesis ipc from-parent` command against a parent node's
    /// eth API, then load the resulting `genesis.json` from disk.
    async fn create_subnet_genesis<'s, 'a>(
        &'s mut self,
        parent_submit_config: &SubmitConfig<'a, DockerMaterials>,
        subnet: &'a DefaultSubnet,
    ) -> anyhow::Result<DefaultGenesis>
    where
        's: 'a,
    {
        // Use the docker network of one of the parent nodes, if any is known.
        let network_name =
            parent_submit_config.find_node(|n| Some(n.network_name().clone()), |_| None);
        let parent_url: Url = parent_submit_config
            .find_node(|n| n.internal_ethapi_http_endpoint(), |u| Some(u.clone()))
            .ok_or_else(|| anyhow!("there has to be some nodes with eth API enabled"))?;
        // TODO: Move --base-fee to config
        // TODO: Move --power-scale to config
        let cmd = format!(
            "genesis \
            --genesis-file /fendermint/subnet/genesis.json \
            ipc from-parent \
            --subnet-id {} \
            --parent-endpoint {} \
            --parent-gateway {:?} \
            --parent-registry {:?} \
            --base-fee {} \
            --power-scale {} \
            ",
            subnet.subnet_id,
            parent_url,
            parent_submit_config.deployment.gateway,
            parent_submit_config.deployment.registry,
            TokenAmount::zero().atto(),
            9, // to work with nanoFIL
        );
        let runner = self.fendermint_cli_runner(&subnet.name, network_name.as_ref())?;
        runner
            .run_cmd(&cmd)
            .await
            .context("failed to fetch genesis from parent")?;
        // The container wrote the genesis into the subnet directory; read it back.
        let genesis_path = self.path(&subnet.name).join("genesis.json");
        let genesis = import_json::<Genesis>(&genesis_path)
            .context("failed to read genesis.json")?
            .ok_or_else(|| anyhow!("genesis.json doesn't exist after fetching from parent"))?;
        let genesis = DefaultGenesis {
            name: subnet.name.clone(),
            genesis,
            path: genesis_path,
        };
        Ok(genesis)
    }
async fn create_relayer<'s, 'a>(
&'s mut self,
parent_submit_config: &SubmitConfig<'a, DockerMaterials>,
relayer_name: &RelayerName,
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | true |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/docker/dropper.rs | fendermint/testing/materializer/src/docker/dropper.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use bollard::{
container::{RemoveContainerOptions, StopContainerOptions},
Docker,
};
/// Timeout before we kill the container if it doesn't want to stop.
const KILL_TIMEOUT_SECS: i64 = 5;
/// Commands to destroy docker constructs when they go out of scope.
pub enum DropCommand {
    /// Remove the docker network with the given ID.
    DropNetwork(String),
    /// Stop and remove the docker container with the given ID.
    DropContainer(String),
}
/// Sender half used by resources to enqueue their own removal.
pub type DropChute = tokio::sync::mpsc::UnboundedSender<DropCommand>;
/// Handle of the background task draining the drop commands.
pub type DropHandle = tokio::task::JoinHandle<()>;
/// Decide whether to keep or discard constructs when they go out of scope.
#[derive(Clone, Debug)]
pub struct DropPolicy {
    /// Keep resources that already existed before this run.
    pub keep_existing: bool,
    /// Keep resources that were created during this run.
    pub keep_created: bool,
}
impl DropPolicy {
    /// A network meant to be ephemeral, which aims to drop even what exists,
    /// assuming it only exists because it was created by itself earlier,
    /// but due to some error it failed to be removed.
    pub const EPHEMERAL: DropPolicy = DropPolicy {
        keep_existing: false,
        keep_created: false,
    };
    /// Keep everything around, which is good for CLI applications that
    /// set up networks that should exist until explicitly removed.
    pub const PERSISTENT: DropPolicy = DropPolicy {
        keep_existing: true,
        keep_created: true,
    };
    /// Policy which only tries to remove artifacts which were created
    /// by this materializer, but leaves existing resources around.
    /// This can be useful for reading manifests for networks that
    /// exist outside the tests, run tests against the containers,
    /// then leave them around for another round of testing, while
    /// still maintaining the option of adding some ephemeral resources
    /// from the test itself.
    pub const DROP_CREATED: DropPolicy = DropPolicy {
        keep_created: false,
        keep_existing: true,
    };
    /// Decide if something should be kept when it's out of scope.
    ///
    /// `is_new` says whether the resource was created during this run,
    /// as opposed to having been found already existing.
    pub fn keep(&self, is_new: bool) -> bool {
        if is_new {
            self.keep_created
        } else {
            self.keep_existing
        }
    }
}
impl Default for DropPolicy {
    fn default() -> Self {
        Self::DROP_CREATED
    }
}
/// Start a background task to remove docker constructs.
///
/// The loop will exit when all clones of the sender channel have been dropped.
/// All removals are best-effort: failures are logged (both to stderr and via
/// `tracing`) and the loop keeps processing further commands.
pub fn start(docker: Docker) -> (DropHandle, DropChute) {
    let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
    let handle = tokio::task::spawn(async move {
        while let Some(cmd) = rx.recv().await {
            match cmd {
                DropCommand::DropNetwork(id) => {
                    eprintln!("dropping docker network {id}");
                    if let Err(e) = docker.remove_network(&id).await {
                        eprintln!("failed to remove docker network: {e}");
                        tracing::error!(
                            error = e.to_string(),
                            id,
                            "failed to remove docker network"
                        );
                    }
                }
                DropCommand::DropContainer(id) => {
                    eprintln!("dropping docker container {id}");
                    // Ask the container to stop gracefully; docker kills it
                    // after KILL_TIMEOUT_SECS if it doesn't comply.
                    if let Err(e) = docker
                        .stop_container(
                            &id,
                            Some(StopContainerOptions {
                                t: KILL_TIMEOUT_SECS,
                            }),
                        )
                        .await
                    {
                        tracing::error!(
                            error = e.to_string(),
                            id,
                            "failed to stop docker container"
                        );
                    }
                    // Remove the container (force) and its anonymous volumes (v),
                    // even if stopping it failed above.
                    if let Err(e) = docker
                        .remove_container(
                            &id,
                            Some(RemoveContainerOptions {
                                force: true,
                                v: true,
                                ..Default::default()
                            }),
                        )
                        .await
                    {
                        eprintln!("failed to remove container: {e}");
                        tracing::error!(
                            error = e.to_string(),
                            id,
                            "failed to remove docker container"
                        );
                    }
                }
            }
        }
    });
    (handle, tx)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/materials/mod.rs | fendermint/testing/materializer/src/materials/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{os::unix::fs::PermissionsExt, path::Path};
mod defaults;
use anyhow::Context;
pub use defaults::*;
use serde::{de::DeserializeOwned, Serialize};
/// Type family of all the things a [Materializer] can create.
///
/// Kept separate from the [Materializer] so that we can wrap one in another
/// and pass the same types along. Every associated type is `Send + Sync` so
/// materialized artifacts can be shared across async tasks.
pub trait Materials {
    /// Represents the entire hierarchy of a testnet, e.g. a common docker network
    /// and directory on the file system. It has its own type so the materializer
    /// doesn't have to remember what it created for a testnet, and different
    /// testnets can be kept isolated from each other.
    type Network: Send + Sync;
    /// Capture where the IPC stack (the gateway and the registry) has been deployed on a subnet.
    /// These are the details which normally go into the `ipc-cli` configuration files.
    type Deployment: Sync + Send;
    /// Represents an account identity, typically a key-value pair.
    type Account: Ord + Sync + Send;
    /// Represents the genesis.json file (can be a file location, or a model).
    type Genesis: Sync + Send;
    /// The address of a dynamically created subnet.
    type Subnet: Sync + Send;
    /// The handle to a node; could be a (set of) docker container(s) or remote addresses.
    type Node: Sync + Send;
    /// The handle to a relayer process.
    type Relayer: Sync + Send;
}
/// Write some content to a file named `name` (with extension `ext`, unless
/// `ext` is empty) under `output_dir`.
///
/// It will create all the directories along the path.
pub fn export(
    output_dir: impl AsRef<Path>,
    name: &str,
    ext: &str,
    contents: impl AsRef<str>,
) -> anyhow::Result<()> {
    let file_name = match ext {
        "" => name.to_string(),
        _ => format!("{name}.{ext}"),
    };
    export_file(output_dir.as_ref().join(file_name), contents)
}
/// Export text to a file.
pub fn export_file(file_path: impl AsRef<Path>, contents: impl AsRef<str>) -> anyhow::Result<()> {
if let Some(dir_path) = file_path.as_ref().parent() {
if !dir_path.exists() {
std::fs::create_dir_all(dir_path).with_context(|| {
format!("failed to create directory {}", dir_path.to_string_lossy())
})?;
}
}
std::fs::write(&file_path, contents.as_ref()).with_context(|| {
format!(
"failed to write to {}",
file_path.as_ref().to_string_lossy()
)
})?;
Ok(())
}
/// Export executable shell script.
pub fn export_script(file_path: impl AsRef<Path>, contents: impl AsRef<str>) -> anyhow::Result<()> {
export_file(&file_path, contents)?;
std::fs::set_permissions(&file_path, std::fs::Permissions::from_mode(0o774))
.context("failed to set file permissions")?;
Ok(())
}
/// Export an object as JSON, pretty-printed.
///
/// Fails if serialization or writing the file fails.
pub fn export_json(file_path: impl AsRef<Path>, value: impl Serialize) -> anyhow::Result<()> {
    let json = serde_json::to_string_pretty(&value).context("failed to serialize to JSON")?;
    export_file(file_path, json)
}
/// Read a JSON file, if it exists.
///
/// Returns `Ok(None)` when the file is absent; fails if it exists but
/// cannot be read or parsed as `T`.
pub fn import_json<T: DeserializeOwned>(file_path: impl AsRef<Path>) -> anyhow::Result<Option<T>> {
    let file_path = file_path.as_ref();
    if file_path.exists() {
        let json = std::fs::read_to_string(file_path)
            .with_context(|| format!("failed to read {}", file_path.to_string_lossy()))?;
        let value = serde_json::from_str::<T>(&json)
            .with_context(|| format!("failed to parse {}", file_path.to_string_lossy()))?;
        Ok(Some(value))
    } else {
        Ok(None)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/src/materials/defaults.rs | fendermint/testing/materializer/src/materials/defaults.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
fmt::{Debug, Display},
path::{Path, PathBuf},
};
use anyhow::Context;
use ethers::core::rand::Rng;
use fendermint_crypto::{to_b64, PublicKey, SecretKey};
use fendermint_vm_actor_interface::{eam::EthAddress, init::builtin_actor_eth_addr, ipc};
use fendermint_vm_genesis::Genesis;
use fvm_shared::address::Address;
use ipc_api::subnet_id::SubnetID;
use super::export;
use crate::{AccountId, AccountName, SubnetName};
/// Where the IPC contracts have been deployed on a subnet.
pub struct DefaultDeployment {
    pub name: SubnetName,
    /// Ethereum address of the IPC gateway contract.
    pub gateway: EthAddress,
    /// Ethereum address of the IPC subnet registry contract.
    pub registry: EthAddress,
}
impl Display for DefaultDeployment {
    // Display as the subnet name the deployment belongs to.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.name, f)
    }
}
impl DefaultDeployment {
    /// Deployment with the addresses that the Fendermint Genesis allocates.
    pub fn builtin(name: SubnetName) -> Self {
        Self {
            name,
            gateway: builtin_actor_eth_addr(ipc::GATEWAY_ACTOR_ID),
            registry: builtin_actor_eth_addr(ipc::SUBNETREGISTRY_ACTOR_ID),
        }
    }
}
/// The genesis of a subnet, both as a model and as a file on disk.
pub struct DefaultGenesis {
    pub name: SubnetName,
    /// In-memory representation of the `genesis.json` file.
    pub genesis: Genesis,
    /// Path to the `genesis.json` file.
    pub path: PathBuf,
}
impl Display for DefaultGenesis {
    // Display as the subnet name the genesis belongs to.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.name, f)
    }
}
/// A subnet created by the materializer.
pub struct DefaultSubnet {
    pub name: SubnetName,
    /// ID allocated to the subnet during creation.
    pub subnet_id: SubnetID,
}
impl Display for DefaultSubnet {
    // Display as the human-readable subnet name, not the subnet ID.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.name, f)
    }
}
/// A key pair representing a test account, exported to the file system.
#[derive(PartialEq, Eq)]
pub struct DefaultAccount {
    name: AccountName,
    secret_key: SecretKey,
    public_key: PublicKey,
    /// Path to the directory where the keys are exported.
    path: PathBuf,
}
impl PartialOrd for DefaultAccount {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for DefaultAccount {
    // Accounts are ordered solely by their name.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.name.cmp(&other.name)
    }
}
impl Debug for DefaultAccount {
    // The Debug output omits the secret key (and the export path).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DefaultAccount")
            .field("name", &self.name)
            .field("public_key", &self.public_key)
            .finish()
    }
}
impl Display for DefaultAccount {
    // Display as the human-readable account name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.name, f)
    }
}
impl DefaultAccount {
    /// The last component of the account name.
    pub fn account_id(&self) -> AccountId {
        self.name.0.id()
    }
    /// Ethereum address derived from the account's public key.
    pub fn eth_addr(&self) -> EthAddress {
        EthAddress::from(self.public_key)
    }
    /// We assume that all accounts that interact with IPC are ethereum accounts.
    pub fn fvm_addr(&self) -> Address {
        self.eth_addr().into()
    }
    /// Load an account previously exported under `root`, or generate a fresh
    /// random key pair and export it if no `secret.hex` file exists yet.
    pub fn get_or_create<R: Rng>(
        rng: &mut R,
        root: impl AsRef<Path>,
        name: &AccountName,
    ) -> anyhow::Result<Self> {
        let dir = root.as_ref().join(name.path());
        let sk = dir.join("secret.hex");
        let (sk, is_new) = if sk.exists() {
            let sk = std::fs::read_to_string(sk).context("failed to read private key")?;
            let sk = hex::decode(sk).context("cannot decode hex private key")?;
            let sk = SecretKey::try_from(sk).context("failed to parse secret key")?;
            (sk, false)
        } else {
            let sk = SecretKey::random(rng);
            (sk, true)
        };
        let pk = sk.public_key();
        let acc = Self {
            name: name.clone(),
            secret_key: sk,
            public_key: pk,
            path: dir,
        };
        // Only write the files when the key was freshly generated.
        if is_new {
            acc.export()?;
        }
        Ok(acc)
    }
    /// Create (or overwrite) an account with a given secret key.
    pub fn create(
        root: impl AsRef<Path>,
        name: &AccountName,
        sk: SecretKey,
    ) -> anyhow::Result<Self> {
        let pk = sk.public_key();
        let dir = root.as_ref().join(name.path());
        let acc = Self {
            name: name.clone(),
            secret_key: sk,
            public_key: pk,
            path: dir,
        };
        acc.export()?;
        Ok(acc)
    }
    /// Write the keys to files.
    fn export(&self) -> anyhow::Result<()> {
        let sk = self.secret_key.serialize();
        let pk = self.public_key.serialize();
        // Keys are exported in both base64 and hex encodings.
        export(&self.path, "secret", "b64", to_b64(sk.as_ref()))?;
        export(&self.path, "secret", "hex", hex::encode(sk))?;
        export(&self.path, "public", "b64", to_b64(pk.as_ref()))?;
        export(&self.path, "public", "hex", hex::encode(pk))?;
        export(&self.path, "eth-addr", "", format!("{:?}", self.eth_addr()))?;
        export(&self.path, "fvm-addr", "", self.fvm_addr().to_string())?;
        Ok(())
    }
    /// Path of the base64-encoded secret key file.
    pub fn secret_key_path(&self) -> PathBuf {
        self.path.join("secret.b64")
    }
    pub fn public_key(&self) -> &PublicKey {
        &self.public_key
    }
    pub fn secret_key(&self) -> &SecretKey {
        &self.secret_key
    }
}
#[cfg(test)]
mod tests {
    use ethers::core::rand::{rngs::StdRng, SeedableRng};
    use tempfile::TempDir;
    use crate::TestnetName;
    use super::DefaultAccount;
    /// Accounts are persisted on first creation and reloaded on subsequent
    /// calls, while distinct names yield distinct keys.
    #[test]
    fn test_account() {
        let mut rng = StdRng::from_entropy();
        let dir = TempDir::new().expect("temp dir created");
        let tn = TestnetName::new("account-test");
        let an1 = tn.account("account-1");
        let an2 = tn.account("account-2");
        let a1n = DefaultAccount::get_or_create(&mut rng, &dir, &an1)
            .expect("failed to create account-1");
        let a1e =
            DefaultAccount::get_or_create(&mut rng, &dir, &an1).expect("failed to get account-1");
        let a2n = DefaultAccount::get_or_create(&mut rng, &dir, &an2)
            .expect("failed to create account-2");
        assert_eq!(a1n, a1e, "should reload existing account");
        assert!(a1n != a2n, "should create new account per name");
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/tests/docker.rs | fendermint/testing/materializer/tests/docker.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Utility methods and entry point for tests using the docker materializer.
//!
//! # Example
//!
//! `cargo test -p fendermint_materializer --test docker -- --nocapture`
use std::{
collections::BTreeSet,
env::current_dir,
path::PathBuf,
pin::Pin,
time::{Duration, Instant},
};
use anyhow::{anyhow, Context};
use ethers::providers::Middleware;
use fendermint_materializer::{
docker::{DockerMaterializer, DockerMaterials},
manifest::Manifest,
testnet::Testnet,
HasCometBftApi, HasEthApi, TestnetName,
};
use futures::Future;
use lazy_static::lazy_static;
use tendermint_rpc::Client;
/// The kind of testnet this suite works with: docker materials driven by the docker materializer.
pub type DockerTestnet = Testnet<DockerMaterials, DockerMaterializer>;
lazy_static! {
    // Whether we run with the CI build profile (env var PROFILE=ci).
    static ref CI_PROFILE: bool = std::env::var("PROFILE").unwrap_or_default() == "ci";
    // How long to wait for all node APIs to respond before giving up.
    static ref STARTUP_TIMEOUT: Duration = Duration::from_secs(60);
    // How long to wait during teardown for containers to be dropped.
    static ref TEARDOWN_TIMEOUT: Duration = Duration::from_secs(30);
    // Dump container logs on test failure; enabled in CI by default.
    static ref PRINT_LOGS_ON_ERROR: bool = *CI_PROFILE;
}
/// Want to keep the testnet artifacts in the `tests/testnets` directory.
fn tests_dir() -> PathBuf {
    let cwd = current_dir().unwrap();
    debug_assert!(
        cwd.ends_with("materializer"),
        "expected the current directory to be the crate"
    );
    cwd.join("tests")
}

/// Directory where we keep the docker-materializer related data files.
fn test_data_dir() -> PathBuf {
    tests_dir().join("docker-materializer-data")
}

/// Parse a manifest from the `tests/manifests` directory.
fn read_manifest(file_name: &str) -> anyhow::Result<Manifest> {
    let path = tests_dir().join("manifests").join(file_name);
    Ok(Manifest::from_file(&path)?)
}
/// Parse a manifest file in the `manifests` directory, clean up any corresponding
/// testnet resources, then materialize a testnet and run some tests.
///
/// * `alter` lets the caller tweak the parsed manifest before it is validated;
/// * `test` receives the manifest, the materializer and the testnet and
///   returns the future with the actual test logic.
pub async fn with_testnet<F, G>(manifest_file_name: &str, alter: G, test: F) -> anyhow::Result<()>
where
    // https://users.rust-lang.org/t/function-that-takes-a-closure-with-mutable-reference-that-returns-a-future/54324
    F: for<'a> FnOnce(
        &Manifest,
        &mut DockerMaterializer,
        &'a mut DockerTestnet,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + 'a>>,
    G: FnOnce(&mut Manifest),
{
    // The testnet is named after the manifest file, e.g. `layer2.yaml` -> `layer2`.
    let testnet_name = TestnetName::new(
        PathBuf::from(manifest_file_name)
            .file_stem()
            .expect("filename missing")
            .to_string_lossy()
            .to_string(),
    );
    let mut manifest = read_manifest(manifest_file_name)?;
    // Make any test-specific modifications to the manifest if that makes sense.
    alter(&mut manifest);
    // Make sure it's a sound manifest.
    manifest
        .validate(&testnet_name)
        .await
        .context("failed to validate manifest")?;
    // NOTE: Add `with_policy(DropPolicy::PERSISTENT)` if you want containers to stick around for inspection,
    // but logs and env vars should be available on disk even if the testnet is torn down at the end.
    let mut materializer = DockerMaterializer::new(&test_data_dir(), 0)?;
    // make sure we start with clean slate by removing any previous files
    materializer
        .remove(&testnet_name)
        .await
        .context("failed to remove testnet")?;
    let mut testnet = Testnet::setup(&mut materializer, &testnet_name, &manifest)
        .await
        .context("failed to set up testnet")?;
    let started = wait_for_startup(&testnet).await?;
    // Only run the test closure if every node's API came up in time.
    let res = if started {
        test(&manifest, &mut materializer, &mut testnet).await
    } else {
        Err(anyhow!("the startup sequence timed out"))
    };
    // Print all logs on failure.
    // Some might be available in logs in the files which are left behind,
    // e.g. for `fendermint` we have logs, but maybe not for `cometbft`.
    if res.is_err() && *PRINT_LOGS_ON_ERROR {
        for (name, node) in testnet.nodes() {
            let name = name.path_string();
            for log in node.fendermint_logs().await {
                eprintln!("{name}/fendermint: {log}");
            }
            for log in node.cometbft_logs().await {
                eprintln!("{name}/cometbft: {log}");
            }
            for log in node.ethapi_logs().await {
                eprintln!("{name}/ethapi: {log}");
            }
        }
    }
    // Tear down the testnet.
    drop(testnet);
    // Allow some time for containers to be dropped.
    // This only happens if the testnet setup succeeded,
    // otherwise the system shuts down too quick, but
    // at least we can inspect the containers.
    // If they don't all get dropped, `docker system prune` helps.
    let drop_handle = materializer.take_dropper();
    let _ = tokio::time::timeout(*TEARDOWN_TIMEOUT, drop_handle).await;
    res
}
/// Allow time for things to consolidate and APIs to start.
///
/// Polls the CometBFT API (and the eth API, where enabled) of every node
/// until all respond; returns `Ok(false)` if `STARTUP_TIMEOUT` elapses first.
async fn wait_for_startup(testnet: &DockerTestnet) -> anyhow::Result<bool> {
    let start = Instant::now();
    // Nodes whose APIs have already been observed to respond.
    let mut started = BTreeSet::new();
    'startup: loop {
        if start.elapsed() > *STARTUP_TIMEOUT {
            return Ok(false);
        }
        tokio::time::sleep(Duration::from_secs(5)).await;
        for (name, dnode) in testnet.nodes() {
            if started.contains(name) {
                continue;
            }
            let client = dnode.cometbft_http_provider()?;
            if let Err(e) = client.abci_info().await {
                eprintln!("CometBFT on {name} still fails: {e}");
                continue 'startup;
            }
            if let Some(client) = dnode.ethapi_http_provider()? {
                if let Err(e) = client.get_chainid().await {
                    eprintln!("EthAPI on {name} still fails: {e}");
                    continue 'startup;
                }
            }
            eprintln!("APIs on {name} started");
            started.insert(name.clone());
        }
        // All of them succeeded.
        return Ok(true);
    }
}
// Run these tests serially because they share a common `materializer-state.json` file with the port mappings.
// Unfortunately the `#[serial]` macro can only be applied to module blocks, not this.
mod docker_tests;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/tests/golden.rs | fendermint/testing/materializer/tests/golden.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod json {
use fendermint_materializer::manifest::Manifest;
use fendermint_testing::golden_json;
use quickcheck::Arbitrary;
golden_json! { "manifest/json", manifest, Manifest::arbitrary }
}
mod yaml {
use fendermint_materializer::manifest::Manifest;
use fendermint_testing::golden_yaml;
use quickcheck::Arbitrary;
golden_yaml! { "manifest/yaml", manifest, Manifest::arbitrary }
}
mod toml {
use fendermint_materializer::manifest::Manifest;
use fendermint_testing::golden_toml;
use quickcheck::Arbitrary;
golden_toml! { "manifest/toml", manifest, Manifest::arbitrary }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/tests/docker_tests/layer2.rs | fendermint/testing/materializer/tests/docker_tests/layer2.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, bail, Context};
use ethers::core::types as et;
use ethers::providers::Middleware;
use futures::FutureExt;
use std::sync::Arc;
use std::time::Duration;
use fendermint_materializer::{HasEthApi, ResourceId};
use fendermint_vm_actor_interface::init::builtin_actor_eth_addr;
use fendermint_vm_actor_interface::ipc;
use fendermint_vm_message::conv::from_fvm::to_eth_address;
use ipc_actors_abis::gateway_getter_facet::{GatewayGetterFacet, ParentFinality};
use ipc_actors_abis::subnet_actor_getter_facet::SubnetActorGetterFacet;
use crate::with_testnet;
// Manifest describing the rootnet + child subnet topology used by this test.
const MANIFEST: &str = "layer2.yaml";
// Bottom-up checkpoint period applied to the child subnet for a quick test run.
const CHECKPOINT_PERIOD: u64 = 10;
// Seconds to sleep between polling attempts.
const SLEEP_SECS: u64 = 5;
// Maximum number of polling attempts before failing.
const MAX_RETRIES: u32 = 5;
/// Test that top-down syncing and bottom-up checkpoint submission work.
///
/// Uses the `layer2.yaml` manifest: a rootnet node (`brussels`) and an
/// `england` subnet with node `london`. It first waits for the child gateway
/// to report a non-zero parent finality and cross-checks the finalized block
/// hash against the parent's eth API, then waits for the parent-side subnet
/// actor to record a non-zero bottom-up checkpoint height.
#[serial_test::serial]
#[tokio::test]
async fn test_topdown_and_bottomup() {
    with_testnet(
        MANIFEST,
        |manifest| {
            // Try to make sure the bottom-up checkpoint period is quick enough for reasonable test runtime.
            let subnet = manifest
                .subnets
                .get_mut(&ResourceId::from("england"))
                .expect("subnet not found");
            subnet.bottom_up_checkpoint.period = CHECKPOINT_PERIOD;
        },
        |_, _, testnet| {
            let test = async {
                let brussels = testnet.node(&testnet.root().node("brussels"))?;
                let london = testnet.node(&testnet.root().subnet("england").node("london"))?;
                let england = testnet.subnet(&testnet.root().subnet("england"))?;
                let london_provider = Arc::new(
                    london
                        .ethapi_http_provider()?
                        .ok_or_else(|| anyhow!("ethapi should be enabled"))?,
                );
                let brussels_provider = Arc::new(
                    brussels
                        .ethapi_http_provider()?
                        .ok_or_else(|| anyhow!("ethapi should be enabled"))?,
                );
                // Gateway actor on the child
                let england_gateway = GatewayGetterFacet::new(
                    builtin_actor_eth_addr(ipc::GATEWAY_ACTOR_ID),
                    london_provider.clone(),
                );
                // Subnet actor on the parent
                let england_subnet = SubnetActorGetterFacet::new(
                    to_eth_address(&england.subnet_id.subnet_actor())
                        .and_then(|a| a.ok_or_else(|| anyhow!("not an eth address")))?,
                    brussels_provider.clone(),
                );
                // Query the latest committed parent finality and compare to the parent.
                {
                    let mut retry = 0;
                    loop {
                        let finality: ParentFinality = england_gateway
                            .get_latest_parent_finality()
                            .call()
                            .await
                            .context("failed to get parent finality")?;
                        // If the latest finality is not zero it means the syncer is working,
                        if finality.height.is_zero() {
                            if retry < MAX_RETRIES {
                                eprintln!("waiting for syncing with the parent...");
                                tokio::time::sleep(Duration::from_secs(SLEEP_SECS)).await;
                                retry += 1;
                                continue;
                            }
                            bail!("the parent finality is still zero");
                        }
                        // Check that the block hash of the parent is actually the same at that height.
                        let parent_block: Option<et::Block<_>> = brussels_provider
                            .get_block(finality.height.as_u64())
                            .await
                            .context("failed to get parent block")?;
                        let Some(parent_block_hash) = parent_block.and_then(|b| b.hash) else {
                            bail!("cannot find parent block at final height");
                        };
                        if parent_block_hash.0 != finality.block_hash {
                            bail!("the finality block hash is different from the API");
                        }
                        break;
                    }
                }
                // Check that the parent knows about a checkpoint submitted from the child.
                {
                    let mut retry = 0;
                    loop {
                        // NOTE: The implementation of the following method seems like a nonsense;
                        // I don't know if there is a way to ask the gateway what the latest
                        // checkpoint is, so we'll just have to go to the parent directly.
                        // let (has_checkpoint, epoch, _): (bool, et::U256, _) = england_gateway
                        //     .get_current_bottom_up_checkpoint()
                        //     .call()
                        //     .await
                        //     .context("failed to get current bottomup checkpoint")?;
                        let ckpt_height: et::U256 = england_subnet
                            .last_bottom_up_checkpoint_height()
                            .call()
                            .await
                            .context("failed to query last checkpoint height")?;
                        if !ckpt_height.is_zero() {
                            break;
                        }
                        if retry < MAX_RETRIES {
                            eprintln!("waiting for a checkpoint to be submitted...");
                            tokio::time::sleep(Duration::from_secs(SLEEP_SECS)).await;
                            retry += 1;
                            continue;
                        }
                        bail!("hasn't submitted a bottom-up checkpoint");
                    }
                }
                Ok(())
            };
            test.boxed_local()
        },
    )
    .await
    .unwrap()
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/tests/docker_tests/standalone.rs | fendermint/testing/materializer/tests/docker_tests/standalone.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::time::{Duration, Instant};
use anyhow::{bail, Context};
use ethers::{
core::k256::ecdsa::SigningKey,
middleware::SignerMiddleware,
providers::{JsonRpcClient, Middleware, PendingTransaction, Provider},
signers::{Signer, Wallet},
types::{transaction::eip2718::TypedTransaction, Eip1559TransactionRequest, H160},
};
use fendermint_materializer::{manifest::Rootnet, materials::DefaultAccount, HasEthApi};
use futures::FutureExt;
use crate::with_testnet;
// Manifest describing the single standalone rootnet used by these tests.
const MANIFEST: &str = "standalone.yaml";
/// Provider wrapped in a signer so transactions get nonces assigned and are signed locally.
pub type TestMiddleware<C> = SignerMiddleware<Provider<C>, Wallet<SigningKey>>;
/// Create a middleware that will assign nonces and sign the message.
///
/// The chain ID is queried from the node itself so the wallet's signatures
/// are valid for that network.
async fn make_middleware<C>(
    provider: Provider<C>,
    sender: &DefaultAccount,
) -> anyhow::Result<TestMiddleware<C>>
where
    C: JsonRpcClient,
{
    let chain_id = provider
        .get_chainid()
        .await
        .context("failed to get chain ID")?;
    let wallet: Wallet<SigningKey> = Wallet::from_bytes(sender.secret_key().serialize().as_ref())?
        .with_chain_id(chain_id.as_u64());
    Ok(SignerMiddleware::new(provider, wallet))
}
/// Test that a transaction sent to the mempool can be retrieved by its ethereum hash
/// from the ethereum API instance it was sent to even before it is included in the block.
///
/// The consensus commit timeout is raised to 10s so the transaction is
/// observably still pending when we look it up.
#[serial_test::serial]
#[tokio::test]
async fn test_sent_tx_found_in_mempool() {
    with_testnet(
        MANIFEST,
        |manifest| {
            // Slow down consensus to where we can see the effect of the transaction not being found by Ethereum hash.
            if let Rootnet::New { ref mut env, .. } = manifest.rootnet {
                env.insert("CMT_CONSENSUS_TIMEOUT_COMMIT".into(), "10s".into());
            };
        },
        |_, _, testnet| {
            let test = async {
                let bob = testnet.account("bob")?;
                let charlie = testnet.account("charlie")?;
                let pangea = testnet.node(&testnet.root().node("pangea"))?;
                let provider = pangea
                    .ethapi_http_provider()?
                    .expect("ethapi should be enabled");
                let middleware = make_middleware(provider, bob)
                    .await
                    .context("failed to set up middleware")?;
                // Create the simplest transaction possible: send tokens between accounts.
                let to: H160 = charlie.eth_addr().into();
                let transfer = Eip1559TransactionRequest::new().to(to).value(1);
                let pending: PendingTransaction<_> = middleware
                    .send_transaction(transfer, None)
                    .await
                    .context("failed to send txn")?;
                let tx_hash = pending.tx_hash();
                // We expect that the transaction is pending, however it should not return an error.
                match middleware.get_transaction(tx_hash).await {
                    Ok(Some(_)) => {}
                    Ok(None) => bail!("pending transaction not found by eth hash"),
                    Err(e) => {
                        bail!("failed to get pending transaction: {e}")
                    }
                }
                Ok(())
            };
            test.boxed_local()
        },
    )
    .await
    .unwrap()
}
/// Test that transactions sent out-of-order with regards to the nonce are not rejected,
/// but rather get included in block eventually, their submission managed by the ethereum
/// API facade.
#[serial_test::serial]
#[tokio::test]
async fn test_out_of_order_mempool() {
const MAX_WAIT_TIME: Duration = Duration::from_secs(10);
const SLEEP_TIME: Duration = Duration::from_secs(1);
with_testnet(
MANIFEST,
|_| {},
|_, _, testnet| {
let test = async {
let bob = testnet.account("bob")?;
let charlie = testnet.account("charlie")?;
let pangea = testnet.node(&testnet.root().node("pangea"))?;
let provider = pangea
.ethapi_http_provider()?
.expect("ethapi should be enabled");
let middleware = make_middleware(provider, bob)
.await
.context("failed to set up middleware")?;
// Create the simplest transaction possible: send tokens between accounts.
let to: H160 = charlie.eth_addr().into();
let tx = Eip1559TransactionRequest::new().to(to).value(1);
let mut tx: TypedTransaction = tx.into();
// Fill out the nonce, gas, etc.
middleware
.fill_transaction(&mut tx, None)
.await
.context("failed to fill tx")?;
// Create a few more transactions to be sent out-of-order.
let mut txs = vec![tx];
for i in 1..5 {
let mut tx = txs[0].clone();
let nonce = tx.nonce().expect("fill_transaction filled the nonce");
tx.set_nonce(nonce.saturating_add(i.into()));
txs.push(tx)
}
let mut pending_txs = Vec::new();
// Submit transactions in opposite order.
for (i, tx) in txs.iter().enumerate().rev() {
let sig = middleware
.signer()
.sign_transaction(tx)
.await
.context("failed to sign tx")?;
let rlp = tx.rlp_signed(&sig);
let pending_tx: PendingTransaction<_> = middleware
.send_raw_transaction(rlp)
.await
.with_context(|| format!("failed to send tx {i}"))?;
pending_txs.push(pending_tx)
}
// Check that they eventually get included.
let start = Instant::now();
'pending: loop {
for tx in pending_txs.iter() {
let receipt = middleware
.get_transaction_receipt(tx.tx_hash())
.await
.context("failed to get receipt")?;
if receipt.is_none() {
if start.elapsed() > MAX_WAIT_TIME {
bail!("some transactions are still not executed")
} else {
tokio::time::sleep(SLEEP_TIME).await;
continue 'pending;
}
}
}
// All of them have receipt.
break 'pending;
}
Ok(())
};
test.boxed_local()
},
)
.await
.unwrap()
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/tests/docker_tests/mod.rs | fendermint/testing/materializer/tests/docker_tests/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! These test modules are all imported by the top level `docker.rs` module,
//! so that they can be annotated with the `#[serial]` macro and run one by one,
//! sharing their materializer state.
// Tests using the manifest bearing their name.
pub mod layer2;
pub mod root_only;
pub mod standalone;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/materializer/tests/docker_tests/root_only.rs | fendermint/testing/materializer/tests/docker_tests/root_only.rs | use std::time::Duration;
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, bail};
use ethers::{providers::Middleware, types::U64};
use fendermint_materializer::HasEthApi;
use futures::FutureExt;
use crate::with_testnet;
const MANIFEST: &str = "root-only.yaml";
#[serial_test::serial]
#[tokio::test]
async fn test_full_node_sync() {
with_testnet(
MANIFEST,
|_| {},
|_, _, testnet| {
let test = async {
// Allow a little bit of time for node-2 to catch up with node-1.
tokio::time::sleep(Duration::from_secs(5)).await;
// Check that node2 is following node1.
let node2 = testnet.root().node("node-2");
let dnode2 = testnet.node(&node2)?;
let provider = dnode2
.ethapi_http_provider()?
.ok_or_else(|| anyhow!("node-2 has ethapi enabled"))?;
let bn = provider.get_block_number().await?;
if bn <= U64::one() {
bail!("expected a block beyond genesis");
}
Ok(())
};
test.boxed_local()
},
)
.await
.unwrap()
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/lib.rs | fendermint/testing/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
#[cfg(feature = "arb")]
pub mod arb;
#[cfg(feature = "golden")]
pub mod golden;
#[cfg(feature = "smt")]
pub mod smt;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/smt.rs | fendermint/testing/src/smt.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use arbitrary::Unstructured;
/// State machine tests inspired by [ScalaCheck](https://github.com/typelevel/scalacheck/blob/main/doc/UserGuide.md#stateful-testing)
/// and [quickcheck-state-machine](https://hackage.haskell.org/package/quickcheck-state-machine).
pub trait StateMachine {
/// System Under Test.
type System;
/// The idealised reference state we are testing aginst.
type State: Clone;
/// The random commands we can apply on the state in each step.
type Command;
/// The return result from command application.
type Result;
/// Generate a random initial state.
fn gen_state(&self, u: &mut Unstructured) -> arbitrary::Result<Self::State>;
/// Create a new System Under Test reflecting the given initial state.
///
/// The [System] should free all of its resources when it goes out of scope.
fn new_system(&self, state: &Self::State) -> Self::System;
/// Generate a random command given the latest state.
fn gen_command(
&self,
u: &mut Unstructured,
state: &Self::State,
) -> arbitrary::Result<Self::Command>;
/// Apply a command on the System Under Test.
fn run_command(&self, system: &mut Self::System, cmd: &Self::Command) -> Self::Result;
/// Use assertions to check that the result returned by the System Under Test
/// was correct, given the model pre-state.
fn check_result(&self, cmd: &Self::Command, pre_state: &Self::State, result: Self::Result);
/// Apply a command on the model state.
///
/// We could use `Cow` here if we wanted to preserve the history of state and
/// also avoid cloning when there's no change.
fn next_state(&self, cmd: &Self::Command, state: Self::State) -> Self::State;
/// Use assertions to check that the state transition on the System Under Test
/// was correct, by comparing to the model post-state.
///
/// This can be used to check invariants which should always be true.
///
/// Returns a flag indicating whether we should continue testing this system.
fn check_system(
&self,
cmd: &Self::Command,
post_state: &Self::State,
post_system: &Self::System,
) -> bool;
}
/// Run a state machine test by generating `max_steps` commands.
///
/// It is expected to panic if some post condition fails.
pub fn run<T: StateMachine>(
u: &mut Unstructured,
t: &T,
max_steps: usize,
) -> arbitrary::Result<()> {
let mut state = t.gen_state(u)?;
let mut system = t.new_system(&state);
for _ in 0..max_steps {
ensure_has_randomness(u)?;
let cmd = t.gen_command(u, &state)?;
let res = t.run_command(&mut system, &cmd);
t.check_result(&cmd, &state, res);
state = t.next_state(&cmd, state);
if !t.check_system(&cmd, &state, &system) {
break;
}
}
Ok(())
}
/// Once we run out of randomness, most of the arbitrary data generated by it will
/// be zeroes, which is is not very realistic. Calling this method can highlight
/// this and give us a chance to adjust the min/max size of the builder.
pub fn ensure_has_randomness(u: &Unstructured) -> arbitrary::Result<()> {
// Any error returned by this method is actually ignored by `arbtest`.
// if u.is_empty() {
// return Err(arbitrary::Error::NotEnoughData);
// }
assert!(
!u.is_empty(),
"Ran out of randomness; increase min/max size."
);
Ok(())
}
/// Default `arbtest` builder.
pub fn default_builder() -> arbtest::Builder {
arbtest::builder()
}
/// Make a builder with a certain size of random byte vector.
///
/// If the size is less than what is needed by the test,
/// my experience is that it will generate a lot of zeroes
/// or other default values for anything as it runs out of
/// random bytes.
///
/// The maximum is 4_294_967_295.
pub fn fixed_size_builder(size: u32) -> arbtest::Builder {
arbtest::builder().min_size(size).max_size(size)
}
/// Seed a new builder. The seed carries the size as well as the initial randomness.
pub fn seeded_builder(seed: u64) -> arbtest::Builder {
arbtest::builder().seed(seed)
}
/// Run a state machine test as a `#[test]`.
///
/// # Example
///
/// ```ignore
/// state_machine_test!(counter, 100 ms, 32 bytes, 100 steps, CounterStateMachine { buggy: false });
/// state_machine_test!(counter_seed_1, 0x001a560e00000020, 100 steps, CounterStateMachine { buggy: true });
/// ```
///
/// If the test fails, it will print out the seed which can be used to reproduce the error.
/// One can use [state_machine_seed!] to do that with minimal changes to the parameters.
///
/// The machine instance is reused between tests, which makes it possible to use it for
/// caching resources that take a long time to initialize, without having to resort to
/// for example `lazy_static!` global variables.
#[macro_export]
macro_rules! state_machine_test {
// Run on a fixed time and randomness size budget.
($name:ident, $ms:literal ms, $size:literal bytes, $steps:literal steps, $smt:expr) => {
#[test]
fn $name() {
let machine = $smt;
$crate::smt::fixed_size_builder($size)
.budget_ms($ms)
.run(|u| $crate::smt::run(u, &machine, $steps))
}
};
// Run with a fixed randomness.
($name:ident, $size:literal bytes, $steps:literal steps, $smt:expr) => {
#[test]
fn $name() {
let machine = $smt;
$crate::smt::fixed_size_builder($size).run(|u| $crate::smt::run(u, &machine, $steps))
}
};
// Run for a certain number of steps varying the size.
($name:ident, $steps:literal steps, $smt:expr) => {
#[test]
fn $name() {
let machine = $smt;
$crate::smt::default_builder().run(|u| $crate::smt::run(u, &machine, $steps))
}
};
// Reproduce a result.
($name:ident, $seed:literal, $steps:literal steps, $smt:expr) => {
#[test]
fn $name() {
let machine = $smt;
$crate::smt::seeded_builder($seed).run(|u| $crate::smt::run(u, &machine, $steps))
}
};
}
/// Run a state machine test as a `#[test]` with a `seed` to reproduce a failure.
///
/// # Example
///
/// ```ignore
/// state_machine_seed!(counter, 0x001a560e00000020, 100 steps, CounterStateMachine { buggy: true });
/// ```
#[macro_export]
macro_rules! state_machine_seed {
($name:ident, $seed:literal, $steps:literal steps, $smt:expr) => {
paste::paste! {
#[test]
fn [<$name _with_seed_ $seed>]() {
let machine = $smt;
$crate::smt::builder_with_seed($seed)
.run(|u| $crate::smt::run(u, &machine, $steps))
}
}
};
}
#[cfg(test)]
mod tests {
use arbitrary::{Result, Unstructured};
use super::{fixed_size_builder, seeded_builder, StateMachine};
/// A sample System Under Test.
struct Counter {
n: i32,
}
impl Counter {
pub fn new() -> Self {
Self { n: 0 }
}
pub fn get(&self) -> i32 {
self.n
}
pub fn inc(&mut self) {
self.n += 1;
}
pub fn dec(&mut self) {
self.n -= 1;
}
pub fn reset(&mut self) {
self.n = 0;
}
}
#[derive(Clone, Copy)]
enum CounterCommand {
Get,
Inc,
Dec,
Reset,
}
struct CounterStateMachine {
/// Introduce some bug to check the negative case.
buggy: bool,
}
impl StateMachine for CounterStateMachine {
type System = Counter;
type State = i32;
type Command = &'static CounterCommand;
type Result = Option<i32>;
fn gen_state(&self, u: &mut Unstructured) -> Result<Self::State> {
if self.buggy {
Ok(u.arbitrary::<i32>()?.abs() + 1)
} else {
Ok(0)
}
}
fn new_system(&self, _state: &Self::State) -> Self::System {
Counter::new()
}
fn gen_command(&self, u: &mut Unstructured, _state: &Self::State) -> Result<Self::Command> {
use CounterCommand::*;
u.choose(&[Get, Inc, Dec, Reset])
}
fn run_command(&self, system: &mut Self::System, cmd: &Self::Command) -> Self::Result {
use CounterCommand::*;
match cmd {
Get => return Some(system.get()),
Inc => system.inc(),
Dec => system.dec(),
Reset => system.reset(),
}
None
}
fn check_result(&self, cmd: &Self::Command, pre_state: &Self::State, result: Self::Result) {
if let CounterCommand::Get = cmd {
assert_eq!(result.as_ref(), Some(pre_state))
}
}
fn next_state(&self, cmd: &Self::Command, state: Self::State) -> Self::State {
use CounterCommand::*;
match cmd {
Inc => state + 1,
Dec => state - 1,
Reset => 0,
Get => state,
}
}
fn check_system(
&self,
_cmd: &Self::Command,
post_state: &Self::State,
post_system: &Self::System,
) -> bool {
// We can check the state if we want to, or we can wait for a Get command.
assert_eq!(post_state, &post_system.get());
true
}
}
state_machine_test!(counter, 512 bytes, 100 steps, CounterStateMachine { buggy: false });
/// Test the equivalent of:
///
/// ```ignore
/// state_machine_test!(counter, 512 bytes, 100 steps, CounterStateMachine { buggy: true });
/// ```
///
/// Which would have an output like:
///
/// ```text
/// ---- smt::tests::counter_with_seed stdout ----
/// thread 'smt::tests::counter panicked at 'assertion failed: `(left == right)`
/// left: `296607493`,
/// right: `1`', testing/integration/src/smt.rs:233:13
///
///
/// arb_test failed!
/// Seed: 0x4327d37100000200
/// ```
#[test]
#[should_panic]
fn counter_with_bug() {
let t = CounterStateMachine { buggy: true };
fixed_size_builder(512).run(|u| super::run(u, &t, 100))
}
/// Test the equivalent of:
///
/// ```ignore
/// state_machine_seed!(counter, 0x4327d37100000200, 100 steps, CounterStateMachine { buggy: true });
/// ```
#[test]
#[should_panic]
fn counter_with_seed() {
let t = CounterStateMachine { buggy: true };
seeded_builder(0x4327d37100000200).run(|u| super::run(u, &t, 100))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/golden.rs | fendermint/testing/src/golden.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use serde::{de::DeserializeOwned, Serialize};
use std::fmt::Debug;
use std::fs::File;
use std::io::{Read, Write};
use std::path::Path;
/// Path to a golden file.
fn path(prefix: &str, name: &str, ext: &str) -> String {
// All files will have the same name but different extension.
// They should be under `fendermint/vm/message/golden`.
let path = Path::new("golden").join(prefix).join(name);
format!("{}.{}", path.display(), ext)
}
/// Read the contents of an existing golden file, or create it by turning `fallback` into string first.
fn read_or_create<T>(
prefix: &str,
name: &str,
ext: &str,
fallback: &T,
to_string: fn(&T) -> String,
) -> String {
let p = path(prefix, name, ext);
let p = Path::new(&p);
if !p.exists() {
if let Some(p) = p.parent() {
std::fs::create_dir_all(p).expect("failed to create golden directory");
}
let s = to_string(fallback);
let mut f = File::create(p)
.unwrap_or_else(|e| panic!("Cannot create golden file at {:?}: {}", p, e));
f.write_all(s.as_bytes()).unwrap();
}
let mut f =
File::open(p).unwrap_or_else(|e| panic!("Cannot open golden file at {:?}: {}", p, e));
let mut s = String::new();
f.read_to_string(&mut s).expect("Cannot read golden file.");
s.trim_end().to_owned()
}
/// Check that a golden file we created earlier can still be read by the current model by
/// comparing to a debug string (which should at least be readable enough to show what changed).
///
/// If the golden file doesn't exist, create one now.
fn test_txt<T>(
prefix: &str,
name: &str,
arb_data: fn(g: &mut quickcheck::Gen) -> T,
ext: &str,
to_string: fn(&T) -> String,
from_string: fn(&String) -> Result<T, String>,
) -> T
where
T: Serialize + DeserializeOwned + Debug,
{
// We may not need this, but it shouldn't be too expensive to generate.
let mut g = quickcheck::Gen::new(10);
let data0 = arb_data(&mut g);
// Debug string of a wrapper.
let to_debug = |w: &T| format!("{:?}", w);
let repr = read_or_create(prefix, name, ext, &data0, to_string);
let data1: T = from_string(&repr)
.unwrap_or_else(|e| panic!("Cannot deserialize {prefix}/{name}.{ext}: {e}"));
// Use the deserialised data as fallback for the debug string, so if the txt doesn't exist, it's created
// from what we just read back.
let txt = read_or_create(prefix, name, "txt", &data1, to_debug);
// This will fail if either the CBOR or the Debug format changes.
// At that point we should either know that it's a legitimate regression because we changed the model,
// or catch it as an unexpected regression, indicating that we made some backwards incompatible change.
assert_eq!(to_debug(&data1), txt.trim_end());
data1
}
/// Test CBOR golden file.
///
/// Note that the CBOR files will be encoded as hexadecimal strings.
/// To view them in something like https://cbor.dev/ you can use for example `xxd`:
///
/// ```text
/// cat example.cbor | xxd -r -p > example.cbor.bin
/// ```
pub fn test_cbor_txt<T: Serialize + DeserializeOwned + Debug>(
prefix: &str,
name: &str,
arb_data: fn(g: &mut quickcheck::Gen) -> T,
) -> T {
test_txt(
prefix,
name,
arb_data,
"cbor",
|d| {
let bz = fvm_ipld_encoding::to_vec(d).expect("failed to serialize");
hex::encode(bz)
},
|s| {
let bz = hex::decode(s).map_err(|e| format!("faled to decode hex: {e}"))?;
fvm_ipld_encoding::from_slice(&bz).map_err(|e| format!("failed to decode CBOR: {e}"))
},
)
}
/// Same as [`test_cbor_txt`] but with JSON.
pub fn test_json_txt<T: Serialize + DeserializeOwned + Debug>(
prefix: &str,
name: &str,
arb_data: fn(g: &mut quickcheck::Gen) -> T,
) -> T {
test_txt(
prefix,
name,
arb_data,
"json",
|d| serde_json::to_string_pretty(d).expect("failed to serialize"),
|s| serde_json::from_str(s).map_err(|e| format!("failed to decode JSON: {e}")),
)
}
/// Same as [`test_json_txt`] but with YAML.
pub fn test_yaml_txt<T: Serialize + DeserializeOwned + Debug>(
prefix: &str,
name: &str,
arb_data: fn(g: &mut quickcheck::Gen) -> T,
) -> T {
test_txt(
prefix,
name,
arb_data,
"yaml",
|d| serde_yaml::to_string(d).expect("failed to serialize"),
|s| serde_yaml::from_str(s).map_err(|e| format!("failed to decode YAML: {e}")),
)
}
/// Same as [`test_json_txt`] but with TOML.
pub fn test_toml_txt<T: Serialize + DeserializeOwned + Debug>(
prefix: &str,
name: &str,
arb_data: fn(g: &mut quickcheck::Gen) -> T,
) -> T {
test_txt(
prefix,
name,
arb_data,
"toml",
|d| toml::to_string(d).expect("failed to serialize"),
|s| toml::from_str(s).map_err(|e| format!("failed to decode TOML: {e}")),
)
}
/// Test that the CID of something we deserialized from CBOR matches what we saved earlier,
/// ie. that we produce the same CID, which is important if it's the basis of signing.
pub fn test_cid<T: Debug>(prefix: &str, name: &str, data: T, cid: fn(&T) -> Cid) {
let exp_cid = hex::encode(cid(&data).to_bytes());
let got_cid = read_or_create(prefix, name, "cid", &exp_cid, |d| d.to_owned());
assert_eq!(got_cid, exp_cid)
}
/// Create a test which calls [`test_cbor_txt`].
///
/// # Example
///
/// ```ignore
/// golden_cbor! { "query/response", actor_state, |g| {
/// ActorState::arbitrary(g)
/// }}
/// ```
#[macro_export]
macro_rules! golden_cbor {
($prefix:literal, $name:ident, $gen:expr) => {
#[test]
fn $name() {
let label = stringify!($name);
$crate::golden::test_cbor_txt($prefix, &label, $gen);
}
};
}
/// Create a test which calls [`test_json_txt`].
///
/// # Example
///
/// ```ignore
/// golden_json! { "genesis", genesis, Genesis::arbitrary}
/// ```
#[macro_export]
macro_rules! golden_json {
($prefix:literal, $name:ident, $gen:expr) => {
#[test]
fn $name() {
let label = stringify!($name);
$crate::golden::test_json_txt($prefix, &label, $gen);
}
};
}
/// Create a test which calls [`test_yaml_txt`].
///
/// # Example
///
/// ```ignore
/// golden_yaml! { "genesis", genesis, Genesis::arbitrary}
/// ```
#[macro_export]
macro_rules! golden_yaml {
($prefix:literal, $name:ident, $gen:expr) => {
#[test]
fn $name() {
let label = stringify!($name);
$crate::golden::test_yaml_txt($prefix, &label, $gen);
}
};
}
/// Create a test which calls [`test_toml_txt`].
///
/// # Example
///
/// ```ignore
/// golden_toml! { "genesis", genesis, Genesis::arbitrary}
/// ```
#[macro_export]
macro_rules! golden_toml {
($prefix:literal, $name:ident, $gen:expr) => {
#[test]
fn $name() {
let label = stringify!($name);
$crate::golden::test_toml_txt($prefix, &label, $gen);
}
};
}
/// Create a test which calls [`test_cid`].
///
/// # Example
///
/// ```ignore
/// golden_cid! { "fvm", message, |g| SignedMessage::arbitrary(g).message, |m| SignedMessage::cid(m).unwrap() }
/// ```
#[macro_export]
macro_rules! golden_cid {
($prefix:literal, $name:ident, $gen:expr, $cid:expr) => {
#[test]
fn $name() {
let label = stringify!($name);
let data = $crate::golden::test_cbor_txt($prefix, &label, $gen);
$crate::golden::test_cid($prefix, &label, data, $cid);
}
};
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/arb/cid.rs | fendermint/testing/src/arb/cid.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Unfortunately ref-fvm depends on cid:0.8.6, which depends on quickcheck:0.9
//! whereas here we use quickcheck:0.1. This causes conflicts and the `Arbitrary`
//! implementations for `Cid` are not usable to us, nor can we patch all `cid`
//! dependencies to use 0.9 because then the IPLD and other FVM traits don't work.
//!
//! TODO: Remove this module when the `cid` dependency is updated.
//! NOTE: A simpler alternative is https://github.com/ChainSafe/forest/blob/v0.6.0/blockchain/blocks/src/lib.rs
use cid::{
multihash::{Code, MultihashDigest, MultihashGeneric},
CidGeneric, Version,
};
use rand::{distributions::WeightedIndex, prelude::Distribution, Rng, RngCore, SeedableRng};
use quickcheck::{Arbitrary, Gen};
#[derive(Clone)]
pub struct ArbVersion(pub Version);
impl Arbitrary for ArbVersion {
fn arbitrary(g: &mut Gen) -> Self {
let version = u64::from(bool::arbitrary(g));
Self(Version::try_from(version).unwrap())
}
}
#[derive(Clone)]
pub struct ArbCid<const S: usize>(pub CidGeneric<S>);
impl<const S: usize> Arbitrary for ArbCid<S> {
/// Copied from https://github.com/multiformats/rust-cid/blob/v0.10.0/src/arb.rs
fn arbitrary(g: &mut Gen) -> Self {
let cid = if S >= 32 && ArbVersion::arbitrary(g).0 == Version::V0 {
let data: Vec<u8> = Vec::arbitrary(g);
let hash = Code::Sha2_256
.digest(&data)
.resize()
.expect("digest too large");
CidGeneric::new_v0(hash).expect("sha2_256 is a valid hash for cid v0")
} else {
// In real world lower IPLD Codec codes more likely to happen, hence distribute them
// with bias towards smaller values.
let weights = [128, 32, 4, 4, 2, 2, 1, 1];
let dist = WeightedIndex::new(weights.iter()).unwrap();
let mut rng = rand::rngs::SmallRng::seed_from_u64(u64::arbitrary(g));
let codec = match dist.sample(&mut rng) {
0 => rng.gen_range(0..u64::pow(2, 7)),
1 => rng.gen_range(u64::pow(2, 7)..u64::pow(2, 14)),
2 => rng.gen_range(u64::pow(2, 14)..u64::pow(2, 21)),
3 => rng.gen_range(u64::pow(2, 21)..u64::pow(2, 28)),
4 => rng.gen_range(u64::pow(2, 28)..u64::pow(2, 35)),
5 => rng.gen_range(u64::pow(2, 35)..u64::pow(2, 42)),
6 => rng.gen_range(u64::pow(2, 42)..u64::pow(2, 49)),
7 => rng.gen_range(u64::pow(2, 56)..u64::pow(2, 63)),
_ => unreachable!(),
};
let hash = ArbMultihash::<S>::arbitrary(g).0;
CidGeneric::new_v1(codec, hash)
};
Self(cid)
}
}
#[derive(Clone)]
pub struct ArbMultihash<const S: usize>(pub MultihashGeneric<S>);
impl<const S: usize> Arbitrary for ArbMultihash<S> {
/// Generates a random valid multihash.
///
/// Copied from https://github.com/multiformats/rust-multihash/blob/v0.18.0/src/arb.rs
fn arbitrary(g: &mut Gen) -> Self {
// In real world lower multihash codes are more likely to happen, hence distribute them
// with bias towards smaller values.
let weights = [128, 64, 32, 16, 8, 4, 2, 1];
let dist = WeightedIndex::new(weights.iter()).unwrap();
let mut rng = rand::rngs::SmallRng::seed_from_u64(u64::arbitrary(g));
let code = match dist.sample(&mut rng) {
0 => rng.gen_range(0..u64::pow(2, 7)),
1 => rng.gen_range(u64::pow(2, 7)..u64::pow(2, 14)),
2 => rng.gen_range(u64::pow(2, 14)..u64::pow(2, 21)),
3 => rng.gen_range(u64::pow(2, 21)..u64::pow(2, 28)),
4 => rng.gen_range(u64::pow(2, 28)..u64::pow(2, 35)),
5 => rng.gen_range(u64::pow(2, 35)..u64::pow(2, 42)),
6 => rng.gen_range(u64::pow(2, 42)..u64::pow(2, 49)),
7 => rng.gen_range(u64::pow(2, 56)..u64::pow(2, 63)),
_ => unreachable!(),
};
// Maximum size is S byte due to the generic.
let size = rng.gen_range(0..S);
let mut data = [0; S];
rng.fill_bytes(&mut data);
Self(MultihashGeneric::wrap(code, &data[..size]).unwrap())
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/arb/address.rs | fendermint/testing/src/arb/address.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_shared::address::Address;
use quickcheck::{Arbitrary, Gen};
/// Unfortunately an arbitrary `DelegatedAddress` can be inconsistent with bytes that do not correspond to its length.
#[derive(Clone, Debug)]
pub struct ArbAddress(pub Address);
impl Arbitrary for ArbAddress {
fn arbitrary(g: &mut Gen) -> Self {
let addr = Address::arbitrary(g);
let bz = addr.to_bytes();
Self(Address::from_bytes(&bz).unwrap())
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/arb/mod.rs | fendermint/testing/src/arb/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod address;
mod cid;
mod message;
mod subnetid;
mod token;
pub use crate::arb::address::ArbAddress;
pub use crate::arb::cid::ArbCid;
pub use crate::arb::message::ArbMessage;
pub use crate::arb::subnetid::{ArbSubnetAddress, ArbSubnetID};
pub use crate::arb::token::ArbTokenAmount;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/arb/message.rs | fendermint/testing/src/arb/message.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_shared::message::Message;
use quickcheck::{Arbitrary, Gen};
use super::{ArbAddress, ArbTokenAmount};
#[derive(Clone, Debug)]
pub struct ArbMessage(pub Message);
impl Arbitrary for ArbMessage {
fn arbitrary(g: &mut Gen) -> Self {
let mut message = Message::arbitrary(g);
message.gas_fee_cap = ArbTokenAmount::arbitrary(g).0;
message.gas_premium = ArbTokenAmount::arbitrary(g).0;
message.value = ArbTokenAmount::arbitrary(g).0;
message.to = ArbAddress::arbitrary(g).0;
message.from = ArbAddress::arbitrary(g).0;
Self(message)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/arb/subnetid.rs | fendermint/testing/src/arb/subnetid.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_shared::address::Address;
use ipc_api::subnet_id::SubnetID;
#[derive(Debug, Clone)]
pub struct ArbSubnetAddress(pub Address);
#[derive(Debug, Clone)]
pub struct ArbSubnetID(pub SubnetID);
impl quickcheck::Arbitrary for ArbSubnetID {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let child_count = usize::arbitrary(g) % 4;
let children = (0..child_count)
.map(|_| ArbSubnetAddress::arbitrary(g).0)
.collect::<Vec<_>>();
Self(SubnetID::new(u64::arbitrary(g), children))
}
}
impl arbitrary::Arbitrary<'_> for ArbSubnetID {
fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
let child_count = usize::arbitrary(u)? % 4;
let children = (0..child_count)
.map(|_| Ok(ArbSubnetAddress::arbitrary(u)?.0))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self(SubnetID::new(u64::arbitrary(u)?, children)))
}
}
impl quickcheck::Arbitrary for ArbSubnetAddress {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let addr = if bool::arbitrary(g) {
Address::new_id(u64::arbitrary(g))
} else {
// Only expecting EAM managed delegated addresses.
let subaddr: [u8; 20] = std::array::from_fn(|_| u8::arbitrary(g));
Address::new_delegated(10, &subaddr).unwrap()
};
Self(addr)
}
}
impl arbitrary::Arbitrary<'_> for ArbSubnetAddress {
fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
let addr = if bool::arbitrary(u)? {
Address::new_id(u64::arbitrary(u)?)
} else {
// Only expecting EAM managed delegated addresses.
let mut subaddr = [0u8; 20];
for b in &mut subaddr {
*b = u8::arbitrary(u)?;
}
Address::new_delegated(10, &subaddr).unwrap()
};
Ok(Self(addr))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/src/arb/token.rs | fendermint/testing/src/arb/token.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use ethers::types::U256;
use fvm_shared::{
bigint::{BigInt, Integer, Sign, MAX_BIGINT_SIZE},
econ::TokenAmount,
};
use lazy_static::lazy_static;
use quickcheck::Gen;
use std::str::FromStr;
lazy_static! {
/// The max below is taken from https://github.com/filecoin-project/ref-fvm/blob/fvm%40v3.0.0-alpha.24/shared/src/bigint/bigint_ser.rs#L80-L81
static ref MAX_BIGINT: BigInt =
BigInt::new(Sign::Plus, vec![u32::MAX; MAX_BIGINT_SIZE / 4 - 1]);
static ref MAX_U256: BigInt = BigInt::from_str(&U256::MAX.to_string()).unwrap();
/// `fvm_shared::sys::TokenAmount` is limited to `u128` range.
static ref MAX_U128: BigInt = BigInt::from(u128::MAX);
// Restrict maximum token value to what we can actually pass to Ethereum.
static ref MAX_ATTO: BigInt = MAX_BIGINT.clone().min(MAX_U128.clone());
}
#[derive(Clone, Debug)]
/// Unfortunately an arbitrary `TokenAmount` is not serializable if it has more than 128 bytes, we get "BigInt too large" error.
pub struct ArbTokenAmount(pub TokenAmount);
impl quickcheck::Arbitrary for ArbTokenAmount {
fn arbitrary(g: &mut Gen) -> Self {
let tokens = TokenAmount::arbitrary(g);
let atto = tokens.atto();
let atto = atto.mod_floor(&MAX_ATTO);
Self(TokenAmount::from_atto(atto))
}
}
impl arbitrary::Arbitrary<'_> for ArbTokenAmount {
fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
// Using double because the way it's generated is base don vectors,
// and they are often empty when the `size` parameter is small.
let atto = BigInt::arbitrary(u)? + BigInt::arbitrary(u)?;
let atto = atto.mod_floor(&MAX_ATTO);
Ok(Self(TokenAmount::from_atto(atto)))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/delorean-cli/src/lib.rs | fendermint/testing/delorean-cli/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Run tests against multiple Fendermint+CometBFT docker container pairs locally:
//! 0. The default `snapshot-fendermint` and `snapshot-cometbft` pair
//! 1. A `snapshot-cometbft-1` and `snapshot-cometbft-2`, using `scripts/node-1.env` and `node-2`.env,
//! syncing with the default node from genesis on a block-by-block basis, and clear out their history
//! to force others who sync with them to use snapshots.
//! 2. A `snapshot-cometbft-3` using `scripts/node-3.env`,
//! which syncs with `node-1` and `node-2` using snapshots (a.k.a. state sync).
//!
//! Note that CometBFT state sync requires 2 RPC servers, which is why we need 3 nodes.
//!
//! See <https://docs.cometbft.com/v0.37/core/state-sync> and <https://docs.cometbft.com/v0.37/core/configuration>
//!
//! Examples:
//!
//! 1. All in one go
//! ```text
//! cd fendermint/testing/snapshot-test
//! cargo make
//! ```
//!
//! 2. One by one
//! ```text
//! cd fendermint/testing/snapshot-test
//! cargo make setup
//! cargo make node-1-setup
//! cargo make node-2-setup
//! cargo make node-3-setup
//! docker logs snapshot-cometbft-3
//! cargo make snapshot-teardown
//! cargo make teardown
//! ```
//!
//! Make sure you installed cargo-make by running `cargo install cargo-make` first.
use cid::Cid;
use fendermint_rpc::QueryClient;
use fendermint_vm_message::query::FvmQueryHeight;
use fvm_ipld_blockstore::Blockstore;
#[derive(Clone)]
pub struct RemoteBlockstore<C> {
client: C,
}
impl<C> RemoteBlockstore<C> {
pub fn new(client: C) -> Self {
Self { client }
}
}
impl<C: QueryClient> Blockstore for RemoteBlockstore<C> {
fn get(&self, k: &Cid) -> anyhow::Result<Option<Vec<u8>>> {
futures::executor::block_on(self.client.ipld(k, FvmQueryHeight::default()))
}
fn put_keyed(&self, _k: &Cid, _block: &[u8]) -> anyhow::Result<()> {
panic!("never intended to use put on the read-only blockstore")
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/delorean-cli/src/main.rs | fendermint/testing/delorean-cli/src/main.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Helper commands for interacting with the Delorean/CETF actor via RPC
//!
//! The example assumes that Tendermint and Fendermint have been started
//! and are running locally.
//!
//! # Usage
//! ```text
//! cargo run --example delorean -- --secret-key test-data/keys/volvo.sk queue-tag
//! ```
use std::any;
use std::io::Write;
use std::path::PathBuf;
use anyhow::{anyhow, Context};
use bls_signatures::Serialize;
use bytes::Bytes;
use cetf_actor::State as CetfActorState;
use clap::{Parser, Subcommand};
use ethers::abi::Tokenizable;
use ethers::prelude::*;
use fendermint_actor_cetf::state::DEFAULT_HAMT_CONFIG;
use fendermint_actor_cetf::{self as cetf_actor, BlsSignature};
use delorean_cli::RemoteBlockstore;
use fendermint_rpc::query::{QueryClient, QueryResponse};
use fendermint_vm_actor_interface::eam;
use fendermint_vm_message::query::FvmQueryHeight;
use fvm_ipld_encoding::{CborStore, RawBytes};
use fvm_shared::address::Address;
use fvm_shared::chainid::ChainID;
use fvm_shared::econ::TokenAmount;
use k256::sha2::{Digest, Sha256};
use lazy_static::lazy_static;
use tendermint_rpc::Url;
use tracing::Level;
use fendermint_rpc::client::FendermintClient;
use fendermint_rpc::message::{GasParams, SignedMessageFactory};
use fendermint_rpc::tx::{CallClient, TxClient, TxCommit};
type MockProvider = ethers::providers::Provider<ethers::providers::MockProvider>;
type MockContractCall<T> = ethers::prelude::ContractCall<MockProvider, T>;
const DEMO_CONTRACT_SPEC_JSON: &str =
include_str!("../../../../contracts/out/Demo.sol/DeloreanDemo.json");
lazy_static! {
/// Default gas params based on the testkit.
static ref GAS_PARAMS: GasParams = GasParams {
gas_limit: 10_000_000_000,
gas_fee_cap: TokenAmount::default(),
gas_premium: TokenAmount::default(),
};
}
abigen!(
DeloreanContract,
r#"[
{
"type": "function",
"name": "releaseKey",
"inputs": [],
"outputs": [
{
"name": "",
"type": "bool",
"internalType": "bool"
}
],
"stateMutability": "nonpayable"
},
{
"type": "function",
"name": "signingTag",
"inputs": [],
"outputs": [
{
"name": "",
"type": "bytes32",
"internalType": "bytes32"
}
],
"stateMutability": "nonpayable"
}
]"#
);
#[derive(Parser, Debug)]
pub struct Options {
/// The URL of the Tendermint node's RPC endpoint.
#[arg(
long,
short,
default_value = "http://127.0.0.1:26657",
env = "TENDERMINT_RPC_URL"
)]
pub url: Url,
/// Enable DEBUG logs.
#[arg(long, short)]
pub verbose: bool,
#[command(subcommand)]
command: Commands,
/// Path to the secret key to deploy with, expected to be in Base64 format,
/// and that it has a corresponding f410 account in genesis.
#[arg(long, short, env = "DELORIAN_SECRET_KEY")]
pub secret_key: PathBuf,
}
#[derive(Debug, Subcommand)]
enum Commands {
RegisterBls {
#[arg(long, short)]
bls_secret_key: PathBuf,
},
QueueTag,
DeployDemoContract,
CallReleaseKeys {
address: String,
},
RegisteredKeys,
Encrypt {
contract_address: String,
#[arg(long, short)]
output: PathBuf,
},
Decrypt {
contract_address: String,
#[arg(long, short)]
output: Option<PathBuf>,
},
TestIfHeightsAreSignedProperly,
}
impl Options {
pub fn log_level(&self) -> Level {
if self.verbose {
Level::DEBUG
} else {
Level::INFO
}
}
}
/// See the module docs for how to run.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let opts: Options = Options::parse();
tracing_subscriber::fmt()
.with_max_level(opts.log_level())
.init();
let client = FendermintClient::new_http(opts.url, None).expect("error creating client");
let store = RemoteBlockstore::new(client.clone());
let sk =
SignedMessageFactory::read_secret_key(&opts.secret_key).expect("error reading secret key");
let pk = sk.public_key();
let f1_addr = Address::new_secp256k1(&pk.serialize()).expect("valid public key");
// Query the account nonce from the state, so it doesn't need to be passed as an arg.
let sn = sequence(&client, &f1_addr)
.await
.expect("error getting sequence");
// Query the chain ID, so it doesn't need to be passed as an arg.
let chain_id = client
.state_params(FvmQueryHeight::default())
.await
.expect("error getting state params")
.value
.chain_id;
let mf = SignedMessageFactory::new(sk, f1_addr, sn, ChainID::from(chain_id));
let mut client = client.bind(mf);
match opts.command {
Commands::RegisterBls { bls_secret_key } => {
let bls_sk = {
let b64 = std::fs::read_to_string(&bls_secret_key)
.expect("failed to read bls secret key");
bls_signatures::PrivateKey::from_bytes(
&fendermint_crypto::from_b64(&b64)
.expect("failed to decode b64 bls secret key"),
)
.expect("failed to parse bls secret key")
};
let bls_pk = bls_sk.public_key();
let res = TxClient::<TxCommit>::transaction(
&mut client,
fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ADDR,
cetf_actor::Method::AddValidator as u64,
RawBytes::serialize(cetf_actor::AddValidatorParams {
address: f1_addr,
public_key: fendermint_actor_cetf::BlsPublicKey(
bls_pk
.as_bytes()
.try_into()
.expect("Failed to convert BLS public key to bytes"),
),
})
.expect("failed to serialize params"),
TokenAmount::from_whole(0),
GAS_PARAMS.clone(),
)
.await
.expect("transfer failed");
assert!(res.response.check_tx.code.is_ok(), "check is ok");
assert!(res.response.tx_result.code.is_ok(), "deliver is ok");
assert!(res.return_data.is_some());
}
Commands::RegisteredKeys => {
let QueryResponse { height, value } = client
.actor_state(
&fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ADDR,
FvmQueryHeight::default(),
)
.await
.expect("failed to get cetf actor state");
let (id, act_state) = value.expect("cetf actor state not found");
tracing::info!("Get Cetf State (id: {}) at height {}", id, height);
let state: CetfActorState = store
.get_cbor(&act_state.state)
.expect("failed to get cetf actor")
.expect("no actor state found");
let validator_map = cetf_actor::state::ValidatorBlsPublicKeyMap::load(
store,
&state.validators,
DEFAULT_HAMT_CONFIG,
"load validator hamt",
)
.expect("failed to load validator hamt");
validator_map
.for_each(|k, v| {
tracing::info!("Validator: {}, Bls: {:?}", k, v);
Ok(())
})
.expect("failed to iterate validator hamt");
}
Commands::QueueTag => {
let to_queue: [u8; 32] = std::array::from_fn(|i| i as u8);
let params = RawBytes::serialize(cetf_actor::EnqueueTagParams {
tag: to_queue.into(),
})
.expect("failed to serialize params");
tracing::info!("CBOR encoded input should look like: {:?}", params);
let res = TxClient::<TxCommit>::transaction(
&mut client,
fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ADDR,
cetf_actor::Method::EnqueueTag as u64,
params,
TokenAmount::from_whole(0),
GAS_PARAMS.clone(),
)
.await
.expect("transfer failed");
assert!(res.response.check_tx.code.is_ok(), "check is ok");
assert!(res.response.tx_result.code.is_ok(), "deliver is ok");
assert!(res.return_data.is_some());
let scheduled_epoch: u64 = res
.return_data
.expect("no return data")
.deserialize()
.expect("failed to deserialize return data");
tracing::info!("Scheduled epoch: {}", scheduled_epoch);
}
Commands::DeployDemoContract => {
let spec: serde_json::Value = serde_json::from_str(DEMO_CONTRACT_SPEC_JSON)?;
let example_contract = hex::decode(
&spec["bytecode"]["object"]
.as_str()
.expect("missing bytecode")
.trim_start_matches("0x"),
)?;
tracing::info!("Deploying Example Contract");
let res = TxClient::<TxCommit>::fevm_create(
&mut client,
Bytes::from(example_contract),
Bytes::default(),
TokenAmount::default(),
GAS_PARAMS.clone(),
)
.await
.expect("error deploying contract");
tracing::info!(tx_hash = ?res.response.hash, "deployment transaction");
let ret = res
.return_data
.ok_or(anyhow!(
"no CreateReturn data; response was {:?}",
res.response
))
.expect("failed to get CreateReturn data");
let address = ret.eth_address;
tracing::info!(address = ?address, "contract deployed");
}
Commands::CallReleaseKeys { address } => {
let contract = delorean_contract(&address);
let call = contract.release_key();
tracing::info!("Calling releaeKeys on contract at address {}", address);
let result: bool = invoke_or_call_contract(&mut client, &address, call, true).await?;
tracing::info!(result = ?result, "contract call result");
}
Commands::Encrypt { contract_address, output } => {
let signing_tag = retrieve_signing_tag(&mut client, &contract_address).await?;
tracing::info!("Retrieved signing tag 0x{} from contract", hex::encode(&signing_tag));
let agg_pubkey = get_agg_pubkey(&client, &store).await?;
tracing::info!("Computed aggregate BLS pubkey 0x{}", hex::encode(&agg_pubkey.as_bytes()));
tracing::info!("Encrypting...");
// encrypt whatever is on std-in into our armor writer
let mut armored = tlock_age::armor::ArmoredWriter::wrap_output(vec![]).unwrap();
tlock_age::encrypt(
&mut armored,
std::io::stdin().lock(),
&[0x0; 32], // I think this can be anything..
&agg_pubkey.as_bytes(),
signing_tag,
)?;
let encrypted = armored.finish().unwrap();
// write the encrypted data to the output file
std::fs::write(&output, &encrypted).expect("failed to write output file");
tracing::info!("Done!");
}
Commands::Decrypt { contract_address, output } => {
let signing_tag = retrieve_signing_tag(&mut client, &contract_address).await?;
tracing::info!("Retrieved signing tag 0x{} from contract", hex::encode(&signing_tag));
tracing::info!("Attempting to retrieve signature for tag");
let sig_bytes = get_signature_for_tag(&client, &store, signing_tag).await?;
tracing::info!("Got key/signature 0x{}", hex::encode(sig_bytes.0));
let mut decrypted = vec![];
tlock_age::decrypt(
&mut decrypted,
std::io::stdin().lock(),
&[0x0; 32],
&sig_bytes.0,
)?;
if let Some(output) = output {
std::fs::write(&output, &decrypted).expect("failed to write output file");
} else {
std::io::stdout().write_all(&decrypted).expect("failed to write to stdout");
}
}
Commands::TestIfHeightsAreSignedProperly => {
let QueryResponse { height, value } = client
.actor_state(
&fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ADDR,
FvmQueryHeight::default(),
)
.await
.expect("failed to get cetf actor state");
let (id, act_state) = value.expect("cetf actor state not found");
tracing::info!("Get Cetf State (id: {}) at height {}", id, height);
let state: CetfActorState = store
.get_cbor(&act_state.state)
.expect("failed to get cetf actor")
.expect("no actor state found");
let height: u64 = height.into();
let height = height - 1u64;
// Get all the validators BLS keys
let mut bls_keys_bytes = vec![];
let validator_map = cetf_actor::state::ValidatorBlsPublicKeyMap::load(
store.clone(),
&state.validators,
DEFAULT_HAMT_CONFIG,
"load validator hamt",
)
.expect("failed to load validator hamt");
validator_map
.for_each(|_k, v| {
bls_keys_bytes.push(*v);
Ok(())
})
.expect("failed to iterate validator hamt");
// Find the Signature of the Hashed Tag
let tag_bytes = height.to_be_bytes().to_vec();
let mut hasher = Sha256::new();
hasher.update(&tag_bytes);
let digest: [u8; 32] = hasher.finalize().into();
let signed_hashed_tag = cetf_actor::state::SignedHashedTagMap::load(
store.clone(),
&state.signed_hashed_tags,
DEFAULT_HAMT_CONFIG,
"load signed hashed tags",
)
.expect("failed to load signed hashed tags");
let sig_bytes: BlsSignature = *signed_hashed_tag
.get(&digest.into())
.expect("failed to get signature from signed hashed tag")
.expect("signature not found");
let sig = bls_signatures::Signature::from_bytes(&sig_bytes.0)
.expect("failed to parse signature from bytes");
let pub_keys = bls_keys_bytes
.iter()
.map(|b| {
bls_signatures::PublicKey::from_bytes(&b.0)
.expect("failed to parse public key from bytes")
})
.collect::<Vec<_>>();
// tracing::info!("Public Keys: {:?}", pub_keys);
tracing::info!("Tag: {:?}", tag_bytes);
tracing::info!("Hashed Tag: {:?}", digest);
tracing::info!("Signature: {:?}", sig_bytes.0);
assert!(
bls_signatures::verify_messages(&sig, &[tag_bytes.as_slice()], &pub_keys),
"Signature is invalid"
);
}
}
Ok(())
}
/// Invoke FEVM through Tendermint with the calldata encoded by ethers, decoding the result into the expected type.
async fn invoke_or_call_contract<T: Tokenizable>(
client: &mut (impl TxClient<TxCommit> + CallClient),
contract_eth_addr: &str,
call: MockContractCall<T>,
in_transaction: bool,
) -> anyhow::Result<T> {
let calldata: ethers::types::Bytes = call
.calldata()
.expect("calldata should contain function and parameters");
let contract_addr = eth_addr_to_eam(contract_eth_addr);
// We can perform the read as a distributed transaction (if we don't trust any particular node to give the right answer),
// or we can send a query with the same message and get a result without involving a transaction.
let return_data = if in_transaction {
let res = client
.fevm_invoke(
contract_addr,
calldata.0,
TokenAmount::default(),
GAS_PARAMS.clone(),
)
.await
.context("failed to invoke FEVM")?;
// tracing::info!(tx_hash = ?res.response.hash, "invoked transaction");
res.return_data
} else {
let res = client
.fevm_call(
contract_addr,
calldata.0,
TokenAmount::default(),
GAS_PARAMS.clone(),
FvmQueryHeight::default(),
)
.await
.context("failed to call FEVM")?;
res.return_data
};
let bytes = return_data.ok_or(anyhow!("Contract returned error. Key release denied."))?;
let res = decode_function_data(&call.function, bytes, false)
.context("error deserializing return data")?;
Ok(res)
}
/// Get the next sequence number (nonce) of an account.
async fn sequence(client: &impl QueryClient, addr: &Address) -> anyhow::Result<u64> {
let state = client
.actor_state(&addr, FvmQueryHeight::default())
.await
.context("failed to get actor state")?;
match state.value {
Some((_id, state)) => Ok(state.sequence),
None => Err(anyhow!("cannot find actor {addr}")),
}
}
/// Create an instance of the statically typed contract client.
fn delorean_contract(contract_eth_addr: &str) -> DeloreanContract<MockProvider> {
// A dummy client that we don't intend to use to call the contract or send transactions.
let (client, _mock) = ethers::providers::Provider::mocked();
let contract_h160_addr = ethers::core::types::Address::from_slice(
hex::decode(contract_eth_addr.trim_start_matches("0x"))
.unwrap()
.as_slice(),
);
let contract = DeloreanContract::new(contract_h160_addr, std::sync::Arc::new(client));
contract
}
/// Retrive the signing tag from a deployed Demo contract given its address
async fn retrieve_signing_tag(
client: &mut (impl TxClient<TxCommit> + CallClient),
contract_eth_addr: &str,
) -> anyhow::Result<[u8; 32]> {
let contract = delorean_contract(contract_eth_addr);
let call = contract.signing_tag();
let signing_tag: [u8; 32] = invoke_or_call_contract(client, contract_eth_addr, call, true)
.await
.context("failed to call contract")?;
Ok(signing_tag)
}
async fn get_agg_pubkey(
client: &impl QueryClient,
store: &RemoteBlockstore<FendermintClient>,
) -> anyhow::Result<bls_signatures::PublicKey> {
let QueryResponse { height, value } = client
.actor_state(
&fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ADDR,
FvmQueryHeight::default(),
)
.await
.expect("failed to get cetf actor state");
let (id, act_state) = value.expect("cetf actor state not found");
// tracing::info!("Get Cetf State (id: {}) at height {}", id, height);
let state: CetfActorState = store
.get_cbor(&act_state.state)
.expect("failed to get cetf actor")
.expect("no actor state found");
// Get all the validators BLS keys
let mut bls_keys_bytes = vec![];
let validator_map = cetf_actor::state::ValidatorBlsPublicKeyMap::load(
store.clone(),
&state.validators,
DEFAULT_HAMT_CONFIG,
"load validator hamt",
)
.expect("failed to load validator hamt");
validator_map
.for_each(|_k, v| {
bls_keys_bytes.push(*v);
Ok(())
})
.expect("failed to iterate validator hamt");
let pub_keys = bls_keys_bytes
.iter()
.map(|b| {
bls_signatures::PublicKey::from_bytes(&b.0)
.expect("failed to parse public key from bytes")
})
.collect::<Vec<_>>();
Ok(bls_signatures::aggregate_keys(&pub_keys).expect("failed to aggregate public keys"))
}
async fn get_signature_for_tag(
client: &impl QueryClient,
store: &RemoteBlockstore<FendermintClient>,
signing_tag: [u8; 32],
) -> anyhow::Result<cetf_actor::BlsSignature> {
let QueryResponse { height, value } = client
.actor_state(
&fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ADDR,
FvmQueryHeight::default(),
)
.await
.expect("failed to get cetf actor state");
let (id, act_state) = value.expect("cetf actor state not found");
tracing::info!("Get Cetf State (id: {}) at height {}", id, height);
let state: CetfActorState = store
.get_cbor(&act_state.state)
.expect("failed to get cetf actor")
.expect("no actor state found");
let signed_hashed_tag = cetf_actor::state::SignedHashedTagMap::load(
store.clone(),
&state.signed_hashed_tags,
DEFAULT_HAMT_CONFIG,
"load signed hashed tags",
)
.expect("failed to load signed hashed tags");
Ok(*signed_hashed_tag
.get(&signing_tag.into())
.expect("failed to get signature from signed hashed tag")
.expect("signature not found"))
}
fn eth_addr_to_eam(eth_addr: &str) -> Address {
let eth_addr = hex::decode(eth_addr.trim_start_matches("0x")).expect("valid hex");
Address::new_delegated(eam::EAM_ACTOR_ID, ð_addr)
.expect("ETH address to delegated should work")
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/testing/snapshot-test/src/lib.rs | fendermint/testing/snapshot-test/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Run tests against multiple Fendermint+CometBFT docker container pairs locally:
//! 0. The default `snapshot-fendermint` and `snapshot-cometbft` pair
//! 1. A `snapshot-cometbft-1` and `snapshot-cometbft-2`, using `scripts/node-1.env` and `node-2`.env,
//! syncing with the default node from genesis on a block-by-block basis, and clear out their history
//! to force others who sync with them to use snapshots.
//! 2. A `snapshot-cometbft-3` using `scripts/node-3.env`,
//! which syncs with `node-1` and `node-2` using snapshots (a.k.a. state sync).
//!
//! Note that CometBFT state sync requires 2 RPC servers, which is why we need 3 nodes.
//!
//! See <https://docs.cometbft.com/v0.37/core/state-sync> and <https://docs.cometbft.com/v0.37/core/configuration>
//!
//! Examples:
//!
//! 1. All in one go
//! ```text
//! cd fendermint/testing/snapshot-test
//! cargo make
//! ```
//!
//! 2. One by one
//! ```text
//! cd fendermint/testing/snapshot-test
//! cargo make setup
//! cargo make node-1-setup
//! cargo make node-2-setup
//! cargo make node-3-setup
//! docker logs snapshot-cometbft-3
//! cargo make snapshot-teardown
//! cargo make teardown
//! ```
//!
//! Make sure you installed cargo-make by running `cargo install cargo-make` first.
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/lib.rs | fendermint/eth/api/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::anyhow;
use axum::routing::{get, post};
use fvm_shared::econ::TokenAmount;
use jsonrpc_v2::Data;
use std::{net::ToSocketAddrs, sync::Arc, time::Duration};
use tower_http::cors::CorsLayer;
mod apis;
mod cache;
mod client;
mod conv;
mod error;
mod filters;
mod gas;
mod handlers;
mod mpool;
mod state;
pub use client::{HybridClient, HybridClientDriver};
use error::{error, JsonRpcError};
use state::{JsonRpcState, Nonce};
/// This is passed to every method handler. It's generic in the client type to facilitate testing with mocks.
type JsonRpcData<C> = Data<JsonRpcState<C>>;
type JsonRpcServer = Arc<jsonrpc_v2::Server<jsonrpc_v2::MapRouter>>;
type JsonRpcResult<T> = Result<T, JsonRpcError>;
/// This is the state we will pass to [axum] so that we can extract it in handlers.
#[derive(Clone)]
pub struct AppState {
pub rpc_server: JsonRpcServer,
pub rpc_state: Arc<JsonRpcState<HybridClient>>,
}
#[derive(Debug, Clone)]
pub struct GasOpt {
pub min_gas_premium: TokenAmount,
pub num_blocks_max_prio_fee: u64,
pub max_fee_hist_size: u64,
}
/// Start listening to JSON-RPC requests.
pub async fn listen<A: ToSocketAddrs>(
listen_addr: A,
client: HybridClient,
filter_timeout: Duration,
cache_capacity: usize,
max_nonce_gap: Nonce,
gas_opt: GasOpt,
) -> anyhow::Result<()> {
if let Some(listen_addr) = listen_addr.to_socket_addrs()?.next() {
let rpc_state = Arc::new(JsonRpcState::new(
client,
filter_timeout,
cache_capacity,
max_nonce_gap,
gas_opt,
));
// Start the transaction cache pruning subscription.
mpool::start_tx_cache_clearing(
rpc_state.client.clone(),
rpc_state.tx_cache.clone(),
rpc_state.tx_buffer.clone(),
);
let rpc_server = make_server(rpc_state.clone());
let app_state = AppState {
rpc_server,
rpc_state,
};
let router = make_router(app_state);
let server = axum::Server::try_bind(&listen_addr)?.serve(router.into_make_service());
tracing::info!(?listen_addr, "bound Ethereum API");
server.await?;
Ok(())
} else {
Err(anyhow!("failed to convert to any socket address"))
}
}
/// Register method handlers with the JSON-RPC server construct.
fn make_server(state: Arc<JsonRpcState<HybridClient>>) -> JsonRpcServer {
let server = jsonrpc_v2::Server::new().with_data(Data(state));
let server = apis::register_methods(server);
server.finish()
}
/// Register routes in the `axum` HTTP router to handle JSON-RPC and WebSocket calls.
fn make_router(state: AppState) -> axum::Router {
axum::Router::new()
.route("/", post(handlers::http::handle))
.route("/", get(handlers::ws::handle))
.layer(CorsLayer::permissive())
.with_state(state)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/filters.rs | fendermint/eth/api/src/filters.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
collections::HashMap,
pin::Pin,
sync::Arc,
time::{Duration, Instant},
};
use anyhow::{anyhow, Context};
use ethers_core::types as et;
use fendermint_rpc::{client::FendermintClient, query::QueryClient};
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_message::{chain::ChainMessage, query::FvmQueryHeight, signed::DomainHash};
use futures::{Future, StreamExt};
use fvm_shared::{address::Address, chainid::ChainID, error::ExitCode};
use lru_time_cache::LruCache;
use serde::Serialize;
use tendermint_rpc::{
event::{Event, EventData},
query::{EventType, Query},
Client, Subscription,
};
use tokio::sync::{
mpsc::{Receiver, Sender},
RwLock,
};
use crate::{
conv::from_tm::{self, find_hash_event, map_rpc_block_txs, msg_hash, tx_hash},
error::JsonRpcError,
handlers::ws::{MethodNotification, Notification},
state::{enrich_block, WebSocketSender},
JsonRpcResult,
};
/// Check whether to keep a log according to the topic filter.
///
/// A note on specifying topic filters: Topics are order-dependent.
/// A transaction with a log with topics [A, B] will be matched by the following topic filters:
/// * [] "anything"
/// * [A] "A in first position (and anything after)"
/// * [null, B] "anything in first position AND B in second position (and anything after)"
/// * [A, B] "A in first position AND B in second position (and anything after)"
/// * [[A, B], [A, B]] "(A OR B) in first position AND (A OR B) in second position (and anything after)"
pub fn matches_topics(filter: &et::Filter, log: &et::Log) -> bool {
for i in 0..4 {
if let Some(topics) = &filter.topics[i] {
let topic = log.topics.get(i);
let matches = match topics {
et::ValueOrArray::Value(Some(t)) => topic == Some(t),
et::ValueOrArray::Array(ts) => ts.iter().flatten().any(|t| topic == Some(t)),
_ => true,
};
if !matches {
return false;
}
}
}
true
}
pub type FilterId = et::U256;
pub type FilterMap = Arc<RwLock<HashMap<FilterId, Sender<FilterCommand>>>>;
pub type BlockHash = et::H256;
pub enum FilterCommand {
/// Update the records with an event, coming from one of the Tendermint subscriptions.
Update(Event),
/// One of the subscriptions has ended, potentially with an error.
Finish(Option<tendermint_rpc::Error>),
/// Take the accumulated records, coming from the API consumer.
Take(tokio::sync::oneshot::Sender<anyhow::Result<Option<FilterRecords<BlockHash>>>>),
/// The API consumer is no longer interested in taking the records.
Uninstall,
}
pub enum FilterKind {
NewBlocks,
PendingTransactions,
Logs(Box<et::Filter>),
}
impl FilterKind {
/// Convert an Ethereum filter to potentially multiple Tendermint queries.
///
/// One limitation with Tendermint is that it only handles AND condition
/// in filtering, so if the filter contains arrays, we have to make a
/// cartesian product of all conditions in it and subscribe individually.
///
/// https://docs.tendermint.com/v0.34/rpc/#/Websocket/subscribe
pub fn to_queries(&self) -> Vec<Query> {
match self {
FilterKind::NewBlocks => vec![Query::from(EventType::NewBlock)],
// Testing indicates that `EventType::Tx` might only be raised
// if there are events emitted by the transaction itself.
FilterKind::PendingTransactions => vec![Query::from(EventType::NewBlock)],
FilterKind::Logs(filter) => {
// `Query::from(EventType::Tx)` doesn't seem to combine well with non-standard keys.
// But `Query::default()` doesn't return anything if we subscribe to `Filter::default()`.
let mut query = if filter.has_topics() || filter.address.is_some() {
Query::default()
} else {
Query::from(EventType::Tx)
};
if let Some(_block_hash) = filter.get_block_hash() {
// Currently we only use these filters for subscribing to future events,
// we don't go back to retireve past ones (although I think Lotus does that).
// As such, it is impossible to subscribe to future block hashes, they are unknown.
// We could add a `block.hash` to the index, but there are other ways to find transactions
// in a block, so it would be storing data for little reason.
}
if let Some(from_block) = filter.get_from_block() {
query = query.and_gte("tx.height", from_block.as_u64());
}
if let Some(to_block) = filter.get_to_block() {
query = query.and_lte("tx.height", to_block.as_u64());
}
let mut queries = vec![query];
let addrs = match &filter.address {
None => vec![],
Some(et::ValueOrArray::Value(addr)) => vec![*addr],
Some(et::ValueOrArray::Array(addrs)) => addrs.clone(),
};
// We need to turn the Ethereum addresses f410 addresses, which is something we asked CometBFT to index
// so that we can use it for filtering.
let addrs = addrs
.into_iter()
.map(|addr| Address::from(EthAddress(addr.0)))
.collect::<Vec<_>>();
if !addrs.is_empty() {
queries = addrs
.iter()
.flat_map(|addr| {
queries.iter().flat_map(|q| {
let mut emitters = if let Ok(id) = addr.id() {
// If it was a masked ID.
vec![q.clone().and_eq("event.emitter.id", id.to_string())]
} else {
vec![q.clone().and_eq("event.emitter.deleg", addr.to_string())]
};
emitters.push(q.clone().and_eq("message.from", addr.to_string()));
emitters.push(q.clone().and_eq("message.to", addr.to_string()));
emitters
})
})
.collect();
};
for i in 0..4 {
if let Some(Some(topics)) = filter.topics.get(i) {
let topics = match topics {
et::ValueOrArray::Value(Some(t)) => vec![t],
et::ValueOrArray::Array(ts) => ts.iter().flatten().collect(),
_ => vec![],
};
if !topics.is_empty() {
let key = format!("event.t{}", i + 1);
queries = topics
.into_iter()
.flat_map(|t| {
queries
.iter()
.map(|q| q.clone().and_eq(&key, hex::encode(t.0)))
})
.collect();
}
}
}
queries
}
}
}
}
/// Accumulator for filter data.
///
/// The type expected can be seen in [ethers::providers::Provider::watch_blocks].
pub enum FilterRecords<B> {
NewBlocks(Vec<B>),
PendingTransactions(Vec<et::TxHash>),
Logs(Vec<et::Log>),
}
impl<B> FilterRecords<B>
where
    B: Serialize,
{
    /// Create an empty accumulator matching the kind of the filter.
    pub fn new(value: &FilterKind) -> Self {
        match value {
            FilterKind::NewBlocks => Self::NewBlocks(vec![]),
            FilterKind::PendingTransactions => Self::PendingTransactions(vec![]),
            FilterKind::Logs(_) => Self::Logs(vec![]),
        }
    }
    /// Take the accumulated records, leaving an empty accumulator of the same variant in place.
    fn take(&mut self) -> Self {
        let mut records = match self {
            Self::NewBlocks(_) => Self::NewBlocks(vec![]),
            Self::PendingTransactions(_) => Self::PendingTransactions(vec![]),
            Self::Logs(_) => Self::Logs(vec![]),
        };
        std::mem::swap(self, &mut records);
        records
    }
    /// Whether anything has been accumulated since the last `take`.
    pub fn is_empty(&self) -> bool {
        match self {
            Self::NewBlocks(xs) => xs.is_empty(),
            Self::PendingTransactions(xs) => xs.is_empty(),
            Self::Logs(xs) => xs.is_empty(),
        }
    }
    /// Serialize each accumulated record into a JSON value.
    pub fn to_json_vec(&self) -> anyhow::Result<Vec<serde_json::Value>> {
        match self {
            Self::Logs(xs) => to_json_vec(xs),
            Self::NewBlocks(xs) => to_json_vec(xs),
            Self::PendingTransactions(xs) => to_json_vec(xs),
        }
    }
    /// Accumulate the events.
    ///
    /// `to_block` converts a raw Tendermint block into the representation `B`,
    /// e.g. just its hash, or a fully enriched Ethereum block.
    /// Events that don't match the accumulator variant are silently ignored.
    async fn update<F>(
        &mut self,
        event: Event,
        to_block: F,
        chain_id: &ChainID,
        filter: &Option<et::Filter>,
    ) -> anyhow::Result<()>
    where
        F: FnOnce(
            Box<tendermint::Block>,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<B>> + Send>>,
    {
        match (self, event.data) {
            (
                Self::NewBlocks(ref mut blocks),
                EventData::NewBlock {
                    block: Some(block), ..
                },
            ) => {
                let b: B = to_block(block).await?;
                blocks.push(b);
            }
            (
                Self::PendingTransactions(ref mut hashes),
                EventData::NewBlock {
                    block: Some(block), ..
                },
            ) => {
                // Only signed messages with an Ethereum-compatible domain hash are reported.
                for tx in &block.data {
                    if let Ok(ChainMessage::Signed(msg)) = fvm_ipld_encoding::from_slice(tx) {
                        if let Ok(Some(DomainHash::Eth(h))) = msg.domain_hash(chain_id) {
                            hashes.push(et::TxHash::from(h))
                        }
                    }
                }
            }
            (Self::Logs(ref mut logs), EventData::Tx { tx_result }) => {
                // An example of an `Event`:
                // Event {
                //     query: "tm.event = 'Tx'",
                //     data: Tx {
                //         tx_result: TxInfo {
                //             height: 1088,
                //             index: None,
                //             tx: [161, 102, ..., 0],
                //             result: TxResult {
                //                 log: None,
                //                 gas_wanted: Some("5156433"),
                //                 gas_used: Some("5151233"),
                //                 events: [
                //                     Event {
                //                         kind: "event",
                //                         attributes: [
                //                             EventAttribute { key: "emitter.id", value: "108", index: true },
                //                             EventAttribute { key: "t1", value: "dd...b3ef", index: true },
                //                             EventAttribute { key: "t2", value: "00...362f", index: true },
                //                             EventAttribute { key: "t3", value: "00...44eb", index: true },
                //                             EventAttribute { key: "d", value: "00...0064", index: true }
                //                         ]
                //                     }
                //                 ]
                //             }
                //         }
                //     },
                //     events: Some(
                //         {
                //             "event.d": ["00...0064"],
                //             "event.emitter.id": ["108"],
                //             "event.t1": ["dd...b3ef"],
                //             "event.t2": ["00...362f"],
                //             "event.t3": ["00...44eb"],
                //             "tm.event": ["Tx"],
                //             "tx.hash": ["FA7339B4D9F6AF80AEDB03FC4BFBC1FDD9A62F97632EF8B79C98AAD7044C5BDB"],
                //             "tx.height": ["1088"]
                //         })
                // }
                // There is no easy way here to tell the block hash. Maybe it has been given in a preceding event,
                // but other than that our only option is to query the Tendermint API. If we do that we should have caching,
                // otherwise all the transactions in a block hammering the node will act like a DoS attack.
                // Or we can add it to the indexed fields.
                let block_hash =
                    find_hash_event("block", &tx_result.result.events).unwrap_or_default();
                let block_number = et::U64::from(tx_result.height);
                let transaction_hash = msg_hash(&tx_result.result.events, &tx_result.tx);
                // TODO: The transaction index comes as None.
                let transaction_index = et::U64::from(tx_result.index.unwrap_or_default());
                // TODO: We have no way to tell where the logs start within the block.
                let log_index_start = Default::default();
                let mut tx_logs = from_tm::to_logs(
                    &tx_result.result.events,
                    block_hash,
                    block_number,
                    transaction_hash,
                    transaction_index,
                    log_index_start,
                )?;
                // Topic filtering is done client side because the Tendermint queries
                // only cover a subset of the possible topic combinations.
                if let Some(filter) = filter {
                    tx_logs.retain(|log| matches_topics(filter, log));
                }
                logs.extend(tx_logs)
            }
            _ => {}
        }
        Ok(())
    }
}
/// Serialize a slice of records into a vector of JSON values.
fn to_json_vec<R: Serialize>(records: &[R]) -> anyhow::Result<Vec<serde_json::Value>> {
    let mut values = Vec::with_capacity(records.len());
    for record in records {
        let value =
            serde_json::to_value(record).context("failed to convert records to JSON")?;
        values.push(value);
    }
    Ok(values)
}
/// Owns the lifecycle of a single filter or subscription, consuming commands from producers.
pub struct FilterDriver {
    /// Unique, randomly assigned identifier of the filter.
    id: FilterId,
    /// What kind of data this filter collects (blocks, pending transactions, or logs).
    kind: FilterKind,
    /// Whether this is a polled filter or a push based web socket subscription.
    state: FilterState,
    /// Commands sent by the Tendermint subscription tasks and by API readers.
    rx: Receiver<FilterCommand>,
}
/// The two delivery modes a filter can operate in.
enum FilterState {
    /// Changes are accumulated and handed out in batches when the client polls.
    Poll(PollState),
    /// Changes are pushed to a web socket as soon as they arrive.
    Subscription(SubscriptionState),
}
/// Accumulate changes between polls.
///
/// Polling returns batches.
struct PollState {
    /// How long the filter may go unpolled before it is considered timed out and removed.
    timeout: Duration,
    /// When the reader last took the accumulated records.
    last_poll: Instant,
    /// `Some` once the producers stopped; the inner error (if any) is handed
    /// to the reader on its next poll.
    finished: Option<Option<anyhow::Error>>,
    /// Changes accumulated since the last poll.
    records: FilterRecords<BlockHash>,
}
/// Send changes to a WebSocket as soon as they happen, one by one, not in batches.
struct SubscriptionState {
    /// Channel towards the web socket handler that forwards notifications to the client.
    ws_sender: WebSocketSender,
}
impl FilterDriver {
pub fn new(
id: FilterId,
timeout: Duration,
kind: FilterKind,
ws_sender: Option<WebSocketSender>,
) -> (Self, Sender<FilterCommand>) {
let (tx, rx) = tokio::sync::mpsc::channel(10);
let state = match ws_sender {
Some(ws_sender) => FilterState::Subscription(SubscriptionState { ws_sender }),
None => FilterState::Poll(PollState {
timeout,
last_poll: Instant::now(),
finished: None,
records: FilterRecords::new(&kind),
}),
};
let r = Self {
id,
kind,
state,
rx,
};
(r, tx)
}
    /// The unique identifier of this filter.
    pub fn id(&self) -> FilterId {
        self.id
    }
    /// Consume commands until some end condition is met.
    ///
    /// In the end the filter removes itself from the registry.
    pub async fn run<C>(mut self, filters: FilterMap, client: FendermintClient<C>)
    where
        C: Client + Send + Sync + Clone + 'static,
    {
        let id = self.id;
        tracing::info!(?id, "handling filter events");
        // Get the Chain ID once. In practice it will not change and will last the entire session.
        let chain_id = client
            .state_params(FvmQueryHeight::default())
            .await
            .map(|state_params| ChainID::from(state_params.value.chain_id));
        // Logs need to be filtered by topics.
        let filter = if let FilterKind::Logs(ref filter) = self.kind {
            Some(filter.as_ref().to_owned())
        } else {
            None
        };
        // Because there are multiple potentially overlapping subscriptions, we might see the same transaction twice,
        // e.g. because we were interested in ones that emit events "A or B" we had to subscribe to "A" and also to "B",
        // so if a transaction emits both "A" and "B" we'll get it twice. Most likely they will be at the same time,
        // so a short time based cache should help get rid of the duplicates.
        let mut tx_cache: LruCache<tendermint::Hash, bool> =
            LruCache::with_expiry_duration(Duration::from_secs(60));
        while let Some(cmd) = self.rx.recv().await {
            // Skip duplicate transactions. We won't see duplicate blocks because there is only 1 query for that.
            if let FilterCommand::Update(ref event) = cmd {
                if let EventData::Tx { ref tx_result } = event.data {
                    let tx_hash = tx_hash(&tx_result.tx);
                    if tx_cache.insert(tx_hash, true).is_some() {
                        continue;
                    }
                }
            }
            match self.state {
                FilterState::Poll(ref mut state) => {
                    match cmd {
                        FilterCommand::Update(event) => {
                            if state.is_timed_out() {
                                tracing::debug!(?id, "filter timed out");
                                return self.remove(filters).await;
                            }
                            if state.is_finished() {
                                // Not returning to allow the consumer to get final results.
                                continue;
                            }
                            let res = match &chain_id {
                                Ok(chain_id) => {
                                    state
                                        .records
                                        .update(
                                            event,
                                            // Polled filters only need the block hash, not the whole block.
                                            |block| {
                                                Box::pin(async move {
                                                    Ok(et::H256::from_slice(
                                                        block.header().hash().as_bytes(),
                                                    ))
                                                })
                                            },
                                            chain_id,
                                            &filter,
                                        )
                                        .await
                                }
                                Err(e) => Err(anyhow!("failed to get chain ID: {e}")),
                            };
                            if let Err(err) = res {
                                tracing::error!(?id, "failed to update filter: {err}");
                                state.finish(Some(anyhow!("failed to update filter: {err}")));
                            }
                        }
                        FilterCommand::Finish(err) => {
                            tracing::debug!(?id, "filter producer finished: {err:?}");
                            state.finish(err.map(|e| anyhow!("subscription failed: {e}")))
                        }
                        FilterCommand::Take(tx) => {
                            let result = state.try_take();
                            // Once the producers are done (or an error occurred) the filter
                            // serves its last batch and removes itself.
                            let remove = match result {
                                Ok(None) | Err(_) => true,
                                Ok(Some(_)) => false,
                            };
                            let _ = tx.send(result);
                            if remove {
                                tracing::debug!(?id, "filter finished");
                                return self.remove(filters).await;
                            }
                        }
                        FilterCommand::Uninstall => {
                            tracing::debug!(?id, "filter uninstalled");
                            return self.remove(filters).await;
                        }
                    }
                }
                FilterState::Subscription(ref state) => match cmd {
                    FilterCommand::Update(event) => {
                        // Subscriptions push fully enriched blocks, so a fresh accumulator
                        // is created per event and flushed immediately.
                        let mut records = FilterRecords::<et::Block<et::TxHash>>::new(&self.kind);
                        let res = match &chain_id {
                            Ok(chain_id) => {
                                records
                                    .update(
                                        event,
                                        |block| {
                                            let client = client.clone();
                                            Box::pin(async move {
                                                let block =
                                                    enrich_block_with_retry(&client, &block)
                                                        .await
                                                        .context(
                                                            "failed to enrich block in event",
                                                        )?;
                                                let block: anyhow::Result<et::Block<et::TxHash>> =
                                                    map_rpc_block_txs(block, |tx| Ok(tx.hash()));
                                                block
                                            })
                                        },
                                        chain_id,
                                        &filter,
                                    )
                                    .await
                            }
                            Err(e) => Err(anyhow!("failed to get chain ID: {e}")),
                        };
                        match res {
                            Err(e) => {
                                send_error(
                                    &state.ws_sender,
                                    ExitCode::USR_UNSPECIFIED,
                                    format!("failed to process events: {e}"),
                                    id,
                                );
                            }
                            Ok(()) => match records.to_json_vec() {
                                Err(e) => tracing::error!("failed to convert events to JSON: {e}"),
                                Ok(records) => {
                                    for rec in records {
                                        let msg: MethodNotification = notification(id, rec);
                                        if state.ws_sender.send(msg).is_err() {
                                            tracing::debug!(?id, "web socket no longer listening");
                                            return self.remove(filters).await;
                                        }
                                    }
                                }
                            },
                        }
                    }
                    FilterCommand::Finish(err) => {
                        tracing::debug!(?id, "subscription producer finished: {err:?}");
                        // We have already sent all updates to the socket.
                        // Make best effort to notify the socket.
                        if let Some(err) = err {
                            send_error(
                                &state.ws_sender,
                                ExitCode::USR_UNSPECIFIED,
                                format!("subscription finished with error: {err}"),
                                id,
                            );
                        }
                        // We know at least one subscription has failed, so might as well quit.
                        return self.remove(filters).await;
                    }
                    FilterCommand::Take(tx) => {
                        // This should not be used, but because we treat subscriptions and filters
                        // under the same umbrella, it is possible to send a request to get changes.
                        // Respond with empty, because all of the changes were already sent to the socket.
                        let _ = tx.send(Ok(Some(FilterRecords::new(&self.kind))));
                    }
                    FilterCommand::Uninstall => {
                        tracing::debug!(?id, "subscription uninstalled");
                        return self.remove(filters).await;
                    }
                },
            }
        }
    }
    /// Remove this filter from the shared registry, ending its lifecycle.
    async fn remove(self, filters: FilterMap) {
        filters.write().await.remove(&self.id);
    }
}
fn send_error(ws_sender: &WebSocketSender, exit_code: ExitCode, msg: String, id: FilterId) {
tracing::error!(?id, "sending error to WS: {msg}");
let err = JsonRpcError {
code: exit_code.value().into(),
message: msg,
data: None,
};
let err = jsonrpc_v2::Error::from(err);
match serde_json::to_value(err) {
Err(e) => tracing::error!("failed to convert JSON-RPC error to JSON: {e}"),
Ok(json) => {
// Ignoring the case where the socket is no longer there.
// Assuming that there will be another event to trigger removal.
let msg = notification(id, json);
let _ = ws_sender.send(msg);
}
}
}
/// Wrap a JSON payload into the notification envelope expected by Ethereum clients.
fn notification(subscription: FilterId, result: serde_json::Value) -> MethodNotification {
    let notification = Notification {
        subscription,
        result,
    };
    MethodNotification {
        // We know this is the only one at the moment.
        // The go-ethereum client checks that the suffix is "_subscription":
        // https://github.com/ethereum/go-ethereum/blob/92b8f28df3255c6cef9605063850d77b46146763/rpc/handler.go#L236C42-L236C42
        method: "eth_subscription".into(),
        notification,
    }
}
/// It looks like it might not be true that when we receive a `NewBlock` event from Tendermint,
/// (which includes begin and end events, so we can assume it's been executed), then it's safe
/// to query the API for the block results, which is what `enrich_block` does to fill stuff
/// like gas used, etc.
async fn enrich_block_with_retry<C: Client + Send + Sync>(
    client: &FendermintClient<C>,
    block: &tendermint::block::Block,
) -> JsonRpcResult<et::Block<et::Transaction>> {
    // TODO: Assuming at ~1 block time; move this to config.
    const SLEEP_SECS: u64 = 1;
    const MAX_ATTEMPT: u32 = 5;
    // Retry the enrichment up to MAX_ATTEMPT times, sleeping between attempts;
    // after that, return whatever the final attempt produces.
    for _ in 0..MAX_ATTEMPT {
        match enrich_block(client, block).await {
            Ok(enriched) => return Ok(enriched),
            Err(e) => {
                tracing::debug!(
                    error = e.to_string(),
                    height = block.header().height.value(),
                    "failed to enrich block; retrying..."
                );
                tokio::time::sleep(Duration::from_secs(SLEEP_SECS)).await;
            }
        }
    }
    enrich_block(client, block).await
}
impl PollState {
    /// Take all the accumulated changes.
    ///
    /// If there are no changes but there was an error, return that.
    /// If the producers have stopped, return `None`.
    fn try_take(&mut self) -> anyhow::Result<Option<FilterRecords<BlockHash>>> {
        self.last_poll = Instant::now();
        let records = self.records.take();
        if records.is_empty() {
            if let Some(ref mut finished) = self.finished {
                // Return error on first poll, because it can't be cloned.
                return match finished.take() {
                    Some(e) => Err(e),
                    None => Ok(None),
                };
            }
        }
        Ok(Some(records))
    }
    /// Signal that the producers are finished, or that the reader is no longer interested.
    ///
    /// Propagate the error to the reader next time it comes to check on the filter.
    fn finish(&mut self, error: Option<anyhow::Error>) {
        // Keep any already existing error.
        let error = self.finished.take().flatten().or(error);
        self.finished = Some(error);
    }
    /// Indicate whether the reader has been too slow at polling the filter
    /// and that it should be removed.
    fn is_timed_out(&self) -> bool {
        Instant::now().duration_since(self.last_poll) > self.timeout
    }
    /// Indicate that the filter takes no more data.
    fn is_finished(&self) -> bool {
        self.finished.is_some()
    }
}
/// Spawn a Tendermint subscription handler in a new task.
///
/// The subscription sends [Event] records to the driver over a channel.
/// Returns when the subscription ends, errors out, or the driver stops listening.
pub async fn run_subscription(id: FilterId, mut sub: Subscription, tx: Sender<FilterCommand>) {
    let query = sub.query().to_string();
    tracing::debug!(?id, query, "polling filter subscription");
    while let Some(result) = sub.next().await {
        match result {
            Ok(event) => {
                if tx.send(FilterCommand::Update(event)).await.is_err() {
                    // Filter has been uninstalled.
                    tracing::debug!(
                        ?id,
                        query,
                        "filter no longer listening, quiting subscription"
                    );
                    return;
                }
            }
            Err(err) => {
                // Propagate the error to the driver so it can surface it to the reader.
                tracing::error!(
                    ?id,
                    query,
                    error = ?err,
                    "filter subscription error"
                );
                let _ = tx.send(FilterCommand::Finish(Some(err))).await;
                return;
            }
        }
    }
    tracing::debug!(?id, query, "filter subscription finished");
    let _ = tx.send(FilterCommand::Finish(None)).await;
    // Dropping the `Subscription` should cause the client to unsubscribe,
    // if this was the last one interested in that query; we don't have to
    // call the unsubscribe method explicitly.
    // See https://docs.rs/tendermint-rpc/0.31.1/tendermint_rpc/client/struct.WebSocketClient.html
}
#[cfg(test)]
mod tests {
use ethers_core::types as et;
use super::FilterKind;
#[test]
fn default_filter_to_query() {
let filter = et::Filter::default();
let queries = FilterKind::Logs(Box::new(filter)).to_queries();
assert_eq!(queries.len(), 1);
assert_eq!(queries[0].to_string(), "tm.event = 'Tx'");
}
#[test]
fn filter_to_query() {
fn hash(s: &str) -> et::H256 {
et::H256::from(ethers_core::utils::keccak256(s))
}
fn hash_hex(s: &str) -> String {
hex::encode(hash(s))
}
let filter = et::Filter::new()
.select(1234..)
.address(
"0xb794f5ea0ba39494ce839613fffba74279579268"
.parse::<et::Address>()
.unwrap(),
)
.events(vec!["Foo", "Bar"])
.topic1(hash("Alice"))
.topic2(
vec!["Bob", "Charlie"]
.into_iter()
.map(hash)
.collect::<Vec<_>>(),
);
eprintln!("filter = {filter:?}");
assert_eq!(
filter.topics[0],
Some(et::ValueOrArray::Array(vec![
Some(hash("Foo")),
Some(hash("Bar"))
]))
);
let queries = FilterKind::Logs(Box::new(filter)).to_queries();
assert_eq!(queries.len(), 12);
let mut i = 0;
for t3 in ["Bob", "Charlie"] {
for t1 in ["Foo", "Bar"] {
for addr in ["event.emitter.deleg", "message.from", "message.to"] {
let q = queries[i].to_string();
let e = format!("tx.height >= 1234 AND {addr} = 'f410fw6kpl2qluokjjtudsyj7765hij4vpetitn2e2wq' AND event.t1 = '{}' AND event.t2 = '{}' AND event.t3 = '{}'", hash_hex(t1), hash_hex("Alice"), hash_hex(t3));
assert_eq!(q, e, "combination {i}");
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | true |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/state.rs | fendermint/eth/api/src/state.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Tendermint RPC helper methods for the implementation of the APIs.
use std::borrow::Cow;
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use anyhow::{anyhow, Context};
use cid::Cid;
use ethers_core::types::{self as et};
use fendermint_rpc::client::{FendermintClient, TendermintClient};
use fendermint_rpc::query::QueryClient;
use fendermint_vm_actor_interface::{evm, system};
use fendermint_vm_message::query::{ActorState, FvmQueryHeight};
use fendermint_vm_message::signed::DomainHash;
use fendermint_vm_message::{chain::ChainMessage, conv::from_eth::to_fvm_address};
use fvm_ipld_encoding::{de::DeserializeOwned, RawBytes};
use fvm_shared::{chainid::ChainID, econ::TokenAmount, error::ExitCode, message::Message};
use rand::Rng;
use tendermint::block::Height;
use tendermint_rpc::query::Query;
use tendermint_rpc::{
endpoint::{block, block_by_hash, block_results, commit, header, header_by_hash},
Client,
};
use tendermint_rpc::{Order, Subscription, SubscriptionClient};
use tokio::sync::mpsc::{Sender, UnboundedSender};
use tokio::sync::RwLock;
use crate::cache::{AddressCache, Cache};
use crate::conv::from_tm;
use crate::filters::{
run_subscription, BlockHash, FilterCommand, FilterDriver, FilterId, FilterKind, FilterMap,
FilterRecords,
};
use crate::handlers::ws::MethodNotification;
use crate::mpool::{TransactionBuffer, TransactionCache};
use crate::GasOpt;
use crate::{
conv::from_tm::{map_rpc_block_txs, to_chain_message, to_eth_block, to_eth_transaction},
error, JsonRpcResult,
};
/// How long to keep transactions in the caches.
const TX_CACHE_TTL_SECS: u64 = 5 * 60;
/// Identifier assigned to each connected web socket.
pub type WebSocketId = usize;
/// Channel used to push notifications to a web socket handler.
pub type WebSocketSender = UnboundedSender<MethodNotification>;
/// Actor sequence number (the Ethereum nonce).
pub type Nonce = u64;
// Made generic in the client type so we can mock it if we want to test API
// methods without having to spin up a server. In those tests the methods
// below would not be used, so those aren't generic; we'd directly invoke
// e.g. `fendermint_eth_api::apis::eth::accounts` with some mock client.
/// Shared state backing the Ethereum JSON-RPC API handlers.
pub struct JsonRpcState<C> {
    pub client: FendermintClient<C>,
    /// Cache of actor types keyed by address and by code CID.
    pub addr_cache: AddressCache<C>,
    /// Cache submitted transactions until they are added to a block.
    pub tx_cache: TransactionCache,
    /// Buffer out-of-order transactions until they can be submitted.
    pub tx_buffer: TransactionBuffer,
    /// How long a poll filter may go unpolled before it is removed.
    filter_timeout: Duration,
    /// Registry of active filters/subscriptions, keyed by filter ID.
    filters: FilterMap,
    /// Monotonic counter handing out web socket IDs.
    next_web_socket_id: AtomicUsize,
    /// Senders towards currently connected web sockets.
    web_sockets: RwLock<HashMap<WebSocketId, WebSocketSender>>,
    pub max_nonce_gap: Nonce,
    pub gas_opt: GasOpt,
}
impl<C> JsonRpcState<C>
where
    C: Client + Send + Sync + Clone,
{
    /// Construct the shared API state around a Tendermint client.
    pub fn new(
        client: C,
        filter_timeout: Duration,
        cache_capacity: usize,
        max_nonce_gap: Nonce,
        gas_opt: GasOpt,
    ) -> Self {
        let ttl = Duration::from_secs(TX_CACHE_TTL_SECS);
        let client = FendermintClient::new(client);
        Self {
            addr_cache: AddressCache::new(client.clone(), cache_capacity),
            tx_cache: Cache::new_with_ttl(cache_capacity, ttl),
            tx_buffer: TransactionBuffer(Cache::new_with_ttl(cache_capacity, ttl)),
            client,
            filter_timeout,
            filters: Default::default(),
            next_web_socket_id: Default::default(),
            web_sockets: Default::default(),
            gas_opt,
            max_nonce_gap,
        }
    }
}
impl<C> JsonRpcState<C> {
    /// The underlying Tendermint RPC client.
    pub fn tm(&self) -> &C {
        self.client.underlying()
    }
    /// Register the sender of a web socket, returning its freshly assigned ID.
    pub async fn add_web_socket(&self, tx: WebSocketSender) -> WebSocketId {
        let id = self.next_web_socket_id.fetch_add(1, Ordering::Relaxed);
        self.web_sockets.write().await.insert(id, tx);
        id
    }
    /// Remove the sender of a web socket.
    pub async fn remove_web_socket(&self, id: &WebSocketId) {
        self.web_sockets.write().await.remove(id);
    }
    /// Get the sender of a web socket, if it is still registered.
    pub async fn get_web_socket(&self, id: &WebSocketId) -> anyhow::Result<WebSocketSender> {
        match self.web_sockets.read().await.get(id) {
            Some(tx) => Ok(tx.clone()),
            None => Err(anyhow!("web socket not found")),
        }
    }
}
/// Represents the actor type of a concrete actor.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum ActorType {
    /// The queried actor does not exist in the state tree.
    Inexistent,
    /// The queried actor exists, and it's one of the built-in actor types.
    /// The name is the built-in actor registry key, e.g. "evm" or "ethaccount".
    Known(Cow<'static, str>),
    /// The queried actor exists, but it's not a built-in actor and therefore it cannot be identified.
    Unknown(Cid),
}
impl ActorType {
    /// Shorthand for the built-in EVM contract actor type.
    pub const EVM: ActorType = ActorType::Known(Cow::Borrowed("evm"));
    /// Shorthand for the built-in Ethereum account actor type.
    pub const ETH_ACCOUNT: ActorType = ActorType::Known(Cow::Borrowed("ethaccount"));
}
impl<C> JsonRpcState<C>
where
C: Client + Sync + Send,
{
/// Get the height of the latest commit.
pub async fn latest_height(&self) -> JsonRpcResult<tendermint::block::Height> {
let res: commit::Response = self.tm().latest_commit().await?;
// Return -1 so we don't risk having no data to serve.
let h = res.signed_header.header.height.value();
let h = h.saturating_sub(1);
Ok(Height::try_from(h).context("decrementing should be fine")?)
}
    /// Get the Tendermint block at a specific height.
    ///
    /// Height 0 maps to a synthetic "zero block"; symbolic heights
    /// (latest/safe/finalized/pending) resolve to the latest executed block.
    pub async fn block_by_height(
        &self,
        block_number: et::BlockNumber,
    ) -> JsonRpcResult<tendermint::Block> {
        let block = match block_number {
            et::BlockNumber::Number(height) if height == et::U64::from(0) => {
                from_tm::BLOCK_ZERO.clone()
            }
            et::BlockNumber::Number(height) => {
                let height =
                    Height::try_from(height.as_u64()).context("failed to convert to height")?;
                let res: block::Response = self.tm().block(height).await?;
                res.block
            }
            et::BlockNumber::Finalized
            | et::BlockNumber::Latest
            | et::BlockNumber::Safe
            | et::BlockNumber::Pending => {
                // Using 1 block less than latest so if this is followed up by `block_results` then we don't get an error.
                let commit: commit::Response = self.tm().latest_commit().await?;
                let height = commit.signed_header.header.height.value();
                // Clamp at 1 because Tendermint heights start there.
                let height = Height::try_from((height.saturating_sub(1)).max(1))
                    .context("failed to convert to height")?;
                let res: block::Response = self.tm().block(height).await?;
                res.block
            }
            et::BlockNumber::Earliest => {
                let res: block::Response = self.tm().block(Height::from(1u32)).await?;
                res.block
            }
        };
        Ok(block)
    }
    /// Get the Tendermint header at a specific height.
    ///
    /// Height 0 maps to the synthetic "zero block" header; symbolic heights
    /// resolve to the latest committed header.
    pub async fn header_by_height(
        &self,
        block_number: et::BlockNumber,
    ) -> JsonRpcResult<tendermint::block::Header> {
        let header = match block_number {
            et::BlockNumber::Number(height) if height == et::U64::from(0) => {
                from_tm::BLOCK_ZERO.header.clone()
            }
            et::BlockNumber::Number(height) => {
                let height =
                    Height::try_from(height.as_u64()).context("failed to convert to height")?;
                let res: header::Response = self.tm().header(height).await?;
                res.header
            }
            et::BlockNumber::Finalized
            | et::BlockNumber::Latest
            | et::BlockNumber::Safe
            | et::BlockNumber::Pending => {
                // `.latest_commit()` actually points at the block before the last one,
                // because the commit is attached to the next block.
                // Not using `.latest_block().header` because this is a lighter query.
                let res: commit::Response = self.tm().latest_commit().await?;
                res.signed_header.header
            }
            et::BlockNumber::Earliest => {
                let res: header::Response = self.tm().header(Height::from(1u32)).await?;
                res.header
            }
        };
        Ok(header)
    }
/// Get the Tendermint header at a specificed height or hash.
pub async fn header_by_id(
&self,
block_id: et::BlockId,
) -> JsonRpcResult<tendermint::block::Header> {
match block_id {
et::BlockId::Number(n) => self.header_by_height(n).await,
et::BlockId::Hash(h) => self.header_by_hash(h).await,
}
}
    /// Return the height of a block which we should send with a query,
    /// or None if it's the latest, to let the node figure it out.
    ///
    /// Adjusts the height of the query to +1 so the effects of the block is visible.
    /// The node stores the results at height+1 to be consistent with how CometBFT works,
    /// ie. the way it publishes the state hash in the *next* block.
    ///
    /// The assumption here is that the client got the height from one of two sources:
    /// * by calling the `latest_height` method above, which adjusts it down,
    ///   so that the returned height is one which is surely executed
    /// * by getting a block (e.g. from a subscription) which was already executed
    ///
    /// In both cases we know that there should be state stored at height + 1.
    pub async fn query_height(&self, block_id: et::BlockId) -> JsonRpcResult<FvmQueryHeight> {
        match block_id {
            et::BlockId::Number(bn) => match bn {
                // The client might be asking by height of a block, expecting to see the results.
                et::BlockNumber::Number(height) => Ok(FvmQueryHeight::from(height.as_u64() + 1)),
                et::BlockNumber::Finalized | et::BlockNumber::Latest | et::BlockNumber::Safe => {
                    Ok(FvmQueryHeight::Committed)
                }
                et::BlockNumber::Pending => Ok(FvmQueryHeight::Pending),
                et::BlockNumber::Earliest => Ok(FvmQueryHeight::Height(1)),
            },
            et::BlockId::Hash(h) => {
                // The effects of this block are saved at the next height.
                let header = self.header_by_hash(h).await?;
                Ok(FvmQueryHeight::Height(header.height.value() + 1))
            }
        }
    }
/// Get a Tendermint block by hash, if it exists.
pub async fn block_by_hash_opt(
&self,
block_hash: et::H256,
) -> JsonRpcResult<Option<tendermint::block::Block>> {
if block_hash.0 == *from_tm::BLOCK_ZERO_HASH {
return Ok(Some(from_tm::BLOCK_ZERO.clone()));
}
let hash = tendermint::Hash::Sha256(*block_hash.as_fixed_bytes());
let res: block_by_hash::Response = self.tm().block_by_hash(hash).await?;
Ok(res.block)
}
/// Get a Tendermint height by hash, if it exists.
pub async fn header_by_hash_opt(
&self,
block_hash: et::H256,
) -> JsonRpcResult<Option<tendermint::block::Header>> {
if block_hash.0 == *from_tm::BLOCK_ZERO_HASH {
return Ok(Some(from_tm::BLOCK_ZERO.header.clone()));
}
let hash = tendermint::Hash::Sha256(*block_hash.as_fixed_bytes());
let res: header_by_hash::Response = self.tm().header_by_hash(hash).await?;
Ok(res.header)
}
/// Get a Tendermint header by hash.
pub async fn header_by_hash(
&self,
block_hash: et::H256,
) -> JsonRpcResult<tendermint::block::Header> {
match self.header_by_hash_opt(block_hash).await? {
Some(header) => Ok(header),
None => error(
ExitCode::USR_NOT_FOUND,
format!("block {block_hash} not found"),
),
}
}
    /// Fetch transaction results to produce the full block.
    ///
    /// With `full_tx` the transactions are embedded as full JSON objects,
    /// otherwise only their hashes are included.
    pub async fn enrich_block(
        &self,
        block: tendermint::Block,
        full_tx: bool,
    ) -> JsonRpcResult<et::Block<serde_json::Value>>
    where
        C: Client + Sync + Send,
    {
        let block = enrich_block(&self.client, &block).await?;
        let block = if full_tx {
            map_rpc_block_txs(block, serde_json::to_value).context("failed to convert to JSON")?
        } else {
            map_rpc_block_txs(block, |h| serde_json::to_value(h.hash))
                .context("failed to convert hash to JSON")?
        };
        Ok(block)
    }
/// Get a transaction from a block by index.
pub async fn transaction_by_index(
&self,
block: tendermint::Block,
index: et::U64,
) -> JsonRpcResult<Option<et::Transaction>> {
if let Some(msg) = block.data().get(index.as_usize()) {
let msg = to_chain_message(msg)?;
if let ChainMessage::Signed(msg) = msg {
let sp = self
.client
.state_params(FvmQueryHeight::from(index.as_u64()))
.await?;
let chain_id = ChainID::from(sp.value.chain_id);
let hash = if let Ok(Some(DomainHash::Eth(h))) = msg.domain_hash(&chain_id) {
et::TxHash::from(h)
} else {
return error(ExitCode::USR_ILLEGAL_ARGUMENT, "incompatible transaction");
};
let mut tx = to_eth_transaction(msg, chain_id, hash)
.context("failed to convert to eth transaction")?;
tx.transaction_index = Some(index);
tx.block_hash = Some(et::H256::from_slice(block.header.hash().as_bytes()));
tx.block_number = Some(et::U64::from(block.header.height.value()));
Ok(Some(tx))
} else {
error(ExitCode::USR_ILLEGAL_ARGUMENT, "incompatible transaction")
}
} else {
Ok(None)
}
}
/// Get the Tendermint transaction by hash.
pub async fn tx_by_hash(
&self,
tx_hash: et::TxHash,
) -> JsonRpcResult<Option<tendermint_rpc::endpoint::tx::Response>> {
// We cannot use `self.tm().tx()` because the ethers.js forces us to use Ethereum specific hashes.
// For now we can try to retrieve the transaction using the `tx_search` mechanism, and relying on
// CometBFT indexing capabilities.
// Doesn't work with `Query::from(EventType::Tx).and_eq()`
let query = Query::eq("eth.hash", hex::encode(tx_hash.as_bytes()));
match self
.tm()
.tx_search(query, false, 1, 1, Order::Ascending)
.await
{
Ok(res) => Ok(res.txs.into_iter().next()),
Err(e) => error(ExitCode::USR_UNSPECIFIED, e),
}
}
    /// Send a message by the system actor to an EVM actor for a read-only query.
    ///
    /// If the actor doesn't exist then the FVM will create a placeholder actor,
    /// which will not respond to any queries. In that case `None` is returned.
    pub async fn read_evm_actor<T>(
        &self,
        address: et::H160,
        method: evm::Method,
        params: RawBytes,
        height: FvmQueryHeight,
    ) -> JsonRpcResult<Option<T>>
    where
        T: DeserializeOwned,
    {
        let method_num = method as u64;
        // We send off a read-only query to an EVM actor at the given address.
        let message = Message {
            version: Default::default(),
            from: system::SYSTEM_ACTOR_ADDR,
            to: to_fvm_address(address),
            sequence: 0,
            value: TokenAmount::from_atto(0),
            method_num,
            params,
            gas_limit: fvm_shared::BLOCK_GAS_LIMIT,
            gas_fee_cap: TokenAmount::from_atto(0),
            gas_premium: TokenAmount::from_atto(0),
        };
        let result = self
            .client
            .call(message, height)
            .await
            .context("failed to call contract")?;
        if result.value.code.is_err() {
            return match ExitCode::new(result.value.code.value()) {
                ExitCode::USR_UNHANDLED_MESSAGE => {
                    // If the account is an ETHACCOUNT then it doesn't handle certain methods like `GetCode`.
                    // Let's make it work the same way as a PLACEHOLDER and return nothing.
                    Ok(None)
                }
                other => error(other, result.value.info),
            };
        }
        tracing::debug!(addr = ?address, method_num, data = hex::encode(&result.value.data), "evm actor response");
        let data = fendermint_rpc::response::decode_bytes(&result.value)
            .context("failed to decode data as bytes")?;
        // An empty payload is treated the same as a missing actor.
        if data.is_empty() {
            Ok(None)
        } else {
            let data: T =
                fvm_ipld_encoding::from_slice(&data).context("failed to decode as IPLD")?;
            Ok(Some(data))
        }
    }
    /// Resolve the type of the actor at `address`, consulting two caches:
    /// first address -> type, then code CID -> type, before falling back to
    /// the built-in actor registry.
    pub async fn get_actor_type(
        &self,
        address: &et::H160,
        height: FvmQueryHeight,
    ) -> JsonRpcResult<ActorType> {
        let addr = to_fvm_address(*address);
        if let Some(actor_type) = self.addr_cache.get_actor_type_from_addr(&addr) {
            tracing::debug!(
                ?addr,
                ?actor_type,
                "addr cache hit, directly return the actor type"
            );
            return Ok(actor_type);
        }
        let Some((
            _,
            ActorState {
                code: actor_type_cid,
                ..
            },
        )) = self.client.actor_state(&addr, height).await?.value
        else {
            return Ok(ActorType::Inexistent);
        };
        if let Some(actor_type) = self.addr_cache.get_actor_type_from_cid(&actor_type_cid) {
            tracing::debug!(
                ?actor_type_cid,
                ?actor_type,
                "cid cache hit, directly return the actor type"
            );
            tracing::debug!(?addr, ?actor_type, "put result into addr cache");
            self.addr_cache
                .set_actor_type_for_addr(addr, actor_type.clone());
            return Ok(actor_type);
        }
        // Cache miss on both levels: look the code CID up in the built-in actor registry.
        let registry = self.client.builtin_actors(height).await?.value.registry;
        let ret = match registry.into_iter().find(|(_, cid)| cid == &actor_type_cid) {
            Some((typ, _)) => ActorType::Known(Cow::Owned(typ)),
            None => ActorType::Unknown(actor_type_cid),
        };
        tracing::debug!(?actor_type_cid, ?ret, "put result into cid cache");
        self.addr_cache
            .set_actor_type_for_cid(actor_type_cid, ret.clone());
        tracing::debug!(?addr, ?ret, "put result into addr cache");
        self.addr_cache.set_actor_type_for_addr(addr, ret.clone());
        Ok(ret)
    }
}
impl<C> JsonRpcState<C>
where
    C: Client + SubscriptionClient + Clone + Sync + Send + 'static,
{
    /// Create a new filter with the next available ID and insert it into the filters collection.
    async fn insert_filter_driver(
        &self,
        kind: FilterKind,
        ws_sender: Option<WebSocketSender>,
    ) -> (FilterDriver, Sender<FilterCommand>) {
        let mut filters = self.filters.write().await;
        // Choose an unpredictable filter, so it's not so easy to clear out someone else's logs.
        let mut id: et::U256;
        loop {
            id = FilterId::from(rand::thread_rng().gen::<u64>());
            if !filters.contains_key(&id) {
                break;
            }
        }
        let (driver, tx) = FilterDriver::new(id, self.filter_timeout, kind, ws_sender);
        // Inserting happens here, while removal will be handled by the `FilterState` itself.
        filters.insert(id, tx.clone());
        (driver, tx)
    }
    /// Create a new filter driver, subscribe with Tendermint and start handlers in the background.
    async fn new_filter_driver(
        &self,
        kind: FilterKind,
        ws_sender: Option<WebSocketSender>,
    ) -> anyhow::Result<FilterId> {
        // A filter kind may need several Tendermint queries (e.g. one per topic combination).
        let queries = kind.to_queries();
        let mut subs = Vec::new();
        for query in queries {
            let sub: Subscription = self
                .tm()
                .subscribe(query)
                .await
                .context("failed to subscribe to query")?;
            subs.push(sub);
        }
        let (state, tx) = self.insert_filter_driver(kind, ws_sender).await;
        let id = state.id();
        let filters = self.filters.clone();
        let client = self.client.clone();
        // One task drives the filter; one task per subscription feeds it events.
        tokio::spawn(async move { state.run(filters, client).await });
        for sub in subs {
            let tx = tx.clone();
            tokio::spawn(async move { run_subscription(id, sub, tx).await });
        }
        Ok(id)
    }
    /// Create a new filter, subscribe with Tendermint and start handlers in the background.
    pub async fn new_filter(&self, kind: FilterKind) -> anyhow::Result<FilterId> {
        self.new_filter_driver(kind, None).await
    }
    /// Create a new subscription, subscribe with Tendermint and start handlers in the background.
    pub async fn new_subscription(
        &self,
        kind: FilterKind,
        ws_sender: WebSocketSender,
    ) -> anyhow::Result<FilterId> {
        self.new_filter_driver(kind, Some(ws_sender)).await
    }
}
impl<C> JsonRpcState<C> {
    /// Ask a filter to uninstall itself.
    ///
    /// Returns `true` if the filter existed, `false` otherwise.
    pub async fn uninstall_filter(&self, filter_id: FilterId) -> anyhow::Result<bool> {
        let filters = self.filters.read().await;
        if let Some(tx) = filters.get(&filter_id) {
            // Signal to the background tasks that they can unsubscribe.
            tx.send(FilterCommand::Uninstall)
                .await
                .map_err(|e| anyhow!("failed to send command: {e}"))?;
            Ok(true)
        } else {
            Ok(false)
        }
    }
    /// Take the currently accumulated changes.
    ///
    /// Returns `None` when the filter does not exist (or has finished).
    pub async fn take_filter_changes(
        &self,
        filter_id: FilterId,
    ) -> anyhow::Result<Option<FilterRecords<BlockHash>>> {
        let filters = self.filters.read().await;
        match filters.get(&filter_id) {
            None => Ok(None),
            Some(tx) => {
                // Request-response over a oneshot channel with the filter driver.
                let (tx_res, rx_res) = tokio::sync::oneshot::channel();
                tx.send(FilterCommand::Take(tx_res))
                    .await
                    .map_err(|e| anyhow!("failed to send command: {e}"))?;
                rx_res.await.context("failed to receive response")?
            }
        }
    }
}
/// Convert a Tendermint block into its Ethereum representation, filling in
/// chain-dependent data (base fee, chain ID) and the per-transaction execution
/// results fetched from the `block_results` endpoint.
pub async fn enrich_block<C>(
    client: &FendermintClient<C>,
    block: &tendermint::Block,
) -> JsonRpcResult<et::Block<et::Transaction>>
where
    C: Client + Sync + Send,
{
    let height = block.header().height;
    // Query state at the block's own height so base fee / chain ID match that block.
    let state_params = client
        .state_params(FvmQueryHeight::Height(height.value()))
        .await?;
    let base_fee = state_params.value.base_fee;
    let chain_id = ChainID::from(state_params.value.chain_id);
    let block_results: block_results::Response = client.underlying().block_results(height).await?;
    let block = to_eth_block(block, block_results, base_fee, chain_id)
        .context("failed to convert to eth block")?;
    Ok(block)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/client.rs | fendermint/eth/api/src/client.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{pin::Pin, time::Duration};
use anyhow::Context;
use async_trait::async_trait;
use fendermint_rpc::client::{http_client, ws_client};
use futures::Future;
use tendermint_rpc::{
error::ErrorDetail, query::Query, Client, Error, HttpClient, SimpleRequest, Subscription,
SubscriptionClient, Url, WebSocketClient, WebSocketClientDriver, WebSocketClientUrl,
};
/// A mixed HTTP and WebSocket client. Uses HTTP to perform all
/// the JSON-RPC requests except the ones which require subscription,
/// which go through a WebSocket client.
///
/// The WebSocket client is expected to lose connection with CometBFT,
/// in which case it will be re-established in the background.
///
/// Existing subscriptions should receive an error and they can try
/// re-subscribing through the Ethereum API facade, which should create
/// new subscriptions through a fresh CometBFT client.
#[derive(Clone)]
pub struct HybridClient {
    // Handles all plain request/response JSON-RPC calls.
    http_client: HttpClient,
    // Commands for the `HybridClientDriver`, which owns the WebSocket connection.
    cmd_tx: tokio::sync::mpsc::UnboundedSender<DriverCommand>,
}
/// Background half of [`HybridClient`]: owns the WebSocket connection and
/// processes subscription commands, reconnecting when the socket drops.
pub struct HybridClientDriver {
    // Where to (re)connect the WebSocket.
    ws_url: WebSocketClientUrl,
    // How long to wait between reconnection attempts.
    retry_delay: Duration,
    // Commands sent by the `HybridClient` half.
    cmd_rx: tokio::sync::mpsc::UnboundedReceiver<DriverCommand>,
}
/// Commands from the client half to the driver half; results are returned
/// over the embedded oneshot senders.
enum DriverCommand {
    Subscribe(
        Query,
        tokio::sync::oneshot::Sender<Result<Subscription, Error>>,
    ),
    Unsubscribe(Query, tokio::sync::oneshot::Sender<Result<(), Error>>),
    // Stop the driver loop and close the WebSocket.
    Close,
}
impl HybridClient {
    /// Construct the client half and the driver half of the hybrid connection.
    ///
    /// The returned [`HybridClient`] performs regular requests over HTTP
    /// immediately; subscription commands are queued on an unbounded channel
    /// for the [`HybridClientDriver`], which must be `run` separately.
    pub fn new(
        http_url: Url,
        ws_url: WebSocketClientUrl,
        retry_delay: Duration,
    ) -> anyhow::Result<(Self, HybridClientDriver)> {
        let (cmd_tx, cmd_rx) = tokio::sync::mpsc::unbounded_channel();

        let http_client =
            http_client(http_url, None).context("failed to create Tendermint client")?;

        Ok((
            Self {
                http_client,
                cmd_tx,
            },
            HybridClientDriver {
                ws_url,
                retry_delay,
                cmd_rx,
            },
        ))
    }
}
#[async_trait]
impl Client for HybridClient {
    /// All plain JSON-RPC requests are delegated to the HTTP client.
    async fn perform<R>(&self, request: R) -> Result<R::Output, Error>
    where
        R: SimpleRequest,
    {
        self.http_client.perform(request).await
    }
}
#[async_trait]
impl SubscriptionClient for HybridClient {
    /// Forward the subscription request to the driver and await the result
    /// over a oneshot channel.
    async fn subscribe(&self, query: Query) -> Result<Subscription, Error> {
        let (tx, rx) = tokio::sync::oneshot::channel();
        self.cmd_tx
            .send(DriverCommand::Subscribe(query, tx))
            .map_err(|_| Error::channel_send())?;
        // The outer error covers the oneshot being dropped; the inner one is the
        // driver's own subscribe result.
        rx.await
            .map_err(|e| Error::client_internal(e.to_string()))?
    }
    /// Forward the unsubscribe request to the driver and await the result.
    async fn unsubscribe(&self, query: Query) -> Result<(), Error> {
        let (tx, rx) = tokio::sync::oneshot::channel();
        self.cmd_tx
            .send(DriverCommand::Unsubscribe(query, tx))
            .map_err(|_| Error::channel_send())?;
        rx.await
            .map_err(|e| Error::client_internal(e.to_string()))?
    }
    /// Tell the driver to shut down; does not wait for it to finish.
    fn close(self) -> Result<(), Error> {
        self.cmd_tx
            .send(DriverCommand::Close)
            .map_err(|_| Error::channel_send())
    }
}
impl HybridClientDriver {
    /// Serve subscription commands until the command channel closes or a
    /// `Close` command arrives, then close the WebSocket.
    pub async fn run(mut self) {
        let mut client = self.ws_client().await;
        while let Some(cmd) = self.cmd_rx.recv().await {
            match cmd {
                DriverCommand::Subscribe(query, tx) => {
                    client = self
                        .send_loop(client, tx, |client| {
                            // Clone per attempt: the closure may be retried after reconnect.
                            let query = query.clone();
                            Box::pin(async move { client.subscribe(query.clone()).await })
                        })
                        .await;
                }
                DriverCommand::Unsubscribe(query, tx) => {
                    client = self
                        .send_loop(client, tx, |client| {
                            let query = query.clone();
                            Box::pin(async move { client.unsubscribe(query.clone()).await })
                        })
                        .await;
                }
                DriverCommand::Close => {
                    break;
                }
            }
        }
        // Best effort; the socket may already be gone.
        let _ = client.close();
    }
    /// Try to send something to the socket. If it fails, reconnect and send again.
    ///
    /// Only `ChannelSend` errors (the WebSocket driver went away) trigger a
    /// reconnect; any other result is forwarded to the caller as-is.
    async fn send_loop<F, T>(
        &self,
        mut client: WebSocketClient,
        tx: tokio::sync::oneshot::Sender<Result<T, Error>>,
        f: F,
    ) -> WebSocketClient
    where
        F: Fn(WebSocketClient) -> Pin<Box<dyn Future<Output = Result<T, Error>> + Send>>,
    {
        loop {
            match f(client.clone()).await {
                Err(e) if matches!(e.detail(), ErrorDetail::ChannelSend(_)) => {
                    client = self.ws_client().await;
                }
                res => {
                    // The requester may have given up; ignore a dropped receiver.
                    let _ = tx.send(res);
                    return client;
                }
            }
        }
    }
    /// Connect to the WebSocket and start the driver, returning the client.
    async fn ws_client(&self) -> WebSocketClient {
        let (client, driver) = self.ws_connect().await;
        tokio::spawn(async move { driver.run().await });
        client
    }
    /// Try connecting repeatedly until it succeeds.
    async fn ws_connect(&self) -> (WebSocketClient, WebSocketClientDriver) {
        let url: Url = self.ws_url.clone().into();
        loop {
            match ws_client(url.clone()).await {
                Ok(cd) => {
                    return cd;
                }
                Err(e) => {
                    tracing::warn!(
                        error = e.to_string(),
                        url = url.to_string(),
                        "failed to connect to Tendermint WebSocket; retrying in {}s...",
                        self.retry_delay.as_secs()
                    );
                    tokio::time::sleep(self.retry_delay).await;
                }
            }
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/error.rs | fendermint/eth/api/src/error.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use ethers_contract::{ContractRevert, EthError};
use fendermint_vm_actor_interface::ipc::subnet::SubnetActorErrors;
use fvm_shared::error::ExitCode;
use lazy_static::lazy_static;
use regex::Regex;
use serde::Serialize;
use crate::state::Nonce;
/// A JSON-RPC error as returned to Ethereum API clients: numeric code,
/// human-readable message, and an optional structured payload.
#[derive(Debug, Clone)]
pub struct JsonRpcError {
    pub code: i64,
    pub message: String,
    pub data: Option<serde_json::Value>,
}
impl From<anyhow::Error> for JsonRpcError {
    /// Wrap an `anyhow` error as a generic (code 0) JSON-RPC error.
    fn from(value: anyhow::Error) -> Self {
        // `{:#}` renders the whole chain of causes, not just the top context.
        let message = format!("{:#}", value);
        Self {
            code: 0,
            message,
            data: None,
        }
    }
}
impl From<tendermint_rpc::Error> for JsonRpcError {
    /// Wrap a Tendermint RPC failure as a generic (code 0) JSON-RPC error.
    fn from(value: tendermint_rpc::Error) -> Self {
        let message = format!("Tendermint RPC error: {value}");
        Self {
            code: 0,
            message,
            data: None,
        }
    }
}
impl From<JsonRpcError> for JsonRpcError2 {
    /// Convert into the `jsonrpc_v2` error type used by the RPC server.
    fn from(value: JsonRpcError) -> Self {
        Self::Full {
            code: value.code,
            message: value.message,
            data: value.data.map(|d| {
                // `jsonrpc_v2` wants a type-erased serializable payload.
                let d: Box<dyn erased_serde::Serialize + Send> = Box::new(d);
                d
            }),
        }
    }
}
/// Build an `Err(JsonRpcError)` carrying the numeric value of `exit_code`
/// and no data payload.
pub fn error<T>(exit_code: ExitCode, msg: impl ToString) -> Result<T, JsonRpcError> {
    let err = JsonRpcError {
        code: exit_code.value().into(),
        message: msg.to_string(),
        data: None,
    };
    Err(err)
}
pub fn error_with_data<T, E: Serialize>(
exit_code: ExitCode,
msg: impl ToString,
data: Option<E>,
) -> Result<T, JsonRpcError> {
let data = data.map(|data| match serde_json::to_value(data) {
Ok(v) => v,
Err(e) => serde_json::Value::String(format!("failed to serialize error data: {e}")),
});
Err(JsonRpcError {
code: exit_code.value().into(),
message: msg.to_string(),
data,
})
}
/// Try to parse the data returned from the EVM as a revert string and append it to the message,
/// so we have a bit more human readable feedback than just hexadecimal strings with the selector
/// we can see in for example [here](https://github.com/gakonst/ethers-rs/commit/860100535812cbfe5e3cc417872392a6d76a159c).
///
/// The goal is that if Solidity has something like `require(x > 0, "X must be positive")` then we see the message in the JSON-RPC response.
pub fn error_with_revert<T>(
    exit_code: ExitCode,
    msg: impl ToString,
    data: Option<impl AsRef<[u8]>>,
) -> Result<T, JsonRpcError> {
    let msg = msg.to_string();
    let (msg, data) = match data {
        None => (msg, None),
        Some(data) => {
            // Try the simplest case of just a string, even though it's covered by the `SubnetActorErrors` as well.
            // Then see if it's an error that one of our known IPC actor facets are producing.
            let revert = if let Some(revert) = String::decode_with_selector(data.as_ref()) {
                Some(revert)
            } else {
                SubnetActorErrors::decode_with_selector(data.as_ref()).map(|e| e.to_string())
            };
            // Append the decoded revert reason (if any) to the message; the raw
            // bytes are always forwarded hex-encoded in the data payload.
            (
                revert.map(|rev| format!("{msg}\n{rev}")).unwrap_or(msg),
                Some(hex::encode(data)),
            )
        }
    };
    error_with_data(exit_code, msg, data)
}
impl std::fmt::Display for JsonRpcError {
    /// Human-readable form: message first, numeric code in parentheses.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{} (code: {})", self.message, self.code)
    }
}
// Marker impl so `JsonRpcError` works with `?` / `anyhow` as a standard error.
impl std::error::Error for JsonRpcError {}
/// Error representing out-of-order transaction submission.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct OutOfSequence {
    // The nonce the chain expected next for this sender.
    pub expected: Nonce,
    // The nonce the submitted transaction actually carried.
    pub got: Nonce,
}
impl OutOfSequence {
    /// Check whether the nonce gap is small enough that we can add this transaction to the buffer.
    ///
    /// Admissible means the transaction is not stale (`got >= expected`) and
    /// the gap does not exceed `max_nonce_gap`.
    pub fn is_admissible(&self, max_nonce_gap: Nonce) -> bool {
        let Self { expected, got } = *self;
        got >= expected && got - expected <= max_nonce_gap
    }

    /// Check if the error indicates an out-of-order submission with the nonce expectation in the message.
    ///
    /// Only `SYS_SENDER_STATE_INVALID` errors are considered; the expected/got
    /// pair is scraped from the message text.
    pub fn try_parse(code: ExitCode, msg: &str) -> Option<Self> {
        if code != ExitCode::SYS_SENDER_STATE_INVALID {
            return None;
        }
        lazy_static! {
            static ref OOS_RE: Regex =
                Regex::new(r"expected sequence (\d+), got (\d+)").expect("regex parses");
        }
        // Use the first match where both captured numbers parse as nonces.
        OOS_RE.captures_iter(msg).find_map(|c| {
            let (_, [e, g]) = c.extract();
            Some(Self {
                expected: e.parse().ok()?,
                got: g.parse().ok()?,
            })
        })
    }
}
#[cfg(test)]
mod tests {
    use crate::error::OutOfSequence;
    use fvm_shared::error::ExitCode;
    // The expected/got pair is extracted from free-form message text.
    #[test]
    fn test_out_of_sequence_parse() {
        assert_eq!(
            OutOfSequence::try_parse(
                ExitCode::SYS_SENDER_STATE_INVALID,
                "... expected sequence 0, got 4 ..."
            ),
            Some(OutOfSequence {
                expected: 0,
                got: 4
            })
        );
    }
    // Tuples are (expected, got, admissible) for a given max nonce gap.
    #[test]
    fn test_out_of_sequence_admissible() {
        let examples10 = [
            (0, 4, true),
            (0, 10, true),
            (0, 11, false),
            (1, 11, true),
            (11, 1, false),
            (10, 5, false),
            (10, 10, true),
        ];
        for (expected, got, admissible) in examples10 {
            assert_eq!(
                OutOfSequence { expected, got }.is_admissible(10),
                admissible
            )
        }
        // With a zero gap only exact matches are admissible.
        let examples0 = [(0, 0, true), (10, 10, true), (0, 1, false), (1, 0, false)];
        for (expected, got, admissible) in examples0 {
            assert_eq!(OutOfSequence { expected, got }.is_admissible(0), admissible)
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/mpool.rs | fendermint/eth/api/src/mpool.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Utilities related to caching and buffering Ethereum transactions.
use std::{collections::BTreeMap, time::Duration};
use ethers_core::types as et;
use fendermint_rpc::{
client::TendermintClient, message::SignedMessageFactory, FendermintClient, QueryClient,
};
use fendermint_vm_message::{chain::ChainMessage, query::FvmQueryHeight, signed::DomainHash};
use futures::StreamExt;
use fvm_shared::{address::Address, chainid::ChainID};
use tendermint::Block;
use tendermint_rpc::{
event::EventData,
query::{EventType, Query},
Client, SubscriptionClient,
};
use crate::{cache::Cache, state::Nonce, HybridClient};
const RETRY_SLEEP_SECS: u64 = 5;
/// Cache submitted transactions by their Ethereum hash, because the CometBFT
/// API would not be able to find them until they are delivered to the application
/// and indexed by their domain hash, which some tools interpret as the transaction
/// being dropped from the mempool.
pub type TransactionCache = Cache<et::TxHash, et::Transaction>;
/// Buffer out-of-order messages until they can be sent to the chain.
///
/// Keyed by sender, with each sender's pending messages ordered by nonce.
#[derive(Clone)]
pub struct TransactionBuffer(pub Cache<Address, BTreeMap<Nonce, ChainMessage>>);
impl TransactionBuffer {
    /// Insert a transaction we could not submit straight away into the buffer.
    pub fn insert(&self, sender: Address, nonce: Nonce, msg: ChainMessage) {
        self.0.with(|c| {
            let buffer = c.entry(sender).or_insert_with(BTreeMap::new);
            // Overwrite any previous entry to protect against DoS attack; it wouldn't make sense to submit them anyway.
            buffer.insert(nonce, msg);
        })
    }
    /// Remove all (sender, nonce) pairs which were included in a block.
    fn remove_many<'a, I>(&self, txs: I)
    where
        I: Iterator<Item = (&'a Address, Nonce)>,
    {
        self.0.with(|c| {
            for (sender, nonce) in txs {
                if let Some(buffer) = c.get_mut(sender) {
                    buffer.remove(&nonce);
                }
            }
        })
    }
    /// Gather any messages that have been enabled by transactions added to a block.
    ///
    /// These are removed from the cache, submission is only attempted once.
    fn remove_unblocked<'a, I>(&self, txs: I) -> Vec<(Address, Nonce, ChainMessage)>
    where
        I: Iterator<Item = (&'a Address, Nonce)>,
    {
        self.0.with(|c| {
            let mut msgs = Vec::new();
            for (sender, mut nonce) in txs {
                if let Some(buffer) = c.get_mut(sender) {
                    // Drain the run of consecutive nonces following the one
                    // that was just included in a block.
                    nonce += 1;
                    while let Some(msg) = buffer.remove(&nonce) {
                        msgs.push((*sender, nonce, msg));
                        nonce += 1;
                    }
                }
            }
            msgs
        })
    }
}
/// Subscribe to `NewBlock` notifications and clear transactions from the caches.
///
/// Spawns a background task that runs for the lifetime of the process.
pub fn start_tx_cache_clearing(
    client: FendermintClient<HybridClient>,
    tx_cache: TransactionCache,
    tx_buffer: TransactionBuffer,
) {
    tokio::task::spawn(async move {
        // Blocks until the chain ID can be fetched; retried internally.
        let chain_id = get_chain_id(&client).await;
        tx_cache_clearing_loop(client.into_underlying(), chain_id, tx_cache, tx_buffer).await;
    });
}
/// Subscribe to notifications about new blocks and
/// 1) remove all included transactions from the caches
/// 2) broadcast buffered out-of-order transactions when they are unblocked
///
/// Re-subscribe in the event of a subscription failure.
async fn tx_cache_clearing_loop<C>(
    client: C,
    chain_id: ChainID,
    tx_cache: TransactionCache,
    tx_buffer: TransactionBuffer,
) where
    C: Client + SubscriptionClient + Send + Sync,
{
    loop {
        let query = Query::from(EventType::NewBlock);
        match client.subscribe(query).await {
            Err(e) => {
                tracing::warn!(error=?e, "failed to subscribe to NewBlocks; retrying later...");
                tokio::time::sleep(Duration::from_secs(RETRY_SLEEP_SECS)).await;
            }
            Ok(mut subscription) => {
                while let Some(result) = subscription.next().await {
                    match result {
                        Err(e) => {
                            // Break out to the outer loop, which re-subscribes.
                            tracing::warn!(error=?e, "NewBlocks subscription failed; resubscribing...");
                            break;
                        }
                        Ok(event) => {
                            if let EventData::NewBlock {
                                block: Some(block), ..
                            } = event.data
                            {
                                let txs = collect_txs(&block, &chain_id);
                                if txs.is_empty() {
                                    continue;
                                }
                                let tx_hashes = txs.iter().map(|(h, _, _)| h);
                                let tx_nonces = || txs.iter().map(|(_, s, n)| (s, *n));
                                tx_cache.remove_many(tx_hashes);
                                // First remove all transactions which have been in the block (could be multiple from the same sender).
                                tx_buffer.remove_many(tx_nonces());
                                // Then collect whatever is unblocked on top of those, ie. anything that hasn't been included, but now can.
                                let unblocked_msgs = tx_buffer.remove_unblocked(tx_nonces());
                                // Send them all with best-effort.
                                send_msgs(&client, unblocked_msgs).await;
                            }
                        }
                    }
                }
            }
        }
    }
}
/// Collect the identifiers of the transactions in the block.
///
/// Only signed chain messages with an Ethereum domain hash are included;
/// anything that fails to decode is silently skipped.
fn collect_txs(block: &Block, chain_id: &ChainID) -> Vec<(et::TxHash, Address, Nonce)> {
    let mut txs = Vec::new();
    for tx in &block.data {
        if let Ok(ChainMessage::Signed(msg)) = fvm_ipld_encoding::from_slice(tx) {
            if let Ok(Some(DomainHash::Eth(h))) = msg.domain_hash(chain_id) {
                txs.push((et::TxHash::from(h), msg.message.from, msg.message.sequence))
            }
        }
    }
    txs
}
/// Fetch the chain ID from the API; do it in a loop until it succeeds.
async fn get_chain_id(client: &FendermintClient<HybridClient>) -> ChainID {
    loop {
        match client.state_params(FvmQueryHeight::default()).await {
            Ok(sp) => return ChainID::from(sp.value.chain_id),
            Err(e) => {
                tracing::warn!(error=?e, "failed to get chain ID; retrying later...");
            }
        }
        // Only reached on error; back off before the next attempt.
        tokio::time::sleep(Duration::from_secs(RETRY_SLEEP_SECS)).await;
    }
}
/// Best effort attempt to broadcast previously out-of-order transactions which have been unblocked.
///
/// Failures are logged and skipped; messages that fail to serialize are dropped.
async fn send_msgs<C>(client: &C, msgs: Vec<(Address, Nonce, ChainMessage)>)
where
    C: Client + Send + Sync,
{
    for (sender, nonce, msg) in msgs {
        let Ok(bz) = SignedMessageFactory::serialize(&msg) else {
            continue;
        };
        // Use the broadcast version which waits for basic checks to complete.
        match client.broadcast_tx_sync(bz).await {
            Ok(_) => {
                tracing::info!(
                    sender = sender.to_string(),
                    nonce,
                    "submitted out-of-order transaction"
                );
            }
            Err(e) => {
                tracing::error!(error=?e, sender = sender.to_string(), nonce, "failed to submit out-of-order transaction");
            }
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/cache.rs | fendermint/eth/api/src/cache.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
sync::{Arc, Mutex},
time::Duration,
};
use anyhow::Context;
use cid::Cid;
use lru_time_cache::LruCache;
use tendermint_rpc::Client;
use fvm_shared::{
address::{Address, Payload},
ActorID,
};
use fendermint_rpc::client::FendermintClient;
use fendermint_rpc::query::QueryClient;
use fendermint_vm_message::query::FvmQueryHeight;
use crate::state::ActorType;
// The `LruCache` is wrapped in `Mutex` because even reading requires mutation.
/// A cloneable, thread-safe LRU cache; clones share the same underlying map.
#[derive(Clone)]
pub struct Cache<K, V> {
    cache: Arc<Mutex<LruCache<K, V>>>,
}
impl<K, V> Cache<K, V>
where
    K: Ord + Clone,
    V: Clone,
{
    /// Cache bounded by entry count only.
    pub fn new(capacity: usize) -> Self {
        Self {
            cache: Arc::new(Mutex::new(LruCache::with_capacity(capacity))),
        }
    }
    /// Cache bounded by entry count and a time-to-live per entry.
    pub fn new_with_ttl(capacity: usize, ttl: Duration) -> Self {
        Self {
            cache: Arc::new(Mutex::new(LruCache::with_expiry_duration_and_capacity(
                ttl, capacity,
            ))),
        }
    }
    pub fn insert(&self, key: K, value: V) {
        self.with(|c| c.insert(key, value));
    }
    /// Returns a clone of the cached value, if present.
    pub fn get(&self, key: &K) -> Option<V> {
        self.with(|c| c.get(key).cloned())
    }
    pub fn remove(&self, key: &K) {
        self.with(|c| c.remove(key));
    }
    /// Remove every key in the iterator under a single lock acquisition.
    pub fn remove_many<'a, I>(&self, keys: I)
    where
        I: Iterator<Item = &'a K>,
        K: 'a,
    {
        self.with(|c| {
            for key in keys {
                c.remove(key);
            }
        })
    }
    /// Run `f` with exclusive access to the underlying map.
    pub fn with<F, T>(&self, f: F) -> T
    where
        F: FnOnce(&mut LruCache<K, V>) -> T,
    {
        let mut guard = self.cache.lock().expect("cache poisoned");
        f(&mut guard)
    }
}
/// Facilitate Ethereum address <-> Actor ID lookups.
#[derive(Clone)]
pub struct AddressCache<C> {
    // Used to resolve cache misses by querying actor state.
    client: FendermintClient<C>,
    addr_to_id: Cache<Address, ActorID>,
    id_to_addr: Cache<ActorID, Address>,
    addr_to_actor_type: Cache<Address, ActorType>,
    cid_to_actor_type: Cache<Cid, ActorType>,
}
impl<C> AddressCache<C>
where
    C: Client + Sync + Send,
{
    /// All four internal caches share the same capacity.
    pub fn new(client: FendermintClient<C>, capacity: usize) -> Self {
        Self {
            client,
            addr_to_id: Cache::new(capacity),
            id_to_addr: Cache::new(capacity),
            addr_to_actor_type: Cache::new(capacity),
            cid_to_actor_type: Cache::new(capacity),
        }
    }
    /// Resolve an address to its actor ID, consulting the cache first and
    /// falling back to an actor-state query at the committed height.
    ///
    /// Returns `Ok(None)` if the actor does not exist.
    pub async fn lookup_id(&self, addr: &Address) -> anyhow::Result<Option<ActorID>> {
        // ID addresses carry the answer directly.
        if let Ok(id) = addr.id() {
            return Ok(Some(id));
        }
        if let Some(id) = self.get_id(addr) {
            return Ok(Some(id));
        }
        // Using committed height because pending could change.
        let res = self
            .client
            .actor_state(addr, FvmQueryHeight::Committed)
            .await
            .context("failed to lookup actor state")?;
        if let Some((id, _)) = res.value {
            self.set_id(*addr, id);
            // Only delegated (e.g. Ethereum) addresses are cached for reverse lookup.
            if let Payload::Delegated(_) = addr.payload() {
                self.set_addr(id, *addr)
            }
            return Ok(Some(id));
        }
        tracing::info!(
            addr = addr.to_string(),
            height = res.height.value(),
            "actor not found"
        );
        Ok(None)
    }
    /// Look up the delegated address of an ID, if any.
    pub async fn lookup_addr(&self, id: &ActorID) -> anyhow::Result<Option<Address>> {
        if let Some(addr) = self.get_addr(id) {
            return Ok(Some(addr));
        }
        let res = self
            .client
            .actor_state(&Address::new_id(*id), FvmQueryHeight::Committed)
            .await
            .context("failed to lookup actor state")?;
        if let Some((_, actor_state)) = res.value {
            if let Some(addr) = actor_state.delegated_address {
                // Populate both directions while we have the data.
                self.set_addr(*id, addr);
                self.set_id(addr, *id);
                return Ok(Some(addr));
            }
        }
        tracing::info!(id, height = res.height.value(), "actor not found");
        Ok(None)
    }
    fn get_id(&self, addr: &Address) -> Option<ActorID> {
        self.addr_to_id.get(addr)
    }
    fn set_id(&self, addr: Address, id: ActorID) {
        self.addr_to_id.insert(addr, id)
    }
    fn get_addr(&self, id: &ActorID) -> Option<Address> {
        self.id_to_addr.get(id)
    }
    fn set_addr(&self, id: ActorID, addr: Address) {
        self.id_to_addr.insert(id, addr)
    }
    pub fn set_actor_type_for_addr(&self, addr: Address, actor_type: ActorType) {
        self.addr_to_actor_type.insert(addr, actor_type)
    }
    pub fn get_actor_type_from_addr(&self, addr: &Address) -> Option<ActorType> {
        self.addr_to_actor_type.get(addr)
    }
    pub fn set_actor_type_for_cid(&self, cid: Cid, actor_type: ActorType) {
        self.cid_to_actor_type.insert(cid, actor_type);
    }
    pub fn get_actor_type_from_cid(&self, cid: &Cid) -> Option<ActorType> {
        self.cid_to_actor_type.get(cid)
    }
}
#[cfg(test)]
mod tests {
    use crate::cache::AddressCache;
    use crate::state::ActorType;
    use cid::Cid;
    use fendermint_rpc::FendermintClient;
    use fvm_shared::address::Address;
    use std::str::FromStr;
    use tendermint_rpc::MockClient;

    // The mock client is never actually called; these tests exercise only the caches.
    #[test]
    fn test_read_and_write_addr_to_actor_type() {
        let client = FendermintClient::new(
            MockClient::new(tendermint_rpc::MockRequestMethodMatcher::default()).0,
        );
        let addr_cache = AddressCache::new(client, 1000);

        let addrs: Vec<Address> = [
            "f410fivboj67m6ut4j6xx3lhc426io22r7l3h6yha5bi",
            "f410fmpohbjcmznke3e7pbxomsbg5uae6o2sfjurchxa",
            "f410fxbfwpcrgbjg2ab6fevpoi4qlcfosw2vk5kzo5ga",
            "f410fggjevhgketpz6gw6ordusynlgcd5piyug4aomuq",
        ]
        .iter()
        .map(|s| Address::from_str(s).unwrap())
        .collect();

        // Populate the first three; the fourth is deliberately left unset.
        addr_cache.set_actor_type_for_addr(addrs[0], ActorType::EVM);
        addr_cache.set_actor_type_for_addr(addrs[1], ActorType::Unknown(Cid::default()));
        addr_cache.set_actor_type_for_addr(addrs[2], ActorType::Inexistent);

        let expected = [
            Some(ActorType::EVM),
            Some(ActorType::Unknown(Cid::default())),
            Some(ActorType::Inexistent),
            None,
        ];
        for (addr, want) in addrs.iter().zip(expected) {
            assert_eq!(addr_cache.get_actor_type_from_addr(addr), want);
        }
    }

    #[test]
    fn test_read_and_write_cid_to_actor_type() {
        let client = FendermintClient::new(
            MockClient::new(tendermint_rpc::MockRequestMethodMatcher::default()).0,
        );
        let addr_cache = AddressCache::new(client, 1000);

        let cids: Vec<Cid> = [
            "bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi",
            "bafy2bzaceas2zajrutdp7ugb6w2lpmow3z3klr3gzqimxtuz22tkkqallfch4",
            "k51qzi5uqu5dlvj2baxnqndepeb86cbk3ng7n3i46uzyxzyqj2xjonzllnv0v8",
            "bafybeiemxf5abjwjbikoz4mc3a3dla6ual3jsgpdr4cjr3oz3evfyavhwq",
        ]
        .iter()
        .map(|s| Cid::from_str(s).unwrap())
        .collect();

        // Populate the first three; the fourth is deliberately left unset.
        addr_cache.set_actor_type_for_cid(cids[0], ActorType::EVM);
        addr_cache.set_actor_type_for_cid(cids[1], ActorType::Unknown(Cid::default()));
        addr_cache.set_actor_type_for_cid(cids[2], ActorType::Inexistent);

        let expected = [
            Some(ActorType::EVM),
            Some(ActorType::Unknown(Cid::default())),
            Some(ActorType::Inexistent),
            None,
        ];
        for (cid, want) in cids.iter().zip(expected) {
            assert_eq!(addr_cache.get_actor_type_from_cid(cid), want);
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/conv/from_fvm.rs | fendermint/eth/api/src/conv/from_fvm.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Helper methods to convert between FVM and Ethereum data formats.
pub use fendermint_vm_message::conv::from_fvm::*;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/conv/from_eth.rs | fendermint/eth/api/src/conv/from_eth.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Helper methods to convert between Ethereum and FVM data formats.
use ethers_core::types as et;
use ethers_core::types::transaction::eip2718::TypedTransaction;
pub use fendermint_vm_message::conv::from_eth::*;
use fvm_shared::{error::ExitCode, message::Message};
use crate::{error, JsonRpcResult};
/// Convert a typed Ethereum transaction to an FVM message.
///
/// Only EIP-1559 transactions are fully supported; legacy transactions are
/// accepted solely when `accept_legacy` is set (gas estimation), and EIP-2930
/// transactions are always rejected.
pub fn to_fvm_message(tx: TypedTransaction, accept_legacy: bool) -> JsonRpcResult<Message> {
    match tx {
        TypedTransaction::Eip1559(ref tx) => {
            Ok(fendermint_vm_message::conv::from_eth::to_fvm_message(tx)?)
        }
        TypedTransaction::Legacy(_) if accept_legacy => {
            // legacy transactions are only accepted for gas estimation purposes
            // (when accept_legacy is explicitly set)
            // eth_sendRawTransaction should fail for legacy transactions.
            // For this purpose it is OK to not set `max_fee_per_gas` and
            // `max_priority_fee_per_gas`. Legacy transactions don't include
            // that information
            Ok(fendermint_vm_message::conv::from_eth::to_fvm_message(
                &tx.into(),
            )?)
        }
        TypedTransaction::Legacy(_) | TypedTransaction::Eip2930(_) => error(
            ExitCode::USR_ILLEGAL_ARGUMENT,
            "unexpected transaction type",
        ),
    }
}
/// Turn a request into the DTO returned by the API.
///
/// Block-related fields are left `None`; callers fill them in once the
/// transaction is known to be included in a block.
pub fn to_eth_transaction(
    tx: et::Eip1559TransactionRequest,
    sig: et::Signature,
    hash: et::TxHash,
) -> et::Transaction {
    et::Transaction {
        hash,
        nonce: tx.nonce.unwrap_or_default(),
        block_hash: None,
        block_number: None,
        transaction_index: None,
        from: tx.from.unwrap_or_default(),
        to: tx.to.and_then(|to| to.as_address().cloned()),
        value: tx.value.unwrap_or_default(),
        gas: tx.gas.unwrap_or_default(),
        max_fee_per_gas: tx.max_fee_per_gas,
        max_priority_fee_per_gas: tx.max_priority_fee_per_gas,
        // Strictly speaking a "Type 2" transaction should not need to set this, but we do because Blockscout
        // has a database constraint that if a transaction is included in a block this can't be null.
        gas_price: Some(
            tx.max_fee_per_gas.unwrap_or_default()
                + tx.max_priority_fee_per_gas.unwrap_or_default(),
        ),
        input: tx.data.unwrap_or_default(),
        chain_id: tx.chain_id.map(|x| et::U256::from(x.as_u64())),
        v: et::U64::from(sig.v),
        r: sig.r,
        s: sig.s,
        // Always advertised as an EIP-1559 ("Type 2") transaction.
        transaction_type: Some(2u64.into()),
        access_list: Some(tx.access_list),
        other: Default::default(),
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/conv/from_tm.rs | fendermint/eth/api/src/conv/from_tm.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Helper methods to convert between Ethereum and Tendermint data formats.
use std::collections::HashSet;
use std::str::FromStr;
use anyhow::{anyhow, Context};
use ethers_core::types::{self as et};
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_message::conv::from_fvm::to_eth_transaction_request;
use fendermint_vm_message::{chain::ChainMessage, signed::SignedMessage};
use fvm_shared::address::Address;
use fvm_shared::bigint::Zero;
use fvm_shared::chainid::ChainID;
use fvm_shared::{bigint::BigInt, econ::TokenAmount};
use lazy_static::lazy_static;
use tendermint::abci::types::ExecTxResult;
use tendermint::abci::{self, Event, EventAttribute};
use tendermint::crypto::sha256::Sha256;
use tendermint_rpc::endpoint;
use super::from_eth;
use super::from_fvm::{to_eth_address, to_eth_signature, to_eth_tokens};
// Values taken from https://github.com/filecoin-project/lotus/blob/6e7dc9532abdb3171427347710df4c860f1957a2/chain/types/ethtypes/eth_types.go#L199
lazy_static! {
static ref EMPTY_ETH_HASH: et::H256 = et::H256::default();
static ref EMPTY_ETH_NONCE: et::H64 = et::H64::default();
// Keccak-256 of an RLP of an empty array
static ref EMPTY_UNCLE_HASH: et::H256 = et::H256::from_slice(
hex::decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")
.unwrap()
.as_ref(),
);
// Keccak-256 hash of the RLP of null
static ref EMPTY_ROOT_HASH: et::H256 = et::H256::from_slice(
hex::decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
.unwrap()
.as_ref(),
);
static ref EMPTY_ETH_BLOOM: [u8; 2048/8] = [0u8; 2048/8];
static ref FULL_ETH_BLOOM: [u8; 2048/8] = [0xff; 2048/8];
static ref MAX_U256: BigInt = BigInt::from_str(&et::U256::MAX.to_string()).unwrap();
pub static ref BLOCK_ZERO: tendermint::Block = block_zero();
pub static ref BLOCK_ZERO_HASH: [u8; 32] = BLOCK_ZERO.header().hash().as_bytes().try_into().unwrap();
}
/// A pretend block at height 0 for some tools like The Graph which go there.
///
/// Everything is zeroed/empty; the app hash is the CID of empty bytes so it
/// has the same shape as real app hashes.
fn block_zero() -> tendermint::Block {
    let commit = tendermint::block::Commit {
        height: tendermint::block::Height::try_from(0u64).unwrap(),
        round: tendermint::block::Round::try_from(0).unwrap(),
        block_id: tendermint::block::Id {
            hash: tendermint::Hash::None,
            part_set_header: tendermint::block::parts::Header::new(0, tendermint::Hash::None)
                .unwrap(),
        },
        signatures: Vec::new(),
    };
    let empty_cid = fendermint_vm_message::cid(&[0u8; 0]).unwrap();
    let header = tendermint::block::Header {
        version: tendermint::block::header::Version { block: 0, app: 0 },
        chain_id: tendermint::chain::Id::try_from("UNSPECIFIED").expect("invalid chainid"),
        height: tendermint::block::Height::try_from(0u64).unwrap(),
        time: tendermint::time::Time::unix_epoch(),
        last_block_id: None,
        last_commit_hash: None,
        data_hash: None,
        validators_hash: tendermint::Hash::None,
        next_validators_hash: tendermint::Hash::None,
        consensus_hash: tendermint::Hash::None,
        app_hash: tendermint::AppHash::try_from(empty_cid.to_bytes()).unwrap(),
        last_results_hash: None,
        evidence_hash: None,
        proposer_address: tendermint::account::Id::new([0u8; 20]),
    };
    tendermint::Block::new(
        header,
        Vec::new(),
        tendermint::evidence::List::default(),
        Some(commit),
    )
    .unwrap()
}
/// Whether this is the synthetic height-0 block produced by [`block_zero`],
/// identified by its precomputed header hash.
pub fn is_block_zero(block: &tendermint::Block) -> bool {
    block.header().hash() == tendermint::Hash::Sha256(*BLOCK_ZERO_HASH)
}
/// Convert a Tendermint block to Ethereum with only the block hashes in the body.
pub fn to_eth_block(
block: &tendermint::Block,
block_results: tendermint_rpc::endpoint::block_results::Response,
base_fee: TokenAmount,
chain_id: ChainID,
) -> anyhow::Result<et::Block<et::Transaction>> {
// Based on https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/rpc/types/utils.go#L113
// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/rpc/backend/blocks.go#L365
// https://github.com/filecoin-project/lotus/blob/6cc506f5cf751215be6badc94a960251c6453202/node/impl/full/eth.go#L1883
let hash = et::H256::from_slice(block.header().hash().as_ref());
let parent_hash = if block.header.height.value() == 1 {
// Just in case the client tool wants to compare hashes.
et::H256::from_slice(BLOCK_ZERO_HASH.as_ref())
} else {
block
.header()
.last_block_id
.map(|id| et::H256::from_slice(id.hash.as_bytes()))
.unwrap_or_default()
};
let transactions_root = if block.data.is_empty() {
*EMPTY_ROOT_HASH
} else {
block
.header()
.data_hash
.map(|h| et::H256::from_slice(h.as_bytes()))
.unwrap_or(*EMPTY_ROOT_HASH)
};
// Tendermint's account hash luckily has the same length as Eth.
let author = et::H160::from_slice(block.header().proposer_address.as_bytes());
let transaction_results = block_results.txs_results.unwrap_or_default();
let mut transactions = Vec::new();
let mut size = et::U256::zero();
let mut gas_limit = et::U256::zero();
let mut gas_used = et::U256::zero();
// I'm just going to skip all the future message types here, which are CID based.
// To deal with them, we'd have to send IPLD requests via ABCI to resolve them,
// potentially through multiple hops. Let's leave that for the future and for now
// assume that all we have is signed transactions.
for (idx, data) in block.data().iter().enumerate() {
let result = match transaction_results.get(idx) {
Some(result) => result,
None => continue,
};
size += et::U256::from(data.len());
gas_used += et::U256::from(result.gas_used);
gas_limit += et::U256::from(result.gas_wanted);
let msg = to_chain_message(data)?;
if let ChainMessage::Signed(msg) = msg {
let hash = msg_hash(&result.events, data);
let mut tx = to_eth_transaction(msg, chain_id, hash)
.context("failed to convert to eth transaction")?;
tx.transaction_index = Some(et::U64::from(idx));
tx.block_hash = Some(et::H256::from_slice(block.header.hash().as_bytes()));
tx.block_number = Some(et::U64::from(block.header.height.value()));
transactions.push(tx);
}
}
let block = et::Block {
hash: Some(hash),
parent_hash,
number: Some(et::U64::from(block.header().height.value())),
timestamp: et::U256::from(block.header().time.unix_timestamp()),
author: Some(author),
state_root: app_hash_to_root(&block.header().app_hash)?,
transactions_root,
base_fee_per_gas: Some(to_eth_tokens(&base_fee)?),
difficulty: et::U256::zero(),
total_difficulty: Some(et::U256::zero()),
nonce: Some(*EMPTY_ETH_NONCE),
mix_hash: Some(*EMPTY_ETH_HASH),
uncles: Vec::new(),
uncles_hash: *EMPTY_UNCLE_HASH,
receipts_root: *EMPTY_ROOT_HASH,
extra_data: et::Bytes::default(),
logs_bloom: Some(et::Bloom::from_slice(&*EMPTY_ETH_BLOOM)),
withdrawals_root: None,
withdrawals: None,
seal_fields: Vec::new(),
other: Default::default(),
transactions,
size: Some(size),
gas_limit,
gas_used,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
};
Ok(block)
}
pub fn to_eth_transaction(
msg: SignedMessage,
chain_id: ChainID,
hash: et::TxHash,
) -> anyhow::Result<et::Transaction> {
// Based on https://github.com/filecoin-project/lotus/blob/6cc506f5cf751215be6badc94a960251c6453202/node/impl/full/eth.go#L2048
let sig =
to_eth_signature(msg.signature(), true).context("failed to convert to eth signature")?;
// Recover the original request; this method has better tests.
let tx = to_eth_transaction_request(&msg.message, &chain_id)
.context("failed to convert to tx request")?;
let tx = from_eth::to_eth_transaction(tx, sig, hash);
Ok(tx)
}
/// Helper function to produce cumulative gas used after the execution of each transaction in a block,
/// along with cumulative event log count.
pub fn to_cumulative(block_results: &endpoint::block_results::Response) -> Vec<(et::U256, usize)> {
let mut records = Vec::new();
let mut cumulative_gas_used = et::U256::zero();
let mut cumulative_event_count = 0usize;
if let Some(rs) = block_results.txs_results.as_ref() {
for r in rs {
cumulative_gas_used += et::U256::from(r.gas_used);
cumulative_event_count += r.events.len();
records.push((cumulative_gas_used, cumulative_event_count));
}
}
records
}
// https://github.com/filecoin-project/lotus/blob/6cc506f5cf751215be6badc94a960251c6453202/node/impl/full/eth.go#L2174
// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/rpc/backend/tx_info.go#L147
pub async fn to_eth_receipt(
msg: &SignedMessage,
result: &endpoint::tx::Response,
cumulative: &[(et::U256, usize)],
header: &tendermint::block::Header,
base_fee: &TokenAmount,
) -> anyhow::Result<et::TransactionReceipt> {
let block_hash = et::H256::from_slice(header.hash().as_bytes());
let block_number = et::U64::from(result.height.value());
let transaction_index = et::U64::from(result.index);
let transaction_hash = msg_hash(&result.tx_result.events, &result.tx);
let msg = &msg.message;
// Lotus effective gas price is based on total spend divided by gas used,
// for which it recalculates the gas outputs. However, we don't have access
// to the VM interpreter here to restore those results, and they are discarded
// from the [`ApplyRet`] during the conversion to [`DeliverTx`].
// We could put it into the [`DeliverTx::info`] field, or we can calculate
// something based on the gas fields of the transaction, like Ethermint.
let effective_gas_price =
crate::gas::effective_gas_price(msg, base_fee, result.tx_result.gas_used);
// Sum up gas up to this transaction.
let (cumulative_gas_used, cumulative_event_count) = cumulative
.get(result.index as usize)
.cloned()
.unwrap_or_default();
let log_index_start = cumulative_event_count.saturating_sub(result.tx_result.events.len());
let logs = to_logs(
&result.tx_result.events,
block_hash,
block_number,
transaction_hash,
transaction_index,
log_index_start,
)
.context("failed to collect logs")?;
// See if the return value is an Ethereum contract creation.
// https://github.com/filecoin-project/lotus/blob/6cc506f5cf751215be6badc94a960251c6453202/node/impl/full/eth.go#LL2240C9-L2240C15
let contract_address = if result.tx_result.code.is_err() {
None
} else {
maybe_contract_address(&result.tx_result).map(|ca| et::H160::from_slice(&ca.0))
};
let receipt = et::TransactionReceipt {
transaction_hash,
transaction_index,
block_hash: Some(block_hash),
block_number: Some(block_number),
from: to_eth_address(&msg.from).ok().flatten().unwrap_or_default(),
to: to_eth_address(&msg.to).ok().flatten(),
cumulative_gas_used,
gas_used: Some(et::U256::from(result.tx_result.gas_used)),
contract_address,
logs,
status: Some(et::U64::from(if result.tx_result.code.is_ok() {
1
} else {
0
})),
root: Some(app_hash_to_root(&header.app_hash)?),
logs_bloom: et::Bloom::from_slice(&*EMPTY_ETH_BLOOM),
transaction_type: Some(et::U64::from(2)), // Value used by Lotus.
effective_gas_price: Some(to_eth_tokens(&effective_gas_price)?),
other: Default::default(),
};
Ok(receipt)
}
/// Change the type of transactions in a block by mapping a function over them.
pub fn map_rpc_block_txs<F, A, B, E>(block: et::Block<A>, f: F) -> Result<et::Block<B>, E>
where
F: Fn(A) -> Result<B, E>,
{
let et::Block {
hash,
parent_hash,
uncles_hash,
author,
state_root,
transactions_root,
receipts_root,
number,
gas_used,
gas_limit,
extra_data,
logs_bloom,
timestamp,
difficulty,
total_difficulty,
seal_fields,
uncles,
transactions,
size,
mix_hash,
nonce,
base_fee_per_gas,
withdrawals_root,
withdrawals,
other,
blob_gas_used,
excess_blob_gas,
parent_beacon_block_root,
} = block;
let transactions: Result<Vec<B>, E> = transactions.into_iter().map(f).collect();
let transactions = transactions?;
let block = et::Block {
hash,
parent_hash,
uncles_hash,
author,
state_root,
transactions_root,
receipts_root,
number,
gas_used,
gas_limit,
extra_data,
logs_bloom,
timestamp,
difficulty,
total_difficulty,
seal_fields,
uncles,
size,
mix_hash,
nonce,
base_fee_per_gas,
withdrawals_root,
withdrawals,
transactions,
other,
blob_gas_used,
excess_blob_gas,
parent_beacon_block_root,
};
Ok(block)
}
fn app_hash_to_root(app_hash: &tendermint::AppHash) -> anyhow::Result<et::H256> {
// Out app hash is a CID. We only need the hash part.
// Actually it's not the state root of the actors, but it's still a CID.
let state_root = cid::Cid::try_from(app_hash.as_bytes()).context("app hash is not a CID")?;
// Just in case we returned `Cid::default()`
if state_root.hash().digest().is_empty() {
Ok(et::H256::default())
} else {
Ok(et::H256::from_slice(state_root.hash().digest()))
}
}
fn maybe_contract_address(deliver_tx: &ExecTxResult) -> Option<EthAddress> {
fendermint_rpc::response::decode_fevm_create(deliver_tx)
.ok()
.map(|cr| {
// We can return either `cr.actor_id` as a masked address,
// or `cr.eth_address`. Both addresses are usable for calling the contract.
// However, the masked ID doesn't work with some of the Ethereum tooling which check some hash properties.
// We also have to make sure to use the same kind of address that we do in the filtering and event logs,
// otherwise the two doesn't align and it makes the API difficult to use. It's impossible(?) to find out
// the actor ID just using the Ethereum API, so best use the same.
// EthAddress::from_id(cr.actor_id)
cr.eth_address
})
}
/// Artificial block-zero.
pub fn to_eth_block_zero(block: tendermint::Block) -> anyhow::Result<et::Block<serde_json::Value>> {
let block_results = tendermint_rpc::endpoint::block_results::Response {
height: block.header.height,
txs_results: None,
begin_block_events: None,
end_block_events: None,
validator_updates: Vec::new(),
consensus_param_updates: None,
finalize_block_events: Vec::new(),
app_hash: Default::default(),
};
let block = to_eth_block(&block, block_results, TokenAmount::zero(), ChainID::from(0))
.context("failed to map block zero to eth")?;
let block =
map_rpc_block_txs(block, serde_json::to_value).context("failed to convert to JSON")?;
Ok(block)
}
/// Turn Events into Ethereum logs.
///
/// We need to turn Actor IDs into Ethereum addresses because that's what the tooling expects.
pub fn to_logs(
events: &[abci::Event],
block_hash: et::H256,
block_number: et::U64,
transaction_hash: et::H256,
transaction_index: et::U64,
log_index_start: usize,
) -> anyhow::Result<Vec<et::Log>> {
let mut logs = Vec::new();
for (idx, event) in events.iter().filter(|e| e.kind == "event").enumerate() {
// Lotus looks up an Ethereum address based on the actor ID:
// https://github.com/filecoin-project/lotus/blob/6cc506f5cf751215be6badc94a960251c6453202/node/impl/full/eth.go#L1987
let addr = event
.attributes
.iter()
.find(|a| matches!(a.key_str(), Ok(key_str) if key_str == "emitter.deleg"))
.and_then(|a| a.value_str().ok())
.and_then(|a| a.parse::<Address>().ok());
let actor_id = event
.attributes
.iter()
.find(|a| matches!(a.key_str(), Ok(key_str) if key_str == "emitter.id"))
.and_then(|a| a.value_str().ok())
.and_then(|a| a.parse::<u64>().ok())
.ok_or_else(|| anyhow!("cannot find the 'emitter.id' key"))?;
let address = addr
.and_then(|a| to_eth_address(&a).ok())
.flatten()
.unwrap_or_else(|| et::H160::from(EthAddress::from_id(actor_id).0));
// https://github.com/filecoin-project/lotus/blob/6cc506f5cf751215be6badc94a960251c6453202/node/impl/full/eth.go#LL2240C9-L2240C15
let (topics, data) =
to_topics_and_data(&event.attributes).context("failed to collect topics and data")?;
// Blockscout doesn't recognise the `logType` field since https://github.com/blockscout/blockscout/pull/9007
let log_type = None; // Some(event.kind.clone()),
let log = et::Log {
address,
topics,
data,
block_hash: Some(block_hash),
block_number: Some(block_number),
transaction_hash: Some(transaction_hash),
transaction_index: Some(transaction_index),
log_index: Some(et::U256::from(idx + log_index_start)),
transaction_log_index: Some(et::U256::from(idx)),
log_type,
removed: Some(false),
};
logs.push(log);
}
Ok(logs)
}
// Find the Ethereum topics (up to 4) and the data in the event attributes.
fn to_topics_and_data(attrs: &Vec<EventAttribute>) -> anyhow::Result<(Vec<et::H256>, et::Bytes)> {
// Based on https://github.com/filecoin-project/lotus/blob/6cc506f5cf751215be6badc94a960251c6453202/node/impl/full/eth.go#L1534
let mut topics = Vec::new();
let mut data = None;
for attr in attrs {
let decode_value = || {
hex::decode(attr.value_str()?).with_context(|| {
format!(
"failed to decode attr value as hex: {}",
&attr.value_str().unwrap()
)
})
};
match attr.key_str()? {
"t1" | "t2" | "t3" | "t4" => {
let bz = decode_value()?;
if bz.len() != 32 {
return Err(anyhow!("unexpected topic value: {attr:?}"));
}
let h = et::H256::from_slice(&bz);
let i = attr.key_str()?[1..]
.parse::<usize>()
.unwrap()
.saturating_sub(1);
while topics.len() <= i {
topics.push(et::H256::default())
}
topics[i] = h;
}
"d" => data = Some(et::Bytes::from(decode_value()?)),
_ => {} // e.g. "emitter.*"
}
}
Ok((topics, data.unwrap_or_default()))
}
/// Decode the transaction payload as a [ChainMessage].
pub fn to_chain_message(tx: &[u8]) -> anyhow::Result<ChainMessage> {
fvm_ipld_encoding::from_slice::<ChainMessage>(tx).context("failed to decode tx as ChainMessage")
}
/// Hash the transaction payload the way Tendermint does,
/// to calculate the transaction hash which is otherwise unavailable.
///
/// This is here for reference only and should not be returned to Ethereum tools which expect
/// the hash to be based on RLP and Keccak256.
pub fn tx_hash(tx: &[u8]) -> tendermint::Hash {
// based on how `tendermint::Header::hash` works.
let hash = tendermint::crypto::default::Sha256::digest(tx);
tendermint::Hash::Sha256(hash)
}
/// Best effort to find and parse any `<kind>.hash` attribute emitted among the events.
pub fn find_hash_event(kind: &str, events: &[abci::Event]) -> Option<et::H256> {
events
.iter()
.find(|e| e.kind == kind)
.and_then(|e| {
e.attributes
.iter()
.find(|a| matches!(a.key_str(), Ok(key_str) if key_str == "hash"))
})
.and_then(|a| hex::decode(a.value_str().unwrap()).ok())
.filter(|bz| bz.len() == 32)
.map(|bz| et::H256::from_slice(&bz))
}
// Calculate some kind of hash for the message, preferrably one the tools expect.
pub fn msg_hash(events: &[Event], tx: &[u8]) -> et::TxHash {
if let Some(h) = find_hash_event("eth", events) {
h
} else {
// Return the default hash, at least there is something
et::TxHash::from_slice(tx_hash(tx).as_bytes())
}
}
/// Collect and parse all `emitter.deleg` or `emitter.id` in the events.
pub fn collect_emitters(events: &[abci::Event]) -> HashSet<Address> {
let mut emitters = HashSet::new();
for event in events.iter().filter(|e| e.kind == "event") {
for addr in [
event
.attributes
.iter()
.find(|a| matches!(a.key_str(), Ok(key_str) if key_str == "emitter.deleg"))
.and_then(|a| a.value_str().unwrap().parse::<Address>().ok()),
event
.attributes
.iter()
.find(|a| matches!(a.key_str(), Ok(key_str) if key_str == "emitter.id"))
.and_then(|a| a.value_str().unwrap().parse::<u64>().ok())
.map(Address::new_id),
]
.into_iter()
.flatten()
{
emitters.insert(addr);
}
}
emitters
}
#[cfg(test)]
mod tests {
use crate::conv::from_tm::is_block_zero;
use super::{to_eth_block_zero, BLOCK_ZERO};
#[test]
fn block_zero_can_be_created() {
assert!(is_block_zero(&BLOCK_ZERO))
}
#[test]
fn block_zero_can_be_turned_into_eth() {
let _ = to_eth_block_zero(BLOCK_ZERO.clone()).unwrap();
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/conv/mod.rs | fendermint/eth/api/src/conv/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod from_eth;
pub mod from_fvm;
pub mod from_tm;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/handlers/http.rs | fendermint/eth/api/src/handlers/http.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
// Based on https://github.com/ChainSafe/forest/blob/v0.8.2/node/rpc/src/rpc_http_handler.rs
use axum::http::{HeaderMap, StatusCode};
use axum::response::IntoResponse;
use jsonrpc_v2::{RequestObject, ResponseObjects};
use serde::Deserialize;
use crate::{apis, AppState};
type ResponseHeaders = [(&'static str, &'static str); 1];
const RESPONSE_HEADERS: ResponseHeaders = [("content-type", "application/json-rpc;charset=utf-8")];
/// The Ethereum API implementations accept `{}` or `[{}, {}, ...]` as requests,
/// with the expectation of as many responses.
///
/// `jsonrpc_v2` has a type named `RequestKind` but it's not `Deserialize`.
#[derive(Deserialize)]
#[serde(untagged)]
pub enum RequestKind {
One(RequestObject),
Many(Vec<RequestObject>),
}
/// Handle JSON-RPC calls.
pub async fn handle(
_headers: HeaderMap,
axum::extract::State(state): axum::extract::State<AppState>,
axum::Json(request): axum::Json<RequestKind>,
) -> impl IntoResponse {
// NOTE: Any authorization can come here.
let response = match request {
RequestKind::One(request) => {
if let Err(response) = check_request(&request) {
return response;
}
state.rpc_server.handle(request).await
}
RequestKind::Many(requests) => {
for request in requests.iter() {
if let Err(response) = check_request(request) {
return response;
}
}
state.rpc_server.handle(requests).await
}
};
debug_response(&response);
json_response(&response)
}
fn debug_response(response: &ResponseObjects) {
let debug = |r| {
tracing::debug!(
response = serde_json::to_string(r).unwrap_or_else(|e| e.to_string()),
"RPC response"
);
};
match response {
ResponseObjects::Empty => {}
ResponseObjects::One(r) => {
debug(r);
}
ResponseObjects::Many(rs) => {
for r in rs {
debug(r);
}
}
}
}
fn json_response(response: &ResponseObjects) -> (StatusCode, ResponseHeaders, std::string::String) {
match serde_json::to_string(response) {
Ok(json) => (StatusCode::OK, RESPONSE_HEADERS, json),
Err(err) => {
let msg = err.to_string();
tracing::error!(error = msg, "RPC to JSON failure");
(StatusCode::INTERNAL_SERVER_ERROR, RESPONSE_HEADERS, msg)
}
}
}
fn check_request(
request: &RequestObject,
) -> Result<(), (StatusCode, ResponseHeaders, std::string::String)> {
tracing::debug!(?request, "RPC request");
let method = request.method_ref().to_owned();
if apis::is_streaming_method(&method) {
Err((
StatusCode::BAD_REQUEST,
RESPONSE_HEADERS,
format!("'{method}' is only available through WebSocket"),
))
} else {
Ok(())
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/handlers/mod.rs | fendermint/eth/api/src/handlers/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod http;
pub mod ws;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/handlers/ws.rs | fendermint/eth/api/src/handlers/ws.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
// Based on https://github.com/ChainSafe/forest/blob/v0.8.2/node/rpc/src/rpc_ws_handler.rs
use axum::{
extract::{
ws::{Message, WebSocket},
WebSocketUpgrade,
},
http::HeaderMap,
response::IntoResponse,
};
use futures::{stream::SplitSink, SinkExt, StreamExt};
use jsonrpc_v2::{RequestObject, ResponseObject, ResponseObjects, V2};
use serde_json::json;
use crate::{apis, state::WebSocketId, AppState, JsonRpcServer};
/// Mirroring [ethers_providers::rpc::transports::ws::types::Notification], which is what the library
/// expects for non-request-response payloads in [PubSubItem::deserialize].
#[derive(Debug)]
pub struct Notification {
pub subscription: ethers_core::types::U256,
pub result: serde_json::Value,
}
#[derive(Debug)]
pub struct MethodNotification {
// There is only one streaming method at the moment, but let's not hardcode it here.
pub method: String,
pub notification: Notification,
}
pub async fn handle(
_headers: HeaderMap,
axum::extract::State(state): axum::extract::State<AppState>,
ws: WebSocketUpgrade,
) -> impl IntoResponse {
ws.on_upgrade(move |socket| async { rpc_ws_handler_inner(state, socket).await })
}
/// Handle requests in a loop, interpreting each message as a JSON-RPC request.
///
/// Messages are evaluated one by one. We could spawn tasks like Forest,
/// but there should be some rate limiting applied to avoid DoS attacks.
async fn rpc_ws_handler_inner(state: AppState, socket: WebSocket) {
tracing::debug!("Accepted WS connection!");
let (mut sender, mut receiver) = socket.split();
// Create a channel over which the application can send messages to this socket.
let (notif_tx, mut notif_rx) = tokio::sync::mpsc::unbounded_channel();
let web_socket_id = state.rpc_state.add_web_socket(notif_tx).await;
loop {
let keep = tokio::select! {
Some(Ok(message)) = receiver.next() => {
handle_incoming(web_socket_id, &state.rpc_server, &mut sender, message).await
},
Some(notif) = notif_rx.recv() => {
handle_outgoing(web_socket_id, &mut sender, notif).await
},
else => break,
};
if !keep {
break;
}
}
// Clean up.
tracing::debug!(web_socket_id, "Removing WS connection");
state.rpc_state.remove_web_socket(&web_socket_id).await;
}
/// Handle an incoming request.
async fn handle_incoming(
web_socket_id: WebSocketId,
rpc_server: &JsonRpcServer,
sender: &mut SplitSink<WebSocket, Message>,
message: Message,
) -> bool {
if let Message::Text(mut request_text) = message {
if !request_text.is_empty() {
tracing::debug!(web_socket_id, request = request_text, "WS Request Received");
// We have to deserialize-add-reserialize becuase `JsonRpcRequest` can
// only be parsed with `from_str`, not `from_value`.
request_text = maybe_add_web_socket_id(request_text, web_socket_id);
match serde_json::from_str::<RequestObject>(&request_text) {
Ok(req) => {
return send_call_result(web_socket_id, rpc_server, sender, req).await;
}
Err(e) => {
deserialization_error("RequestObject", e);
}
}
}
}
true
}
fn deserialization_error(what: &str, e: serde_json::Error) {
// Not responding to the websocket because it requires valid responses, which need to have
// the `id` field present, which we'd only get if we managed to parse the request.
// Using `debug!` so someone sending junk cannot flood the log with warnings.
tracing::debug!("Error deserializing WS payload as {what}: {e}");
}
/// Try to append the websocket ID to the parameters if the method is a streaming one.
///
/// This is best effort. If fails, just let the JSON-RPC server handle the problem.
fn maybe_add_web_socket_id(request_text: String, web_socket_id: WebSocketId) -> String {
match serde_json::from_str::<serde_json::Value>(&request_text) {
Ok(mut json) => {
// If the method requires web sockets, append the ID of the socket to the parameters.
let is_streaming = match json.get("method") {
Some(serde_json::Value::String(method)) => apis::is_streaming_method(method),
_ => false,
};
if is_streaming {
match json.get_mut("params") {
Some(serde_json::Value::Array(ref mut params)) => {
params.push(serde_json::Value::Number(serde_json::Number::from(
web_socket_id,
)));
return serde_json::to_string(&json).unwrap_or(request_text);
}
_ => {
tracing::debug!("JSON-RPC streaming request has no or unexpected params")
}
}
}
}
Err(e) => {
deserialization_error("JSON", e);
}
}
request_text
}
/// Send a message from the application, result of an async subscription.
///
/// Returns `false` if the socket has been closed, otherwise `true` to keep working.
async fn handle_outgoing(
web_socket_id: WebSocketId,
sender: &mut SplitSink<WebSocket, Message>,
notif: MethodNotification,
) -> bool {
// Based on https://github.com/gakonst/ethers-rs/blob/ethers-v2.0.7/ethers-providers/src/rpc/transports/ws/types.rs#L145
let message = json! ({
"jsonrpc": V2,
"method": notif.method,
"params": {
"subscription": notif.notification.subscription,
"result": notif.notification.result
}
});
match serde_json::to_string(&message) {
Err(e) => {
tracing::error!(error=?e, "failed to serialize notification to JSON");
}
Ok(json) => {
tracing::debug!(web_socket_id, json, "sending notification to WS");
if let Err(e) = sender.send(Message::Text(json)).await {
tracing::warn!(web_socket_id, error =? e, "failed to send notfication to WS");
if is_closed_connection(e) {
return false;
}
}
}
}
true
}
/// Call the RPC method and respond through the Web Socket.
async fn send_call_result(
web_socket_id: WebSocketId,
server: &JsonRpcServer,
sender: &mut SplitSink<WebSocket, Message>,
request: RequestObject,
) -> bool {
let method = request.method_ref();
tracing::debug!("RPC WS called method: {}", method);
match server.handle(request).await {
ResponseObjects::Empty => true,
ResponseObjects::One(response) => send_response(web_socket_id, sender, response).await,
ResponseObjects::Many(responses) => {
for response in responses {
if !send_response(web_socket_id, sender, response).await {
return false;
}
}
true
}
}
}
async fn send_response(
web_socket_id: WebSocketId,
sender: &mut SplitSink<WebSocket, Message>,
response: ResponseObject,
) -> bool {
let response = serde_json::to_string(&response);
match response {
Err(e) => {
tracing::error!(error=?e, "failed to serialize response to JSON");
}
Ok(json) => {
tracing::debug!(web_socket_id, json, "sending response to WS");
if let Err(e) = sender.send(Message::Text(json)).await {
tracing::warn!(web_socket_id, error=?e, "failed to send response to WS");
if is_closed_connection(e) {
return false;
}
}
}
}
true
}
fn is_closed_connection(e: axum::Error) -> bool {
e.to_string().contains("closed connection")
}
#[cfg(test)]
mod tests {
#[test]
fn can_parse_request() {
let text = "{\"id\":0,\"jsonrpc\":\"2.0\",\"method\":\"eth_newFilter\",\"params\":[{\"topics\":[]}]}";
let _value = serde_json::from_str::<serde_json::Value>(text).expect("should parse as JSON");
// The following would fail because `V2` expects an `&str` but the `from_value` deserialized returns `String`.
// let _request = serde_json::from_value::<jsonrpc_v2::RequestObject>(value)
// .expect("should parse as JSON-RPC request");
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/apis/eth.rs | fendermint/eth/api/src/apis/eth.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
// See the following for inspiration:
// * https://github.com/evmos/ethermint/blob/ebbe0ffd0d474abd745254dc01e60273ea758dae/rpc/namespaces/ethereum/eth/api.go#L44
// * https://github.com/filecoin-project/lotus/blob/v1.23.1-rc2/api/api_full.go#L783
// * https://github.com/filecoin-project/lotus/blob/v1.23.1-rc2/node/impl/full/eth.go
use std::collections::HashSet;
use anyhow::Context;
use ethers_core::types::transaction::eip2718::TypedTransaction;
use ethers_core::types::{self as et, BlockNumber};
use ethers_core::utils::rlp;
use fendermint_rpc::message::SignedMessageFactory;
use fendermint_rpc::query::QueryClient;
use fendermint_rpc::response::{decode_data, decode_fevm_invoke, decode_fevm_return_data};
use fendermint_vm_actor_interface::eam::{EthAddress, EAM_ACTOR_ADDR};
use fendermint_vm_actor_interface::evm;
use fendermint_vm_message::chain::ChainMessage;
use fendermint_vm_message::query::FvmQueryHeight;
use fendermint_vm_message::signed::SignedMessage;
use futures::FutureExt;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::address::Address;
use fvm_shared::bigint::BigInt;
use fvm_shared::crypto::signature::Signature;
use fvm_shared::{chainid::ChainID, error::ExitCode};
use jsonrpc_v2::Params;
use rand::Rng;
use tendermint::block::Height;
use tendermint_rpc::endpoint::{self, status};
use tendermint_rpc::SubscriptionClient;
use tendermint_rpc::{
endpoint::{block, block_results, broadcast::tx_sync, consensus_params, header},
Client,
};
use fil_actors_evm_shared::uints;
use crate::conv::from_eth::{self, to_fvm_message};
use crate::conv::from_tm::{self, msg_hash, to_chain_message, to_cumulative, to_eth_block_zero};
use crate::error::{error_with_revert, OutOfSequence};
use crate::filters::{matches_topics, FilterId, FilterKind, FilterRecords};
use crate::{
conv::{
from_eth::to_fvm_address,
from_fvm::to_eth_tokens,
from_tm::{to_eth_receipt, to_eth_transaction},
},
error, JsonRpcData, JsonRpcResult,
};
/// Returns a list of addresses owned by client.
///
/// It will always return [] since we don't expect Fendermint to manage private keys.
pub async fn accounts<C>(_data: JsonRpcData<C>) -> JsonRpcResult<Vec<et::Address>> {
Ok(vec![])
}
/// Returns the number of most recent block.
pub async fn block_number<C>(data: JsonRpcData<C>) -> JsonRpcResult<et::U64>
where
C: Client + Sync + Send,
{
let height = data.latest_height().await?;
Ok(et::U64::from(height.value()))
}
/// Returns the chain ID used for signing replay-protected transactions.
pub async fn chain_id<C>(data: JsonRpcData<C>) -> JsonRpcResult<et::U64>
where
C: Client + Sync + Send,
{
let res = data.client.state_params(FvmQueryHeight::default()).await?;
Ok(et::U64::from(res.value.chain_id))
}
/// The current FVM network version.
pub async fn protocol_version<C>(data: JsonRpcData<C>) -> JsonRpcResult<String>
where
C: Client + Sync + Send,
{
let res = data.client.state_params(FvmQueryHeight::default()).await?;
let version: u32 = res.value.network_version.into();
Ok(version.to_string())
}
/// Returns a fee per gas that is an estimate of how much you can pay as a
/// priority fee, or 'tip', to get a transaction included in the current block.
pub async fn max_priority_fee_per_gas<C>(data: JsonRpcData<C>) -> JsonRpcResult<et::U256>
where
C: Client + Sync + Send,
{
// get the latest block
let res: block::Response = data.tm().latest_block().await?;
let latest_h = res.block.header.height;
// get consensus params to fetch block gas limit
// (this just needs to be done once as we assume that is constant
// for all blocks)
let consensus_params: consensus_params::Response = data
.tm()
.consensus_params(latest_h)
.await
.context("failed to get consensus params")?;
let mut block_gas_limit = consensus_params.consensus_params.block.max_gas;
if block_gas_limit <= 0 {
block_gas_limit =
i64::try_from(fvm_shared::BLOCK_GAS_LIMIT).expect("FVM block gas limit not i64")
};
let mut premiums = Vec::new();
// iterate through the blocks in the range
// we may be able to de-duplicate a lot of this code from fee_history
let latest_h: u64 = latest_h.into();
let mut blk = latest_h;
while blk > latest_h - data.gas_opt.num_blocks_max_prio_fee {
let block = data
.block_by_height(blk.into())
.await
.context("failed to get block")?;
let height = block.header().height;
// Genesis has height 1, but no relevant fees.
if height.value() <= 1 {
break;
}
let state_params = data
.client
.state_params(FvmQueryHeight::Height(height.value()))
.await?;
let base_fee = &state_params.value.base_fee;
// The latest block might not have results yet.
if let Ok(block_results) = data.tm().block_results(height).await {
let txs_results = block_results.txs_results.unwrap_or_default();
for (tx, txres) in block.data().iter().zip(txs_results) {
let msg = fvm_ipld_encoding::from_slice::<ChainMessage>(tx)
.context("failed to decode tx as ChainMessage")?;
if let ChainMessage::Signed(msg) = msg {
let premium = crate::gas::effective_gas_premium(&msg.message, base_fee);
premiums.push((premium, txres.gas_used));
}
}
}
blk -= 1;
}
// compute median gas price
let mut median = crate::gas::median_gas_premium(&mut premiums, block_gas_limit);
let min_premium = data.gas_opt.min_gas_premium.clone();
if median < min_premium {
median = min_premium;
}
// add some noise to normalize behaviour of message selection
// mean 1, stddev 0.005 => 95% within +-1%
const PRECISION: u32 = 32;
let mut rng = rand::thread_rng();
let noise: f64 = 1.0 + rng.gen::<f64>() * 0.005;
let precision: i64 = 32;
let coeff: u64 = ((noise * (1 << precision) as f64) as u64) + 1;
median *= BigInt::from(coeff);
median.div_ceil(BigInt::from(1 << PRECISION));
Ok(to_eth_tokens(&median)?)
}
/// Returns transaction base fee per gas and effective priority fee per gas for the requested/supported block range.
///
/// Walks backwards from `last_block`, collecting per block:
/// * the base fee (read from the FVM state params),
/// * the gas used ratio (total gas used / block gas limit),
/// * the effective premium at each requested reward percentile.
pub async fn fee_history<C>(
    data: JsonRpcData<C>,
    Params((block_count, last_block, reward_percentiles)): Params<(
        et::U256,
        et::BlockNumber,
        Vec<f64>,
    )>,
) -> JsonRpcResult<et::FeeHistory>
where
    C: Client + Sync + Send,
{
    // Cap the amount of history to protect the node from unbounded work.
    let max_hist = data.gas_opt.max_fee_hist_size;
    if block_count > et::U256::from(max_hist) {
        // Report the *configured* limit instead of a hard-coded "1024" so the
        // message stays accurate when `max_fee_hist_size` is changed.
        return error(
            ExitCode::USR_ILLEGAL_ARGUMENT,
            format!("block_count must be <= {max_hist}"),
        );
    }

    let mut hist = et::FeeHistory {
        base_fee_per_gas: Vec::new(),
        gas_used_ratio: Vec::new(),
        oldest_block: et::U256::default(),
        reward: Vec::new(),
    };

    let mut block_number = last_block;
    // Safe to narrow: we just checked the count is within the configured bound.
    let mut block_count = block_count.as_usize();

    // Base fee at a given Tendermint height, read from the FVM state params.
    let get_base_fee = |height: Height| {
        data.client
            .state_params(FvmQueryHeight::Height(height.value()))
            .map(|result| result.map(|state_params| state_params.value.base_fee))
    };

    while block_count > 0 {
        let block = data
            .block_by_height(block_number)
            .await
            .context("failed to get block")?;

        let height = block.header().height;

        // Apparently the base fees have to include the next fee after the newest block.
        // See https://github.com/filecoin-project/lotus/blob/v1.25.2/node/impl/full/eth.go#L721-L725
        if hist.base_fee_per_gas.is_empty() {
            let base_fee = get_base_fee(height.increment())
                .await
                .context("failed to get next base fee")?;

            hist.base_fee_per_gas.push(to_eth_tokens(&base_fee)?);
        }

        let base_fee = get_base_fee(height)
            .await
            .context("failed to get block base fee")?;

        let consensus_params: consensus_params::Response = data
            .tm()
            .consensus_params(height)
            .await
            .context("failed to get consensus params")?;

        // Tendermint reports a non-positive max_gas for "no limit"; fall back
        // to the FVM's default block gas limit in that case.
        let mut block_gas_limit = consensus_params.consensus_params.block.max_gas;
        if block_gas_limit <= 0 {
            block_gas_limit =
                i64::try_from(fvm_shared::BLOCK_GAS_LIMIT).expect("FVM block gas limit not i64")
        };

        // The latest block might not have results yet.
        if let Ok(block_results) = data.tm().block_results(height).await {
            let txs_results = block_results.txs_results.unwrap_or_default();
            let total_gas_used: i64 = txs_results.iter().map(|r| r.gas_used).sum();

            // Effective premium and gas used for every signed message in the block.
            let mut premiums = Vec::new();
            for (tx, txres) in block.data().iter().zip(txs_results) {
                let msg = fvm_ipld_encoding::from_slice::<ChainMessage>(tx)
                    .context("failed to decode tx as ChainMessage")?;

                if let ChainMessage::Signed(msg) = msg {
                    let premium = crate::gas::effective_gas_premium(&msg.message, &base_fee);
                    premiums.push((premium, txres.gas_used));
                }
            }
            premiums.sort();
            let premium_gas_used: i64 = premiums.iter().map(|(_, gas)| *gas).sum();

            // For each percentile, pick the premium of the transaction at which the
            // cumulative gas used crosses that share of the block's premium gas.
            let rewards: Result<Vec<et::U256>, _> = reward_percentiles
                .iter()
                .map(|p| {
                    if premiums.is_empty() {
                        Ok(et::U256::zero())
                    } else {
                        let threshold_gas_used = (premium_gas_used as f64 * p / 100f64) as i64;
                        let mut sum_gas_used = 0;
                        let mut idx = 0;
                        while sum_gas_used < threshold_gas_used && idx < premiums.len() - 1 {
                            sum_gas_used += premiums[idx].1;
                            idx += 1;
                        }
                        to_eth_tokens(&premiums[idx].0)
                    }
                })
                .collect();

            // The loop walks backwards, so the last processed block is the oldest.
            hist.oldest_block = et::U256::from(height.value());
            hist.base_fee_per_gas.push(to_eth_tokens(&base_fee)?);
            hist.gas_used_ratio
                .push(total_gas_used as f64 / block_gas_limit as f64);
            hist.reward.push(rewards?);

            block_count -= 1;
        }

        // Genesis has height 1.
        if height.value() <= 1 {
            break;
        }

        block_number = et::BlockNumber::Number(et::U64::from(height.value() - 1));
    }

    // Reverse data to be oldest-to-newest.
    hist.base_fee_per_gas.reverse();
    hist.gas_used_ratio.reverse();
    hist.reward.reverse();

    Ok(hist)
}
/// Returns the current price per gas in wei (`eth_gasPrice`).
pub async fn gas_price<C>(data: JsonRpcData<C>) -> JsonRpcResult<et::U256>
where
    C: Client + Sync + Send,
{
    // The gas price is the base fee taken from the latest FVM state parameters.
    let state = data.client.state_params(FvmQueryHeight::default()).await?;
    Ok(to_eth_tokens(&state.value.base_fee)?)
}
/// Returns the balance of the account of given address.
pub async fn get_balance<C>(
    data: JsonRpcData<C>,
    Params((addr, block_id)): Params<(et::Address, et::BlockId)>,
) -> JsonRpcResult<et::U256>
where
    C: Client + Sync + Send,
{
    let actor = to_fvm_address(addr);
    let height = data.query_height(block_id).await?;
    let res = data.client.actor_state(&actor, height).await?;
    // Actors that don't exist simply have a zero balance.
    match res.value {
        None => Ok(et::U256::zero()),
        Some((_, state)) => Ok(to_eth_tokens(&state.balance)?),
    }
}
/// Returns information about a block by hash.
pub async fn get_block_by_hash<C>(
    data: JsonRpcData<C>,
    Params((block_hash, full_tx)): Params<(et::H256, bool)>,
) -> JsonRpcResult<Option<et::Block<serde_json::Value>>>
where
    C: Client + Sync + Send,
{
    match data.block_by_hash_opt(block_hash).await? {
        None => Ok(None),
        Some(block) => {
            // The synthetic "block zero" has no transactions to enrich.
            if from_tm::is_block_zero(&block) {
                Ok(Some(to_eth_block_zero(block)?))
            } else {
                data.enrich_block(block, full_tx).await.map(Some)
            }
        }
    }
}
/// Returns information about a block by block number.
pub async fn get_block_by_number<C>(
    data: JsonRpcData<C>,
    Params((block_number, full_tx)): Params<(et::BlockNumber, bool)>,
) -> JsonRpcResult<Option<et::Block<serde_json::Value>>>
where
    C: Client + Sync + Send,
{
    let block = data.block_by_height(block_number).await?;
    if block.header().height.value() > 0 {
        data.enrich_block(block, full_tx).await.map(Some)
    } else if from_tm::is_block_zero(&block) {
        // Height 0 only exists as the synthetic "block zero".
        Ok(Some(to_eth_block_zero(block)?))
    } else {
        Ok(None)
    }
}
/// Returns the number of transactions in a block matching the given block number.
pub async fn get_block_transaction_count_by_number<C>(
    data: JsonRpcData<C>,
    Params((block_number,)): Params<(et::BlockNumber,)>,
) -> JsonRpcResult<et::U64>
where
    C: Client + Sync + Send,
{
    // The transaction count is the number of data items in the block.
    let txs = data.block_by_height(block_number).await?.data.len();
    Ok(et::U64::from(txs))
}
/// Returns the number of transactions in a block from a block matching the given block hash.
pub async fn get_block_transaction_count_by_hash<C>(
    data: JsonRpcData<C>,
    Params((block_hash,)): Params<(et::H256,)>,
) -> JsonRpcResult<et::U64>
where
    C: Client + Sync + Send,
{
    // Unknown blocks count as zero transactions rather than an error.
    let count = data
        .block_by_hash_opt(block_hash)
        .await?
        .map_or(et::U64::zero(), |b| et::U64::from(b.data.len()));
    Ok(count)
}
/// Returns the information about a transaction requested by block hash and index.
pub async fn get_transaction_by_block_hash_and_index<C>(
    data: JsonRpcData<C>,
    Params((block_hash, index)): Params<(et::H256, et::U64)>,
) -> JsonRpcResult<Option<et::Transaction>>
where
    C: Client + Sync + Send,
{
    // Unknown block hash simply means no transaction.
    match data.block_by_hash_opt(block_hash).await? {
        Some(block) => data.transaction_by_index(block, index).await,
        None => Ok(None),
    }
}
/// Returns the information about a transaction requested by block number and index.
pub async fn get_transaction_by_block_number_and_index<C>(
    data: JsonRpcData<C>,
    Params((block_number, index)): Params<(et::BlockNumber, et::U64)>,
) -> JsonRpcResult<Option<et::Transaction>>
where
    C: Client + Sync + Send,
{
    // Resolve the block first, then pick the transaction at the given index.
    let blk = data.block_by_height(block_number).await?;
    data.transaction_by_index(blk, index).await
}
/// Returns the information about a transaction requested by transaction hash.
///
/// Looks in the pending-transaction cache first, then falls back to querying
/// Tendermint by hash and converting the result into an Ethereum transaction.
pub async fn get_transaction_by_hash<C>(
    data: JsonRpcData<C>,
    Params((tx_hash,)): Params<(et::H256,)>,
) -> JsonRpcResult<Option<et::Transaction>>
where
    C: Client + Sync + Send,
{
    // Check in the pending cache first.
    if let Some(tx) = data.tx_cache.get(&tx_hash) {
        Ok(Some(tx))
    } else if let Some(res) = data.tx_by_hash(tx_hash).await? {
        let msg = to_chain_message(&res.tx)?;

        if let ChainMessage::Signed(msg) = msg {
            // Header of the block the transaction was included in.
            let header: header::Response = data.tm().header(res.height).await?;

            let sp = data
                .client
                .state_params(FvmQueryHeight::Height(header.header.height.value()))
                .await?;

            let chain_id = ChainID::from(sp.value.chain_id);
            // Use the Ethereum-compatible hash derived from the execution
            // events, not the Tendermint transaction hash.
            let hash = msg_hash(&res.tx_result.events, &res.tx);

            let mut tx = to_eth_transaction(msg, chain_id, hash)?;
            tx.transaction_index = Some(et::U64::from(res.index));
            tx.block_hash = Some(et::H256::from_slice(header.header.hash().as_bytes()));
            tx.block_number = Some(et::U64::from(res.height.value()));

            Ok(Some(tx))
        } else {
            // Only signed (user) messages have an Ethereum representation.
            error(ExitCode::USR_ILLEGAL_ARGUMENT, "incompatible transaction")
        }
    } else {
        Ok(None)
    }
}
/// Returns the number of transactions sent from an address, up to a specific block.
///
/// This is done by looking up the nonce (sequence number) of the account;
/// non-existent actors count as zero.
pub async fn get_transaction_count<C>(
    data: JsonRpcData<C>,
    Params((addr, block_id)): Params<(et::Address, et::BlockId)>,
) -> JsonRpcResult<et::U64>
where
    C: Client + Sync + Send,
{
    let actor = to_fvm_address(addr);
    let height = data.query_height(block_id).await?;
    let res = data.client.actor_state(&actor, height).await?;
    let nonce = res.value.map_or(0, |(_, state)| state.sequence);
    Ok(et::U64::from(nonce))
}
/// Returns the receipt of a transaction by transaction hash.
pub async fn get_transaction_receipt<C>(
    data: JsonRpcData<C>,
    Params((tx_hash,)): Params<(et::H256,)>,
) -> JsonRpcResult<Option<et::TransactionReceipt>>
where
    C: Client + Sync + Send,
{
    if let Some(res) = data.tx_by_hash(tx_hash).await? {
        // Block header and full block results are needed to fill in the
        // cumulative gas fields of the receipt.
        let header: header::Response = data.tm().header(res.height).await?;
        let block_results: block_results::Response = data.tm().block_results(res.height).await?;
        let cumulative = to_cumulative(&block_results);
        let state_params = data
            .client
            .state_params(FvmQueryHeight::Height(header.header.height.value()))
            .await?;
        let msg = to_chain_message(&res.tx)?;
        if let ChainMessage::Signed(msg) = msg {
            let receipt = to_eth_receipt(
                &msg,
                &res,
                &cumulative,
                &header.header,
                &state_params.value.base_fee,
            )
            .await
            .context("failed to convert to receipt")?;
            Ok(Some(receipt))
        } else {
            // Only signed (user) messages have an Ethereum receipt.
            error(ExitCode::USR_ILLEGAL_ARGUMENT, "incompatible transaction")
        }
    } else {
        Ok(None)
    }
}
/// Returns receipts for all the transactions in a block.
pub async fn get_block_receipts<C>(
    data: JsonRpcData<C>,
    Params((block_number,)): Params<(et::BlockNumber,)>,
) -> JsonRpcResult<Vec<et::TransactionReceipt>>
where
    C: Client + Sync + Send,
{
    let block = data.block_by_height(block_number).await?;

    // The synthetic "block zero" has no transactions, hence no receipts.
    if from_tm::is_block_zero(&block) {
        return Ok(Vec::new());
    }

    let height = block.header.height;
    let state_params = data
        .client
        .state_params(FvmQueryHeight::Height(height.value()))
        .await?;

    let block_results: block_results::Response = data.tm().block_results(height).await?;
    let cumulative = to_cumulative(&block_results);
    let mut receipts = Vec::new();

    // Pair every transaction with its execution result, tracking the index
    // within the block for the receipt.
    for (index, (tx, tx_result)) in block
        .data
        .into_iter()
        .zip(block_results.txs_results.unwrap_or_default())
        .enumerate()
    {
        let msg = to_chain_message(&tx)?;
        // Only signed (user) messages produce Ethereum receipts.
        if let ChainMessage::Signed(msg) = msg {
            let result = endpoint::tx::Response {
                hash: Default::default(), // Shouldn't use this anyway.
                height,
                index: index as u32,
                tx_result,
                tx,
                proof: None,
            };
            let receipt = to_eth_receipt(
                &msg,
                &result,
                &cumulative,
                &block.header,
                &state_params.value.base_fee,
            )
            .await?;
            receipts.push(receipt)
        }
    }

    Ok(receipts)
}
/// Returns the number of uncles in a block from a block matching the given block hash.
///
/// Tendermint has no notion of uncle blocks, so the count is always zero.
pub async fn get_uncle_count_by_block_hash<C>(
    _data: JsonRpcData<C>,
    _params: Params<(et::H256,)>,
) -> JsonRpcResult<et::U256> {
    Ok(et::U256::default())
}
/// Returns the number of uncles in a block from a block matching the given block number.
///
/// Tendermint has no notion of uncle blocks, so the count is always zero.
pub async fn get_uncle_count_by_block_number<C>(
    _data: JsonRpcData<C>,
    _params: Params<(et::BlockNumber,)>,
) -> JsonRpcResult<et::U256> {
    Ok(et::U256::default())
}
/// Returns information about an uncle of a block by hash and uncle index position.
///
/// It will always return None since Tendermint doesn't have uncles.
pub async fn get_uncle_by_block_hash_and_index<C>(
    _data: JsonRpcData<C>,
    _params: Params<(et::H256, et::U64)>,
) -> JsonRpcResult<Option<et::Block<et::H256>>> {
    Ok(None)
}
/// Returns information about an uncle of a block by number and uncle index position.
///
/// It will always return None since Tendermint doesn't have uncles.
pub async fn get_uncle_by_block_number_and_index<C>(
    _data: JsonRpcData<C>,
    _params: Params<(et::BlockNumber, et::U64)>,
) -> JsonRpcResult<Option<et::Block<et::H256>>> {
    Ok(None)
}
/// Creates new message call transaction or a contract creation for signed transactions.
pub async fn send_raw_transaction<C>(
    data: JsonRpcData<C>,
    Params((tx,)): Params<(et::Bytes,)>,
) -> JsonRpcResult<et::TxHash>
where
    C: Client + Sync + Send,
{
    // Decode the RLP payload into a typed transaction plus its signature.
    let rlp = rlp::Rlp::new(tx.as_ref());
    let (tx, sig): (TypedTransaction, et::Signature) = TypedTransaction::decode_signed(&rlp)
        .context("failed to decode RLP as signed TypedTransaction")?;

    let sighash = tx.sighash();
    // Ethereum-style transaction hash: keccak of the raw RLP bytes.
    let msghash = et::TxHash::from(ethers_core::utils::keccak256(rlp.as_raw()));

    tracing::debug!(?sighash, eth_hash = ?msghash, ?tx, "received raw transaction");

    // Cache EIP-1559 transactions so lookups by hash can serve them while
    // they are still pending.
    if let Some(tx) = tx.as_eip1559_ref() {
        let tx = from_eth::to_eth_transaction(tx.clone(), sig, msghash);
        data.tx_cache.insert(msghash, tx);
    }

    let msg = to_fvm_message(tx, false)?;
    let sender = msg.from;
    let nonce = msg.sequence;

    let msg = SignedMessage {
        message: msg,
        signature: Signature::new_secp256k1(sig.to_vec()),
    };

    let msg = ChainMessage::Signed(msg);
    let bz: Vec<u8> = SignedMessageFactory::serialize(&msg)?;

    // Use the broadcast version which waits for basic checks to complete,
    // but not the execution results - those will have to be polled with get_transaction_receipt.
    let res: tx_sync::Response = data.tm().broadcast_tx_sync(bz).await?;

    if res.code.is_ok() {
        // The following hash would be okay for ethers-rs,and we could use it to look up the TX with Tendermint,
        // but ethers.js would reject it because it doesn't match what Ethereum would use.
        // Ok(et::TxHash::from_slice(res.hash.as_bytes()))
        Ok(msghash)
    } else {
        // Try to decode any errors returned in the data.
        let bz = RawBytes::from(res.data.to_vec());

        // Might have to first call `decode_fevm_data` here in case CometBFT
        // wraps the data into Base64 encoding like it does for `DeliverTx`.
        let bz = decode_fevm_return_data(bz)
            .or_else(|_| decode_data(&res.data).and_then(decode_fevm_return_data))
            .ok();

        let exit_code = ExitCode::new(res.code.value());

        // NOTE: We could have checked up front if we have buffered transactions already waiting,
        // in which case this have just been appended to the list.
        // Out-of-sequence transactions with an admissible nonce gap are
        // buffered so they can be submitted once earlier nonces arrive.
        if let Some(oos) = OutOfSequence::try_parse(exit_code, &res.log) {
            let is_admissible = oos.is_admissible(data.max_nonce_gap);
            tracing::debug!(eth_hash = ?msghash, expected = oos.expected, got = oos.got, is_admissible, "out-of-sequence transaction received");
            if is_admissible {
                data.tx_buffer.insert(sender, nonce, msg);
                return Ok(msghash);
            }
        }

        error_with_revert(exit_code, res.log, bz)
    }
}
/// Executes a new message call immediately without creating a transaction on the block chain.
pub async fn call<C>(
    data: JsonRpcData<C>,
    Params((tx, block_id)): Params<(TypedTransactionCompat, et::BlockId)>,
) -> JsonRpcResult<et::Bytes>
where
    C: Client + Sync + Send,
{
    let msg = to_fvm_message(tx.into(), true)?;
    // Deployments are addressed to the Ethereum Address Manager actor.
    let is_create = msg.to == EAM_ACTOR_ADDR;
    let height = data.query_height(block_id).await?;
    let response = data.client.call(msg, height).await?;
    let deliver_tx = response.value;

    // Based on Lotus, we should return the data from the receipt.
    if deliver_tx.code.is_err() {
        // There might be some revert data encoded as ABI in the response.
        let (msg, data) = match decode_fevm_invoke(&deliver_tx) {
            Ok(h) => (deliver_tx.info, Some(h)),
            Err(e) => (
                format!("{}\nfailed to decode return data: {:#}", deliver_tx.info, e),
                None,
            ),
        };
        error_with_revert(ExitCode::new(deliver_tx.code.value()), msg, data)
    } else if is_create {
        // It's not clear why some tools like Remix call this with deployment transaction, but they do.
        // We could parse the deployed contract address, but it would be of very limited use;
        // the call effect isn't persisted, so one would have to send an actual transaction
        // and then run a call on `pending` state with this address to have a chance to hit
        // that contract before the transaction is included in a block, assuming address
        // creation is deterministic.
        // Lotus returns empty: https://github.com/filecoin-project/lotus/blob/v1.23.1-rc2/node/impl/full/eth.go#L1091-L1094
        Ok(Default::default())
    } else {
        let return_data = decode_fevm_invoke(&deliver_tx)
            .context("error decoding data from deliver_tx in query")?;
        Ok(return_data.into())
    }
}
/// Generates and returns an estimate of how much gas is necessary to allow the transaction to complete.
/// The transaction will not be added to the blockchain.
/// Note that the estimate may be significantly more than the amount of gas actually used by the transaction,
/// for a variety of reasons including EVM mechanics and node performance.
pub async fn estimate_gas<C>(
    data: JsonRpcData<C>,
    Params(params): Params<EstimateGasParams>,
) -> JsonRpcResult<et::U256>
where
    C: Client + Sync + Send,
{
    // The block parameter is optional; default to the latest block.
    let (tx, block_id) = match params {
        EstimateGasParams::One((tx,)) => (tx, et::BlockId::Number(et::BlockNumber::Latest)),
        EstimateGasParams::Two((tx, block_id)) => (tx, block_id),
    };

    let msg = to_fvm_message(tx.into(), true).context("failed to convert to FVM message")?;

    let height = data
        .query_height(block_id)
        .await
        .context("failed to get height")?;

    let response = data
        .client
        .estimate_gas(msg, height)
        .await
        .context("failed to call estimate gas query")?;

    let estimate = response.value;

    if !estimate.exit_code.is_success() {
        // There might be some revert data encoded as ABI in the response.
        let msg = format!("failed to estimate gas: {}", estimate.info);
        let (msg, data) = match decode_fevm_return_data(estimate.return_data) {
            Ok(h) => (msg, Some(h)),
            Err(e) => (format!("{msg}\n{e:#}"), None),
        };
        error_with_revert(estimate.exit_code, msg, data)
    } else {
        Ok(estimate.gas_limit.into())
    }
}
/// Returns the value from a storage position at a given address.
///
/// The return value is a hex encoded U256.
pub async fn get_storage_at<C>(
    data: JsonRpcData<C>,
    Params((address, position, block_id)): Params<(et::H160, et::U256, et::BlockId)>,
) -> JsonRpcResult<String>
where
    C: Client + Sync + Send,
{
    // Renders an optional 256-bit word as a zero-padded hex string.
    // The client library expects hex encoded string. The JS client might want a prefix too.
    let encode = |data: Option<uints::U256>| {
        let mut bz = [0u8; 32];
        if let Some(data) = data {
            data.to_big_endian(&mut bz);
        }
        Ok(format!("0x{}", hex::encode(bz)))
    };

    let height = data.query_height(block_id).await?;

    // If not an EVM actor, return empty (all-zero) storage.
    if data.get_actor_type(&address, height).await? != ActorType::EVM {
        return encode(None);
    }

    // Convert the Ethereum storage position into the EVM actor's key type.
    let params = evm::GetStorageAtParams {
        storage_key: {
            let mut bz = [0u8; 32];
            position.to_big_endian(&mut bz);
            evm::uints::U256::from_big_endian(&bz)
        },
    };

    let params = RawBytes::serialize(params).context("failed to serialize position to IPLD")?;

    let ret = data
        .read_evm_actor::<evm::GetStorageAtReturn>(
            address,
            evm::Method::GetStorageAt,
            params,
            height,
        )
        .await?;

    // A missing actor or slot reads as zero, matching Ethereum semantics.
    encode(ret.map(|r| r.storage))
}
/// Returns code at a given address.
pub async fn get_code<C>(
    data: JsonRpcData<C>,
    Params((address, block_id)): Params<(et::H160, et::BlockId)>,
) -> JsonRpcResult<et::Bytes>
where
    C: Client + Sync + Send,
{
    let height = data.query_height(block_id).await?;

    // Only EVM actors carry bytecode; anything else reads as empty.
    if data.get_actor_type(&address, height).await? != ActorType::EVM {
        return Ok(Default::default());
    }

    // `GetBytecode` takes no input parameters.
    let ret = data
        .read_evm_actor::<evm::BytecodeReturn>(
            address,
            evm::Method::GetBytecode,
            RawBytes::default(),
            height,
        )
        .await?;

    // The actor returns a CID pointing at the bytecode; resolve it via IPLD.
    let cid = match ret.and_then(|r| r.code) {
        None => return Ok(et::Bytes::default()),
        Some(cid) => cid,
    };

    let code = data
        .client
        .ipld(&cid, height)
        .await
        .context("failed to fetch bytecode")?;

    Ok(code.map(et::Bytes::from).unwrap_or_default())
}
/// Returns an object with data about the sync status or false.
pub async fn syncing<C>(data: JsonRpcData<C>) -> JsonRpcResult<et::SyncingStatus>
where
    C: Client + Sync + Send,
{
    let status: status::Response = data.tm().status().await.context("failed to fetch status")?;
    let info = status.sync_info;
    // Ethereum expects `false` when fully synced, otherwise a progress object.
    let status = if !info.catching_up {
        et::SyncingStatus::IsFalse
    } else {
        // Tendermint only exposes the latest known height, so current and
        // highest are reported as the same value; the snap-sync style fields
        // below have no Tendermint equivalent and are left unset.
        let progress = et::SyncProgress {
            // This would be the block we executed.
            current_block: et::U64::from(info.latest_block_height.value()),
            // This would be the block we know about but haven't got to yet.
            highest_block: et::U64::from(info.latest_block_height.value()),
            // This would be the block we started syncing from.
            starting_block: Default::default(),
            pulled_states: None,
            known_states: None,
            healed_bytecode_bytes: None,
            healed_bytecodes: None,
            healed_trienode_bytes: None,
            healed_trienodes: None,
            healing_bytecode: None,
            healing_trienodes: None,
            synced_account_bytes: None,
            synced_accounts: None,
            synced_bytecode_bytes: None,
            synced_bytecodes: None,
            synced_storage: None,
            synced_storage_bytes: None,
        };
        et::SyncingStatus::IsSyncing(Box::new(progress))
    };
    Ok(status)
}
/// Returns an array of all logs matching a given filter object.
pub async fn get_logs<C>(
data: JsonRpcData<C>,
Params((filter,)): Params<(et::Filter,)>,
) -> JsonRpcResult<Vec<et::Log>>
where
C: Client + Sync + Send,
{
let (from_height, to_height) = match filter.block_option {
et::FilterBlockOption::Range {
from_block,
to_block,
} => {
// Turn block number into a height.
async fn resolve_height<C: Client + Send + Sync>(
data: &JsonRpcData<C>,
bn: BlockNumber,
) -> JsonRpcResult<Height> {
match bn {
BlockNumber::Number(n) => {
Ok(Height::try_from(n.as_u64()).context("invalid height")?)
}
other => {
let h = data.header_by_height(other).await?;
Ok(h.height)
}
}
}
let from_block = from_block.unwrap_or_default();
let mut to_block = to_block.unwrap_or_default();
// Automatically restrict the end to the highest available block to allow queries by fixed ranges.
// This is only applied ot the end, not the start, so if `from > to` then we return nothing.
if let BlockNumber::Number(n) = to_block {
let latest_height = data.latest_height().await?;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | true |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/apis/web3.rs | fendermint/eth/api/src/apis/web3.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::Context;
use cid::multihash::MultihashDigest;
use jsonrpc_v2::Params;
use tendermint::abci;
use tendermint_rpc::Client;
use crate::{JsonRpcData, JsonRpcResult};
/// Returns the current client version (`web3_clientVersion`).
pub async fn client_version<C>(data: JsonRpcData<C>) -> JsonRpcResult<String>
where
    C: Client + Sync + Send,
{
    // Combine the ABCI application name and versions into a single string.
    let info: abci::response::Info = data
        .tm()
        .abci_info()
        .await
        .context("failed to fetch info")?;
    Ok(format!(
        "{}/{}/{}",
        info.data, info.version, info.app_version
    ))
}
/// Returns Keccak-256 (not the standardized SHA3-256) of the given data (`web3_sha3`).
///
/// Expects the data as a hex encoded string (with or without a `0x` prefix) and
/// returns the digest as a `0x`-prefixed hex string, as the Ethereum JSON-RPC
/// specification requires for `DATA` results.
pub async fn sha3<C>(
    _data: JsonRpcData<C>,
    Params((input,)): Params<(String,)>,
) -> JsonRpcResult<String>
where
    C: Client + Sync + Send,
{
    let input = input.strip_prefix("0x").unwrap_or(&input);
    let input = hex::decode(input).context("failed to decode input as hex")?;
    let output = cid::multihash::Code::Keccak256.digest(&input);
    // Prefix with `0x` so spec-conforming clients (e.g. ethers.js) accept the
    // result; the previously returned bare hex string violates the JSON-RPC
    // `DATA` encoding.
    Ok(format!("0x{}", hex::encode(output.digest())))
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/apis/mod.rs | fendermint/eth/api/src/apis/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
// See https://ethereum.org/en/developers/docs/apis/json-rpc/#json-rpc-methods
// and https://ethereum.github.io/execution-apis/api-documentation/
use crate::HybridClient;
use jsonrpc_v2::{MapRouter, ServerBuilder};
use paste::paste;
mod eth;
mod net;
mod web3;
/// Registers JSON-RPC handlers on the server builder, naming each method
/// `<module>_<method>` (e.g. `eth_blockNumber`) and routing it to the
/// snake-case handler function of the same name in that module.
macro_rules! with_methods {
    ($server:ident, $module:ident, { $($method:ident),* }) => {
        paste!{
            $server
            $(.with_method(
                stringify!([< $module _ $method >]),
                $module :: [< $method:snake >] ::<HybridClient>
            ))*
        }
    };
}
/// Registers all implemented JSON-RPC handlers (`eth_*`, `web3_*`, `net_*`)
/// on the server builder and returns it for further configuration.
pub fn register_methods(server: ServerBuilder<MapRouter>) -> ServerBuilder<MapRouter> {
    // This is the list of eth methods. Apart from these Lotus implements 1 method from web3,
    // while Ethermint does more across web3, debug, miner, net, txpool, and personal.
    // The unimplemented ones are commented out, to make it easier to see where we're at.
    let server = with_methods!(server, eth, {
        accounts,
        blockNumber,
        call,
        chainId,
        // eth_coinbase
        // eth_compileLLL
        // eth_compileSerpent
        // eth_compileSolidity
        estimateGas,
        feeHistory,
        maxPriorityFeePerGas,
        gasPrice,
        getBalance,
        getBlockByHash,
        getBlockByNumber,
        getBlockTransactionCountByHash,
        getBlockTransactionCountByNumber,
        getBlockReceipts,
        getCode,
        // eth_getCompilers
        getFilterChanges,
        getFilterLogs,
        getLogs,
        getStorageAt,
        getTransactionByBlockHashAndIndex,
        getTransactionByBlockNumberAndIndex,
        getTransactionByHash,
        getTransactionCount,
        getTransactionReceipt,
        getUncleByBlockHashAndIndex,
        getUncleByBlockNumberAndIndex,
        getUncleCountByBlockHash,
        getUncleCountByBlockNumber,
        // eth_getWork
        // eth_hashrate
        // eth_mining
        newBlockFilter,
        newFilter,
        newPendingTransactionFilter,
        protocolVersion,
        sendRawTransaction,
        // eth_sendTransaction
        // eth_sign
        // eth_signTransaction
        // eth_submitHashrate
        // eth_submitWork
        syncing,
        uninstallFilter,
        subscribe,
        unsubscribe
    });

    let server = with_methods!(server, web3, {
        clientVersion,
        sha3
    });

    with_methods!(server, net, {
        version,
        listening,
        peerCount
    })
}
/// Indicate whether a method requires a WebSocket connection.
///
/// Only `eth_subscribe` needs a streaming transport; everything else can also
/// be served over plain HTTP.
pub fn is_streaming_method(method: &str) -> bool {
    matches!(method, "eth_subscribe")
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/apis/net.rs | fendermint/eth/api/src/apis/net.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::Context;
use ethers_core::types as et;
use fendermint_rpc::query::QueryClient;
use fendermint_vm_message::query::FvmQueryHeight;
use tendermint_rpc::endpoint::net_info;
use tendermint_rpc::Client;
use crate::{JsonRpcData, JsonRpcResult};
/// The current FVM network version.
///
/// Same as eth_protocolVersion
pub async fn version<C>(data: JsonRpcData<C>) -> JsonRpcResult<String>
where
    C: Client + Sync + Send,
{
    // Read the network version out of the latest FVM state parameters.
    let state = data.client.state_params(FvmQueryHeight::default()).await?;
    let version: u32 = state.value.network_version.into();
    Ok(version.to_string())
}
/// Returns true if client is actively listening for network connections.
pub async fn listening<C>(data: JsonRpcData<C>) -> JsonRpcResult<bool>
where
    C: Client + Sync + Send,
{
    // Tendermint exposes the listening flag via `net_info`.
    let info: net_info::Response = data
        .tm()
        .net_info()
        .await
        .context("failed to fetch net_info")?;
    Ok(info.listening)
}
/// Returns the number of peers currently connected to the client.
pub async fn peer_count<C>(data: JsonRpcData<C>) -> JsonRpcResult<et::U64>
where
    C: Client + Sync + Send,
{
    // Tendermint exposes the peer count via `net_info`.
    let info: net_info::Response = data
        .tm()
        .net_info()
        .await
        .context("failed to fetch net_info")?;
    Ok(et::U64::from(info.n_peers))
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/gas/mod.rs | fendermint/eth/api/src/gas/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_shared::{
bigint::{BigInt, Zero},
econ::TokenAmount,
message::Message,
};
// Copy of https://github.com/filecoin-project/ref-fvm/blob/fvm%40v3.3.1/fvm/src/gas/outputs.rs
mod output;
// https://github.com/filecoin-project/lotus/blob/6cc506f5cf751215be6badc94a960251c6453202/node/impl/full/eth.go#L2220C41-L2228
/// Average price per unit of gas actually paid for a message, derived from the
/// total spend (base fee burn + miner tip + over-estimation burn).
pub fn effective_gas_price(msg: &Message, base_fee: &TokenAmount, gas_used: i64) -> TokenAmount {
    let outputs = output::GasOutputs::compute(
        gas_used.try_into().expect("gas should be u64 convertible"),
        msg.gas_limit,
        base_fee,
        &msg.gas_fee_cap,
        &msg.gas_premium,
    );
    if gas_used <= 0 {
        return TokenAmount::from_atto(0);
    }
    let total_spend = outputs.base_fee_burn + outputs.miner_tip + outputs.over_estimation_burn;
    // Integer division of the attos; fractional attos are truncated.
    TokenAmount::from_atto(total_spend.atto() / TokenAmount::from_atto(gas_used).atto())
}
// https://github.com/filecoin-project/lotus/blob/9e4f1a4d23ad72ab191754d4f432e4dc754fce1b/chain/types/message.go#L227
/// The premium a message effectively pays: its declared premium, capped by
/// whatever headroom remains between the fee cap and the base fee.
pub fn effective_gas_premium(msg: &Message, base_fee: &TokenAmount) -> TokenAmount {
    // Headroom above the base fee; zero if the cap doesn't even cover it.
    let headroom = if msg.gas_fee_cap < *base_fee {
        TokenAmount::from_atto(0)
    } else {
        msg.gas_fee_cap.clone() - base_fee
    };
    // Pay the declared premium, but never more than the headroom.
    if msg.gas_premium < headroom {
        msg.gas_premium.clone()
    } else {
        headroom
    }
}
// finds 55th percentile instead of median to put negative pressure on gas price
// Rust implementation of:
// https://github.com/consensus-shipyard/lotus/blob/156f5556b3ecc042764d76308dca357da3adfb4d/node/impl/full/gas.go#L144
pub fn median_gas_premium(prices: &mut [(TokenAmount, i64)], block_gas_target: i64) -> TokenAmount {
    // Sort in descending order based on premium.
    prices.sort_by(|a, b| b.0.cmp(&a.0));
    let blocks = prices.len() as i64;

    // Target the 55th percentile of the cumulative gas (50% + 5%).
    let mut at = block_gas_target * blocks / 2;
    at += block_gas_target * blocks / (2 * 20);

    // Walk down the sorted premiums, remembering the last two seen, until the
    // cumulative gas limit crosses the target.
    let mut prev1 = TokenAmount::zero();
    let mut prev2 = TokenAmount::zero();

    for (price, limit) in prices.iter() {
        prev2 = prev1.clone();
        prev1 = price.clone();
        at -= limit;
        if at < 0 {
            break;
        }
    }

    let mut premium = prev1;

    if prev2 != TokenAmount::zero() {
        // Average the two premiums straddling the percentile boundary.
        // BUGFIX: `TokenAmount::div_ceil` returns a new value instead of
        // mutating; the previous code discarded the result, leaving `premium`
        // at the *sum* of the two premiums instead of their average.
        premium += &prev2;
        premium = premium.div_ceil(BigInt::from(2));
    }

    premium
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/src/gas/output.rs | fendermint/eth/api/src/gas/output.rs | // Copyright 2021-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_shared::econ::TokenAmount;
/// Breakdown of how a message's maximum gas funds are distributed after execution.
#[derive(Clone, Default)]
pub(crate) struct GasOutputs {
    // Base fee paid and burned: paid base fee * gas used.
    pub base_fee_burn: TokenAmount,
    // Extra burn charged for over-estimating the gas limit.
    pub over_estimation_burn: TokenAmount,
    // Penalty accrued from the shortfall of the fee cap below the base fee.
    pub miner_penalty: TokenAmount,
    // Tip paid to the miner: capped premium * gas limit.
    pub miner_tip: TokenAmount,
    // Remainder of the required funds returned to the sender.
    pub refund: TokenAmount,
    // In whole gas units.
    pub gas_refund: u64,
    pub gas_burned: u64,
}
impl GasOutputs {
    /// Computes the full fee breakdown for a message, mirroring the FVM's
    /// gas output logic (copied from ref-fvm `fvm/src/gas/outputs.rs`).
    pub fn compute(
        // In whole gas units.
        gas_used: u64,
        gas_limit: u64,
        base_fee: &TokenAmount,
        fee_cap: &TokenAmount,
        gas_premium: &TokenAmount,
    ) -> Self {
        let mut base_fee_to_pay = base_fee;
        let mut out = GasOutputs::default();

        // If the network base fee exceeds the sender's fee cap, only the cap
        // is paid, and a penalty accrues for the difference.
        if base_fee > fee_cap {
            base_fee_to_pay = fee_cap;
            out.miner_penalty = (base_fee - fee_cap) * gas_used
        }

        out.base_fee_burn = base_fee_to_pay * gas_used;

        // Cap the tip so that paid base fee + tip never exceeds the fee cap.
        let mut miner_tip = gas_premium.clone();
        if &(base_fee_to_pay + &miner_tip) > fee_cap {
            miner_tip = fee_cap - base_fee_to_pay;
        }
        // Note: the tip is charged on the gas *limit*, not the gas used.
        out.miner_tip = &miner_tip * gas_limit;

        let (out_gas_refund, out_gas_burned) = compute_gas_overestimation_burn(gas_used, gas_limit);
        out.gas_refund = out_gas_refund;
        out.gas_burned = out_gas_burned;

        if out.gas_burned != 0 {
            // Burned over-estimation gas is charged at the paid base fee; the
            // remaining difference (if the cap was below the base fee) adds to
            // the penalty.
            out.over_estimation_burn = base_fee_to_pay * out.gas_burned;
            out.miner_penalty += (base_fee - base_fee_to_pay) * out.gas_burned;
        }

        // Whatever remains of the maximum possible charge goes back to the sender.
        let required_funds = fee_cap * gas_limit;
        let refund =
            required_funds - &out.base_fee_burn - &out.miner_tip - &out.over_estimation_burn;
        out.refund = refund;

        out
    }
}
/// Splits the unused portion of a message's gas limit into a refund and a
/// burned amount, penalising over-estimation of the gas limit.
///
/// Returns `(gas_refund, gas_burned)` in whole gas units.
fn compute_gas_overestimation_burn(gas_used: u64, gas_limit: u64) -> (u64, u64) {
    const GAS_OVERUSE_NUM: u128 = 11;
    const GAS_OVERUSE_DENOM: u128 = 10;

    // Nothing was used: the whole limit counts as over-estimation and burns.
    if gas_used == 0 {
        return (0, gas_limit);
    }

    // Widen to u128 so the multiplication below cannot overflow.
    let used = gas_used as u128;
    let limit = gas_limit as u128;

    // This burns (N-10)% (clamped at 0% and 100%) of the remaining gas where
    // N is the overestimation percentage.
    let over = limit
        .saturating_sub((GAS_OVERUSE_NUM * used) / GAS_OVERUSE_DENOM)
        .min(used);

    // Remaining gas; saturating in case usage somehow exceeded the limit.
    let remaining = limit.saturating_sub(used);

    // Fraction of the remaining gas to burn; never more than 100% of it.
    let to_burn = (remaining * over) / used;

    // Saturating sub, just in case.
    let refund = remaining.saturating_sub(to_burn);
    (refund as u64, to_burn as u64)
}
// Adapted from lotus.
#[test]
fn overestimation_burn_test() {
    // Each row is (gas_used, gas_limit, expected_refund, expected_burn).
    let cases: &[(u64, u64, u64, u64)] = &[
        (100, 200, 10, 90),
        (100, 150, 30, 20),
        (1_000, 1_300, 240, 60),
        (500, 700, 140, 60),
        (200, 200, 0, 0),
        (20_000, 21_000, 1_000, 0),
        (0, 2_000, 0, 2_000),
        (500, 651, 121, 30),
        (500, 5_000, 0, 4_500),
        (7_499_000_000, 7_500_000_000, 1_000_000, 0),
        (7_500_000_000 / 2, 7_500_000_000, 375_000_000, 3_375_000_000),
        (1, 7_500_000_000, 0, 7_499_999_999),
    ];
    for &(used, limit, refund, toburn) in cases {
        let (computed_refund, computed_toburn) = compute_gas_overestimation_burn(used, limit);
        assert_eq!(refund, computed_refund, "refund");
        assert_eq!(toburn, computed_toburn, "burned");
    }
}
#[test]
fn gas_outputs_test() {
    // Each row is (used, limit, fee_cap, premium,
    //              base_fee_burn, over_estimation_burn, miner_penalty, miner_tip, refund),
    // all token amounts in atto, computed against a base fee of 10 atto.
    let cases: &[(u64, u64, u64, u64, u64, u64, u64, u64, u64)] = &[
        (100, 110, 11, 1, 1_000, 0, 0, 110, 100),
        (100, 130, 11, 1, 1_000, 60, 0, 130, 240),
        (100, 110, 10, 1, 1_000, 0, 0, 0, 100),
        (100, 110, 6, 1, 600, 0, 400, 0, 60),
    ];
    let base_fee = TokenAmount::from_atto(10);
    for &(used, limit, fee_cap, premium, base_fee_burn, over_estimation_burn, miner_penalty, miner_tip, refund) in
        cases
    {
        let output = GasOutputs::compute(
            used,
            limit,
            &base_fee,
            &TokenAmount::from_atto(fee_cap),
            &TokenAmount::from_atto(premium),
        );
        assert_eq!(
            TokenAmount::from_atto(base_fee_burn),
            output.base_fee_burn,
            "base_fee_burn"
        );
        assert_eq!(
            TokenAmount::from_atto(over_estimation_burn),
            output.over_estimation_burn,
            "over_estimation_burn"
        );
        assert_eq!(
            TokenAmount::from_atto(miner_penalty),
            output.miner_penalty,
            "miner_penalty"
        );
        assert_eq!(
            TokenAmount::from_atto(miner_tip),
            output.miner_tip,
            "miner_tip"
        );
        assert_eq!(TokenAmount::from_atto(refund), output.refund, "refund");
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/examples/ethers.rs | fendermint/eth/api/examples/ethers.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Example of using the Ethereum JSON-RPC facade with the Ethers provider.
//!
//! The example assumes that the following has been started and running in the background:
//! 1. Fendermint ABCI application
//! 2. Tendermint Core / Comet BFT
//! 3. Fendermint Ethereum API facade
//!
//! # Usage
//! ```text
//! cargo run -p fendermint_eth_api --release --example ethers --
//! ```
//!
//! A method can also be called directly with `curl`:
//!
//! ```text
//! curl -X POST -i \
//! -H 'Content-Type: application/json' \
//! -d '{"jsonrpc":"2.0","id":0,"method":"eth_getBlockTransactionCountByNumber","params":["0x1"]}' \
//! http://localhost:8545
//! ```
// See https://coinsbench.com/ethereum-with-rust-tutorial-part-1-create-simple-transactions-with-rust-26d365a7ea93
// and https://coinsbench.com/ethereum-with-rust-tutorial-part-2-compile-and-deploy-solidity-contract-with-rust-c3cd16fce8ee
// and https://coinsbench.com/ethers-rust-power-or-ethers-abigen-rundown-89ab5e47875d
use std::{fmt::Debug, path::PathBuf, sync::Arc};
use anyhow::{bail, Context};
use clap::Parser;
use common::{TestMiddleware, ENOUGH_GAS};
use ethers::providers::StreamExt;
use ethers::{
prelude::{abigen, ContractFactory},
providers::{FilterKind, Http, JsonRpcClient, Middleware, Provider, Ws},
signers::Signer,
};
use ethers_core::{
abi::Abi,
types::{
transaction::eip2718::TypedTransaction, Address, BlockId, BlockNumber, Bytes,
Eip1559TransactionRequest, Filter, Log, SyncingStatus, TransactionReceipt, TxHash, H256,
U256, U64,
},
};
use tracing::Level;
use crate::common::{
adjust_provider, make_middleware, prepare_call, request, send_transaction, TestAccount,
TestContractCall,
};
mod common;
/// Disabling filters helps when inspecting docker logs. The background data received for filters is rather noisy.
const FILTERS_ENABLED: bool = true;
// Generate a statically typed interface for the contract.
// An example of what it looks like is at https://github.com/filecoin-project/ref-fvm/blob/evm-integration-tests/testing/integration/tests/evm/src/simple_coin/simple_coin.rs
abigen!(SimpleCoin, "../../testing/contracts/SimpleCoin.abi");
const SIMPLECOIN_HEX: &'static str = include_str!("../../../testing/contracts/SimpleCoin.bin");
const SIMPLECOIN_RUNTIME_HEX: &'static str =
include_str!("../../../testing/contracts/SimpleCoin.bin-runtime");
// Command-line options for the ethers example client; parsed by clap from
// flags and the FM_ETH__LISTEN__* environment variables.
// NOTE: field doc comments double as the clap `--help` text, so they are
// left exactly as-is.
#[derive(Parser, Debug)]
pub struct Options {
    /// The host of the Fendermint Ethereum API endpoint.
    #[arg(long, default_value = "127.0.0.1", env = "FM_ETH__LISTEN__HOST")]
    pub http_host: String,

    /// The port of the Fendermint Ethereum API endpoint.
    #[arg(long, default_value = "8545", env = "FM_ETH__LISTEN__PORT")]
    pub http_port: u32,

    /// Secret key used to send funds, expected to be in Base64 format.
    ///
    /// Assumed to exist with a non-zero balance.
    #[arg(long)]
    pub secret_key_from: PathBuf,

    /// Secret key used to receive funds, expected to be in Base64 format.
    #[arg(long)]
    pub secret_key_to: PathBuf,

    /// Enable DEBUG logs.
    #[arg(long, short)]
    pub verbose: bool,
}
impl Options {
    /// Chosen log verbosity: DEBUG when `--verbose` is set, INFO otherwise.
    pub fn log_level(&self) -> Level {
        match self.verbose {
            true => Level::DEBUG,
            false => Level::INFO,
        }
    }

    /// HTTP URL of the Fendermint Ethereum API endpoint.
    pub fn http_endpoint(&self) -> String {
        format!("http://{}:{}", self.http_host, self.http_port)
    }

    /// WebSocket URL of the same endpoint; the same host/port is accessed
    /// with a GET upgrade instead of POST.
    pub fn ws_endpoint(&self) -> String {
        format!("ws://{}:{}", self.http_host, self.http_port)
    }
}
/// See the module docs for how to run.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let opts: Options = Options::parse();

    tracing_subscriber::fmt()
        .with_max_level(opts.log_level())
        .init();

    // Exercise the full request/response suite over plain HTTP first...
    let provider = Provider::<Http>::try_from(opts.http_endpoint())?;
    run_http(provider, &opts).await?;

    // ...then over WebSocket, which additionally tests subscriptions.
    let provider = Provider::<Ws>::connect(opts.ws_endpoint()).await?;
    run_ws(provider, &opts).await?;

    Ok(())
}
// The following methods are called by the [`Provider`].
// This is not an exhaustive list of JSON-RPC methods that the API implements, just what the client library calls.
//
// DONE:
// - eth_accounts
// - eth_blockNumber
// - eth_chainId
// - eth_getBalance
// - eth_getUncleCountByBlockHash
// - eth_getUncleCountByBlockNumber
// - eth_getUncleByBlockHashAndIndex
// - eth_getUncleByBlockNumberAndIndex
// - eth_getTransactionCount
// - eth_gasPrice
// - eth_getBlockByHash
// - eth_getBlockByNumber
// - eth_getTransactionByHash
// - eth_getTransactionReceipt
// - eth_feeHistory
// - eth_maxPriorityFeePerGas
// - eth_sendRawTransaction
// - eth_call
// - eth_estimateGas
// - eth_getBlockReceipts
// - eth_getStorageAt
// - eth_getCode
// - eth_syncing
// - web3_clientVersion
// - eth_getLogs
// - eth_newFilter
// - eth_newBlockFilter
// - eth_newPendingTransactionFilter
// - eth_getFilterChanges
// - eth_uninstallFilter
// - eth_subscribe
// - eth_unsubscribe
//
// DOING:
//
// TODO:
//
// WON'T DO:
// - eth_sign
// - eth_sendTransaction
// - eth_mining
// - eth_createAccessList
// - eth_getProof
//
/// Exercise the above methods, so we know at least the parameters are lined up correctly.
async fn run<C>(provider: &Provider<C>, opts: &Options) -> anyhow::Result<()>
where
C: JsonRpcClient + Clone + 'static,
{
let from = TestAccount::new(&opts.secret_key_from)?;
let to = TestAccount::new(&opts.secret_key_to)?;
tracing::info!(from = ?from.eth_addr, to = ?to.eth_addr, "ethereum address");
// Set up filters to collect events.
let mut filter_ids = Vec::new();
let (logs_filter_id, blocks_filter_id, txs_filter_id): (
Option<U256>,
Option<U256>,
Option<U256>,
) = if FILTERS_ENABLED {
let logs_filter_id = request(
"eth_newFilter",
provider
.new_filter(FilterKind::Logs(&Filter::default()))
.await,
|_| true,
)?;
filter_ids.push(logs_filter_id);
let blocks_filter_id = request(
"eth_newBlockFilter",
provider.new_filter(FilterKind::NewBlocks).await,
|id| *id != logs_filter_id,
)?;
filter_ids.push(blocks_filter_id);
let txs_filter_id = request(
"eth_newPendingTransactionFilter",
provider.new_filter(FilterKind::PendingTransactions).await,
|id| *id != logs_filter_id,
)?;
filter_ids.push(txs_filter_id);
(
Some(logs_filter_id),
Some(blocks_filter_id),
Some(txs_filter_id),
)
} else {
(None, None, None)
};
request("web3_clientVersion", provider.client_version().await, |v| {
v.starts_with("fendermint/")
})?;
request("net_version", provider.get_net_version().await, |v| {
!v.is_empty() && v.chars().all(|c| c.is_numeric())
})?;
request("eth_accounts", provider.get_accounts().await, |acnts| {
acnts.is_empty()
})?;
let bn = request("eth_blockNumber", provider.get_block_number().await, |bn| {
bn.as_u64() > 0
})?;
// Go back one block, so we can be sure there are results.
let bn = bn - 1;
let chain_id = request("eth_chainId", provider.get_chainid().await, |id| {
!id.is_zero()
})?;
let mw = make_middleware(provider.clone(), chain_id.as_u64(), &from)
.context("failed to create middleware")?;
let mw = Arc::new(mw);
request(
"eth_getBalance",
provider.get_balance(from.eth_addr, None).await,
|b| !b.is_zero(),
)?;
request(
"eth_getBalance (non-existent)",
provider.get_balance(Address::default(), None).await,
|b| b.is_zero(),
)?;
request(
"eth_getUncleCountByBlockHash",
provider
.get_uncle_count(BlockId::Hash(H256([0u8; 32])))
.await,
|uc| uc.is_zero(),
)?;
request(
"eth_getUncleCountByBlockNumber",
provider
.get_uncle_count(BlockId::Number(BlockNumber::Number(bn)))
.await,
|uc| uc.is_zero(),
)?;
request(
"eth_getUncleByBlockHashAndIndex",
provider
.get_uncle(BlockId::Hash(H256([0u8; 32])), U64::from(0))
.await,
|u| u.is_none(),
)?;
request(
"eth_getUncleByBlockNumberAndIndex",
provider
.get_uncle(BlockId::Number(BlockNumber::Number(bn)), U64::from(0))
.await,
|u| u.is_none(),
)?;
// Get a block without transactions
let b = request(
"eth_getBlockByNumber w/o txns",
provider
.get_block(BlockId::Number(BlockNumber::Number(bn)))
.await,
|b| b.is_some() && b.as_ref().map(|b| b.number).flatten() == Some(bn),
)?;
let bh = b.unwrap().hash.expect("hash should be set");
// Get the same block without transactions by hash.
request(
"eth_getBlockByHash w/o txns",
provider.get_block(BlockId::Hash(bh)).await,
|b| b.is_some() && b.as_ref().map(|b| b.number).flatten() == Some(bn),
)?;
// Get the synthetic zero block.
let b = request(
"eth_getBlockByNumber @ zero",
provider
.get_block(BlockId::Number(BlockNumber::Number(U64::from(0))))
.await,
|b| b.is_some(),
)?;
let bh = b.unwrap().hash.expect("hash should be set");
// Check that block 0 can be fetched by its hash.
request(
"eth_getBlockByHash @ zero",
provider.get_block(BlockId::Hash(bh)).await,
|b| b.is_some() && b.as_ref().map(|b| b.number).flatten() == Some(U64::from(0)),
)?;
// Check that block 1 points at the synthetic block 0 as parent.
request(
"eth_getBlockByNumber @ one",
provider
.get_block(BlockId::Number(BlockNumber::Number(U64::from(1))))
.await,
|b| b.is_some() && b.as_ref().map(|b| b.parent_hash) == Some(bh),
)?;
let base_fee = request("eth_gasPrice", provider.get_gas_price().await, |id| {
!id.is_zero()
})?;
tracing::info!("sending example transfer");
let transfer = make_transfer(&mw, &to)
.await
.context("failed to make a transfer")?;
let receipt = send_transaction(&mw, transfer.clone(), "transfer")
.await
.context("failed to send transfer")?;
let tx_hash = receipt.transaction_hash;
let bn = receipt.block_number.unwrap();
let bh = receipt.block_hash.unwrap();
tracing::info!(height = ?bn, ?tx_hash, "example transfer");
// This equivalence is not required for ethers-rs, it's happy to use the return value from `eth_sendRawTransaction` for transaction hash.
// However, ethers.js actually asserts this and we cannot disable it, rendering that, or any similar tool, unusable if we rely on
// the default Tendermint transaction hash, which is a Sha256 hash of the entire payload (which includes the signature),
// not a Keccak256 of the unsigned RLP.
let expected_hash = {
let sig = mw
.signer()
.sign_transaction(&transfer)
.await
.context("failed to sign transaction")?;
let rlp = transfer.rlp_signed(&sig);
TxHash::from(ethers_core::utils::keccak256(rlp))
};
assert_eq!(tx_hash, expected_hash, "Ethereum hash should match");
// Querying at latest, so the transaction count should be non-zero.
request(
"eth_getTransactionCount",
provider.get_transaction_count(from.eth_addr, None).await,
|u| !u.is_zero(),
)?;
request(
"eth_getTransactionCount (non-existent)",
provider
.get_transaction_count(Address::default(), None)
.await,
|b| b.is_zero(),
)?;
// Get a block with transactions by number.
let block = request(
"eth_getBlockByNumber w/ txns",
provider
.get_block_with_txs(BlockId::Number(BlockNumber::Number(bn)))
.await,
|b| b.is_some() && b.as_ref().map(|b| b.number).flatten() == Some(bn),
)?;
assert_eq!(
tx_hash,
block.unwrap().transactions[0].hash,
"computed hash should match"
);
// Get the block with transactions by hash.
request(
"eth_getBlockByHash w/ txns",
provider.get_block_with_txs(BlockId::Hash(bh)).await,
|b| b.is_some() && b.as_ref().map(|b| b.number).flatten() == Some(bn),
)?;
// By now there should be a transaction in a block.
request(
"eth_feeHistory",
provider
.fee_history(
U256::from(100),
BlockNumber::Latest,
&[0.25, 0.5, 0.75, 0.95],
)
.await,
|hist| {
hist.base_fee_per_gas.len() > 0
&& *hist.base_fee_per_gas.last().unwrap() == base_fee
&& hist.gas_used_ratio.iter().any(|r| *r > 0.0)
},
)?;
request(
"eth_getTransactionByHash",
provider.get_transaction(tx_hash).await,
|tx| tx.is_some(),
)?;
request(
"eth_getTransactionReceipt",
provider.get_transaction_receipt(tx_hash).await,
|tx| tx.is_some(),
)?;
request(
"eth_getBlockReceipts",
provider.get_block_receipts(BlockNumber::Number(bn)).await,
|rs| !rs.is_empty(),
)?;
// Calling with 0 nonce so the node figures out the latest value.
let mut probe_tx = transfer.clone();
probe_tx.set_nonce(0);
let probe_height = BlockId::Number(BlockNumber::Number(bn));
request(
"eth_call",
provider.call(&probe_tx, Some(probe_height)).await,
|_| true,
)?;
request(
"eth_estimateGas w/ height",
provider.estimate_gas(&probe_tx, Some(probe_height)).await,
|gas: &U256| !gas.is_zero(),
)?;
request(
"eth_estimateGas w/o height",
provider.estimate_gas(&probe_tx, None).await,
|gas: &U256| !gas.is_zero(),
)?;
request(
"eth_maxPriorityFeePerGas",
provider.request("eth_maxPriorityFeePerGas", ()).await,
|premium: &U256| !premium.is_zero(),
)?;
tracing::info!("deploying SimpleCoin");
let bytecode =
Bytes::from(hex::decode(SIMPLECOIN_HEX).context("failed to decode contract hex")?);
let deployed_bytecode = Bytes::from(
hex::decode(SIMPLECOIN_RUNTIME_HEX).context("failed to decode contract runtime hex")?,
);
// let abi = serde_json::from_str::<ethers::core::abi::Abi>(SIMPLECOIN_ABI)?;
let abi: Abi = SIMPLECOIN_ABI.clone();
let factory = ContractFactory::new(abi, bytecode.clone(), mw.clone());
let mut deployer = factory.deploy(())?;
// Fill the fields so we can debug any difference between this and the node.
// Using `Some` block ID because with `None` the eth_estimateGas call would receive invalid parameters.
mw.fill_transaction(&mut deployer.tx, Some(BlockId::Number(BlockNumber::Latest)))
.await
.context("failed to fill deploy transaction")?;
tracing::info!(sighash = ?deployer.tx.sighash(), "deployment tx");
// Try with a call just because Remix does.
request(
"eth_call w/ deploy",
provider.call(&deployer.tx, None).await,
|_| true,
)?;
// NOTE: This would call eth_estimateGas to figure out how much gas to use, if we didn't set it.
// What the [Provider::fill_transaction] will _also_ do is estimate the fees using eth_feeHistory, here:
// https://github.com/gakonst/ethers-rs/blob/df165b84229cdc1c65e8522e0c1aeead3746d9a8/ethers-providers/src/rpc/provider.rs#LL300C30-L300C51
// These were set to zero in the earlier example transfer, ie. it was basically paid for by the miner (which is not at the moment charged),
// so the test passed. Here, however, there will be a non-zero cost to pay by the deployer, and therefore those balances
// have to be much higher than the defaults used earlier, e.g. the deployment cost 30 FIL, and we used to give 1 FIL.
let (contract, deploy_receipt): (_, TransactionReceipt) = deployer
.send_with_receipt()
.await
.context("failed to send deployment")?;
tracing::info!(addr = ?contract.address(), "SimpleCoin deployed");
let contract = SimpleCoin::new(contract.address(), contract.client());
let coin_balance: TestContractCall<_, U256> =
prepare_call(&mw, contract.get_balance(from.eth_addr), false).await?;
request("eth_call", coin_balance.call().await, |coin_balance| {
*coin_balance == U256::from(10000)
})?;
// Calling with 0x00..00 address so we see if it world work for calls by clients that set nothing.
let coin_balance = coin_balance.from(Address::default());
request(
"eth_call w/ 0x00..00",
coin_balance.call().await,
|coin_balance| *coin_balance == U256::from(10000),
)?;
// Call a method that does a revert, to check that the message shows up in the return value.
// Try to send more than the available balance of 10,000
let coin_send: TestContractCall<_, ()> = prepare_call(
&mw,
contract.send_coin_or_revert(to.eth_addr, U256::from(10000 * 10)),
true,
)
.await
.context("failed to prepare revert call")?;
match coin_send.call().await {
Ok(_) => bail!("call should failed with a revert"),
Err(e) => {
let e = e.to_string();
assert!(e.contains("revert"), "should say revert");
assert!(e.contains("0x08c379a"), "should have string selector");
}
}
// We could calculate the storage location of the balance of the owner of the contract,
// but let's just see what it returns with at slot 0. See an example at
// https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getstorageat
let storage_location = {
let mut bz = [0u8; 32];
U256::zero().to_big_endian(&mut bz);
H256::from_slice(&bz)
};
request(
"eth_getStorageAt",
mw.get_storage_at(contract.address(), storage_location, None)
.await,
|_| true,
)?;
request(
"eth_getStorageAt /w account",
mw.get_storage_at(from.eth_addr, storage_location, None)
.await,
|_| true,
)?;
request(
"eth_getCode",
mw.get_code(contract.address(), None).await,
|bz| *bz == deployed_bytecode,
)?;
request(
"eth_getCode /w account",
mw.get_code(from.eth_addr, None).await,
|bz| bz.is_empty(),
)?;
request("eth_syncing", mw.syncing().await, |s| {
*s == SyncingStatus::IsFalse // There is only one node.
})?;
// Send a SimpleCoin transaction to get an event emitted.
// Not using `prepare_call` here because `send_transaction` will fill the missing fields.
let coin_send_value = U256::from(100);
let coin_send: TestContractCall<_, bool> = contract.send_coin(to.eth_addr, coin_send_value);
// Take note of the inputs to ascertain it's the same we get back.
let tx_input = match coin_send.tx {
TypedTransaction::Eip1559(ref tx) => tx.data.clone(),
_ => None,
};
// Using `send_transaction` instead of `coin_send.send()` so it gets the receipt.
// Unfortunately the returned `bool` is not available through the Ethereum API.
let receipt = request(
"eth_sendRawTransaction",
send_transaction(&mw, coin_send.tx, "coin_send").await,
|receipt| !receipt.logs.is_empty() && receipt.logs.iter().all(|l| l.log_type.is_none()),
)?;
tracing::info!(tx_hash = ?receipt.transaction_hash, "coin sent");
request(
"eth_getTransactionByHash for input",
provider.get_transaction(receipt.transaction_hash).await,
|tx| match tx {
Some(tx) => tx.input == tx_input.unwrap_or_default(),
_ => false,
},
)?;
request(
"eth_getLogs",
mw.get_logs(&Filter::new().at_block_hash(receipt.block_hash.unwrap()))
.await,
|logs| *logs == receipt.logs,
)?;
// Check that requesting logs with higher-than-highest height does not fail.
request(
"eth_getLogs /w too high 'to' height",
mw.get_logs(&Filter::new().to_block(BlockNumber::Number(U64::from(u32::MAX))))
.await,
|logs: &Vec<Log>| logs.is_empty(), // There will be nothing from latest-to-latest by now.
)?;
// See what kind of events were logged.
if let Some(blocks_filter_id) = blocks_filter_id {
request(
"eth_getFilterChanges (blocks)",
mw.get_filter_changes(blocks_filter_id).await,
|block_hashes: &Vec<H256>| {
[bh, deploy_receipt.block_hash.unwrap()]
.iter()
.all(|h| block_hashes.contains(h))
},
)?;
}
if let Some(txs_filter_id) = txs_filter_id {
request(
"eth_getFilterChanges (txs)",
mw.get_filter_changes(txs_filter_id).await,
|tx_hashes: &Vec<H256>| {
[&tx_hash, &deploy_receipt.transaction_hash]
.iter()
.all(|h| tx_hashes.contains(h))
},
)?;
}
if let Some(logs_filter_id) = logs_filter_id {
let logs = request(
"eth_getFilterChanges (logs)",
mw.get_filter_changes(logs_filter_id).await,
|logs: &Vec<Log>| !logs.is_empty(),
)?;
// eprintln!("LOGS = {logs:?}");
// Parse `Transfer` events from the logs with the SimpleCoin contract.
// Based on https://github.com/filecoin-project/ref-fvm/blob/evm-integration-tests/testing/integration/tests/fevm_features/common.rs#L616
// and https://github.com/filecoin-project/ref-fvm/blob/evm-integration-tests/testing/integration/tests/fevm_features/simple_coin.rs#L26
// and https://github.com/filecoin-project/ref-fvm/blob/evm-integration-tests/testing/integration/tests/evm/src/simple_coin/simple_coin.rs#L103
// The contract has methods like `.transfer_filter()` which allows querying logs, but here we just test parsing to make sure the data is correct.
let transfer_events = logs
.into_iter()
.filter(|log| log.address == contract.address())
.map(|log| contract.decode_event::<TransferFilter>("Transfer", log.topics, log.data))
.collect::<Result<Vec<_>, _>>()
.context("failed to parse logs to transfer events")?;
assert!(!transfer_events.is_empty());
assert_eq!(transfer_events[0].from, from.eth_addr);
assert_eq!(transfer_events[0].to, to.eth_addr);
assert_eq!(transfer_events[0].value, coin_send_value);
}
// Uninstall all filters.
for id in filter_ids {
request("eth_uninstallFilter", mw.uninstall_filter(id).await, |ok| {
*ok
})?;
}
Ok(())
}
/// The HTTP interface provides JSON-RPC request/response endpoints.
async fn run_http(mut provider: Provider<Http>, opts: &Options) -> anyhow::Result<()> {
    tracing::info!("Running the tests over HTTP...");
    // Client tweaks shared by all examples — see `common::adjust_provider`.
    adjust_provider(&mut provider);
    run(&provider, opts).await?;
    tracing::info!("HTTP tests finished");
    Ok(())
}
/// The WebSocket interface provides JSON-RPC request/response interactions
/// as well as subscriptions, both using messages over the socket.
///
/// We subscribe to notifications first, then run the same suite of request/responses
/// as the HTTP case, finally check that we have collected events over the subscriptions.
async fn run_ws(mut provider: Provider<Ws>, opts: &Options) -> anyhow::Result<()> {
    tracing::info!("Running the tests over WS...");
    adjust_provider(&mut provider);

    // Subscriptions as well.
    let subs = if FILTERS_ENABLED {
        let block_sub = provider.subscribe_blocks().await?;
        let txs_sub = provider.subscribe_pending_txs().await?;
        let log_sub = provider.subscribe_logs(&Filter::default()).await?;
        Some((block_sub, txs_sub, log_sub))
    } else {
        None
    };

    run(&provider, opts).await?;

    if let Some((mut block_sub, mut txs_sub, mut log_sub)) = subs {
        // The suite above produced blocks, transactions and logs, so each
        // stream should have at least one item by now.
        assert!(block_sub.next().await.is_some(), "blocks should arrive");
        assert!(txs_sub.next().await.is_some(), "transactions should arrive");
        assert!(log_sub.next().await.is_some(), "logs should arrive");

        block_sub
            .unsubscribe()
            .await
            .context("failed to unsubscribe blocks")?;
        txs_sub
            .unsubscribe()
            .await
            .context("failed to unsubscribe txs")?;
        log_sub
            .unsubscribe()
            .await
            .context("failed to unsubscribe logs")?;
    }

    tracing::info!("WS tests finished.");
    Ok(())
}
/// Build a 1000 atto value transfer to `to` and fill in the missing fields.
///
/// Gas fields are set up-front from the testkit's `ENOUGH_GAS` so that the
/// fill step does not trigger gas estimation; `from` and `nonce` are filled
/// by the middleware (which involves querying the API).
async fn make_transfer<C>(
    mw: &TestMiddleware<C>,
    to: &TestAccount,
) -> anyhow::Result<TypedTransaction>
where
    C: JsonRpcClient + 'static,
{
    // Build the whole EIP-1559 request in one chain; the annotation drives
    // the final `.into()` conversion.
    let mut tx: TypedTransaction = Eip1559TransactionRequest::new()
        .to(to.eth_addr)
        .value(1000)
        .gas(ENOUGH_GAS)
        .max_fee_per_gas(0)
        .max_priority_fee_per_gas(0)
        .into();

    mw.fill_transaction(&mut tx, None).await?;

    Ok(tx)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/examples/query_blockhash.rs | fendermint/eth/api/examples/query_blockhash.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Example of using the Ethereum JSON-RPC facade with the Ethers provider.
//!
//! The example assumes that the following has been started and running in the background:
//! 1. Fendermint ABCI application
//! 2. Tendermint Core / Comet BFT
//! 3. Fendermint Ethereum API facade
//!
//! # Usage
//! ```text
//! cargo run -p fendermint_eth_api --release --example query_blockhash --
//! ```
use anyhow::Context;
use clap::Parser;
use ethers::{
prelude::{abigen, ContractFactory},
providers::{Http, JsonRpcClient, Middleware, Provider},
};
use ethers_core::{
abi::Abi,
types::{BlockId, BlockNumber, Bytes, TransactionReceipt, H256, U256, U64},
};
use hex;
use std::{fmt::Debug, path::PathBuf, sync::Arc};
use tracing::Level;
use crate::common::{adjust_provider, make_middleware, request, TestAccount};
#[allow(dead_code)]
mod common;
// Generate a statically typed interface for the contract.
abigen!(QueryBlockhash, "../../testing/contracts/QueryBlockhash.abi");
const QUERYBLOCKHASH_HEX: &'static str =
include_str!("../../../testing/contracts/QueryBlockhash.bin");
// Command-line options for the query_blockhash example; parsed by clap from
// flags and the FM_ETH__LISTEN__* environment variables.
// NOTE: field doc comments double as the clap `--help` text.
#[derive(Parser, Debug)]
pub struct Options {
    /// The host of the Fendermint Ethereum API endpoint.
    #[arg(long, default_value = "127.0.0.1", env = "FM_ETH__LISTEN__HOST")]
    pub http_host: String,

    /// The port of the Fendermint Ethereum API endpoint.
    #[arg(long, default_value = "8545", env = "FM_ETH__LISTEN__PORT")]
    pub http_port: u32,

    /// Secret key used to deploy the contract.
    ///
    /// Assumed to exist with a non-zero balance.
    #[arg(long, short)]
    pub secret_key: PathBuf,

    /// Enable DEBUG logs.
    #[arg(long, short)]
    pub verbose: bool,
}
impl Options {
    /// Chosen log verbosity: DEBUG when `--verbose` is set, INFO otherwise.
    pub fn log_level(&self) -> Level {
        match self.verbose {
            true => Level::DEBUG,
            false => Level::INFO,
        }
    }

    /// HTTP URL of the Fendermint Ethereum API endpoint.
    pub fn http_endpoint(&self) -> String {
        format!("http://{}:{}", self.http_host, self.http_port)
    }
}
/// See the module docs for how to run.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let opts: Options = Options::parse();

    tracing_subscriber::fmt()
        .with_max_level(opts.log_level())
        .init();

    // This example only exercises the HTTP transport.
    let provider = Provider::<Http>::try_from(opts.http_endpoint())?;
    run_http(provider, &opts).await?;

    Ok(())
}
/// Deploy the `QueryBlockhash` contract, then for the few blocks preceding
/// the deployment compare the hash the contract returns from `get_blockhash`
/// against the hash reported by `eth_getBlockByNumber`.
async fn run<C>(provider: &Provider<C>, opts: &Options) -> anyhow::Result<()>
where
    C: JsonRpcClient + Clone + 'static,
{
    let from = TestAccount::new(&opts.secret_key)?;
    tracing::info!(from = ?from.eth_addr, "ethereum address");

    tracing::info!("deploying QueryBlockhash");

    let bytecode =
        Bytes::from(hex::decode(QUERYBLOCKHASH_HEX).context("failed to decode contract hex")?);
    let abi: Abi = QUERYBLOCKHASH_ABI.clone();

    let chain_id = provider.get_chainid().await?;

    // Middleware that signs transactions with `from`'s key.
    let mw = make_middleware(provider.clone(), chain_id.as_u64(), &from)
        .context("failed to create middleware")?;
    let mw = Arc::new(mw);

    let factory = ContractFactory::new(abi, bytecode.clone(), mw.clone());
    let deployer = factory.deploy(())?;
    let (contract, deploy_receipt): (_, TransactionReceipt) = deployer
        .send_with_receipt()
        .await
        .context("failed to send deployment")?;

    tracing::info!(addr = ?contract.address(), "QueryBlockhash deployed");

    let contract = QueryBlockhash::new(contract.address(), contract.client());

    // check the deploy_height so we don't risk asking for blocks that had
    // been removed from the chainmetadata state (it has a relatively short
    // lookback length of 256)
    let deploy_height = deploy_receipt
        .block_number
        .expect("deploy height should be set")
        .as_u64();
    tracing::info!("deploy_height: {:?}", deploy_height);

    // we want to check the blockhash for the last 5 blocks
    const NR_CHECKS: u64 = 5;
    // Clamp at genesis so we never underflow for very early deployments.
    let start_block = if deploy_height >= NR_CHECKS {
        deploy_height - NR_CHECKS
    } else {
        0
    };

    // check that the blockhash returned by the contract matches the one returned by tendermint
    for epoch in start_block..deploy_height {
        tracing::info!("Checking blockhashes at epoch: {}", epoch);

        // get the blockhash from the contract, which results in call to get_tipset_cid in fendermint
        //
        let blockhash: [u8; 32] = contract
            .get_blockhash(U256::from(epoch))
            .call()
            .await
            .context("failed to call get_blockhash")?;
        let blockhash = H256::from_slice(&blockhash);
        tracing::info!("blockhash from contract: {:?}", blockhash);

        // get the blockhash from tendermint
        //
        let b = request(
            "eth_getBlockByNumber w/o txns",
            provider
                .get_block(BlockId::Number(BlockNumber::Number(U64::from(epoch))))
                .await,
            |b| b.is_some() && b.as_ref().map(|b| b.number).flatten() == Some(U64::from(epoch)),
        )?;
        let bh = b.unwrap().hash.expect("hash should be set");
        tracing::info!("blockhash from API: {:?}", bh);

        assert_eq!(blockhash, bh);
    }

    Ok(())
}
/// The HTTP interface provides JSON-RPC request/response endpoints.
async fn run_http(mut provider: Provider<Http>, opts: &Options) -> anyhow::Result<()> {
    tracing::info!("Running the tests over HTTP...");
    // Client tweaks shared by all examples — see `common::adjust_provider`.
    adjust_provider(&mut provider);
    run(&provider, opts).await?;
    tracing::info!("HTTP tests finished");
    Ok(())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/examples/greeter.rs | fendermint/eth/api/examples/greeter.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Example of using the Ethereum JSON-RPC facade with the Ethers provider.
//!
//! The example assumes that the following has been started and running in the background:
//! 1. Fendermint ABCI application
//! 2. Tendermint Core / Comet BFT
//! 3. Fendermint Ethereum API facade
//!
//! # Usage
//! ```text
//! cargo run -p fendermint_eth_api --release --example GREETER --
//! ```
use std::{fmt::Debug, path::PathBuf, sync::Arc};
use anyhow::Context;
use clap::Parser;
use ethers::contract::LogMeta;
use ethers::{
prelude::{abigen, ContractFactory},
providers::{Http, JsonRpcClient, Middleware, Provider},
};
use ethers_core::{
abi::Abi,
types::{Bytes, TransactionReceipt},
};
use serde_json::json;
use tracing::Level;
use crate::common::{adjust_provider, make_middleware, TestAccount, TestContractCall};
#[allow(dead_code)]
mod common;
// Generate a statically typed interface for the contract.
abigen!(Greeter, "../../testing/contracts/Greeter.abi");
const GREETER_HEX: &'static str = include_str!("../../../testing/contracts/Greeter.bin");
// Command-line options for the Greeter example; parsed by clap from flags
// and the FM_ETH__LISTEN__* environment variables.
// NOTE: field doc comments double as the clap `--help` text.
#[derive(Parser, Debug)]
pub struct Options {
    /// The host of the Fendermint Ethereum API endpoint.
    #[arg(long, default_value = "127.0.0.1", env = "FM_ETH__LISTEN__HOST")]
    pub http_host: String,

    /// The port of the Fendermint Ethereum API endpoint.
    #[arg(long, default_value = "8545", env = "FM_ETH__LISTEN__PORT")]
    pub http_port: u32,

    /// Secret key used to deploy the contract.
    ///
    /// Assumed to exist with a non-zero balance.
    #[arg(long, short)]
    pub secret_key: PathBuf,

    /// Path to write the contract metadata to.
    #[arg(long, short)]
    pub out: Option<PathBuf>,

    /// Enable DEBUG logs.
    #[arg(long, short)]
    pub verbose: bool,
}
impl Options {
pub fn log_level(&self) -> Level {
if self.verbose {
Level::DEBUG
} else {
Level::INFO
}
}
pub fn http_endpoint(&self) -> String {
format!("http://{}:{}", self.http_host, self.http_port)
}
}
/// See the module docs for how to run.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let opts: Options = Options::parse();
tracing_subscriber::fmt()
.with_max_level(opts.log_level())
.init();
let provider = Provider::<Http>::try_from(opts.http_endpoint())?;
run_http(provider, &opts).await?;
Ok(())
}
async fn run<C>(provider: &Provider<C>, opts: &Options) -> anyhow::Result<()>
where
C: JsonRpcClient + Clone + 'static,
{
let from = TestAccount::new(&opts.secret_key)?;
tracing::info!(from = ?from.eth_addr, "ethereum address");
tracing::info!("deploying Greeter");
let bytecode = Bytes::from(hex::decode(GREETER_HEX).context("failed to decode contract hex")?);
let abi: Abi = GREETER_ABI.clone();
let chain_id = provider.get_chainid().await?;
let mw = make_middleware(provider.clone(), chain_id.as_u64(), &from)
.context("failed to create middleware")?;
let mw = Arc::new(mw);
const GREETING0: &str = "Welcome, weary traveller!";
const GREETING1: &str = "Howdy doody!";
let factory = ContractFactory::new(abi, bytecode.clone(), mw.clone());
let deployer = factory.deploy((GREETING0.to_string(),))?;
let (contract, deploy_receipt): (_, TransactionReceipt) = deployer
.send_with_receipt()
.await
.context("failed to send deployment")?;
tracing::info!(addr = ?contract.address(), "Greeter deployed");
let contract = Greeter::new(contract.address(), contract.client());
let greeting: String = contract
.greet()
.call()
.await
.context("failed to call greet")?;
assert_eq!(greeting, GREETING0);
let deploy_height = deploy_receipt.block_number.expect("deploy height is known");
// Set the greeting to emit an event.
let set_greeting: TestContractCall<_, ()> = contract.set_greeting(GREETING1.to_string());
let _tx_receipt: TransactionReceipt = set_greeting
.send()
.await
.context("failed to set greeting")?
.log_msg("set_greeting")
.retries(3)
.await?
.context("cannot get receipt")?;
let greeting: String = contract
.greet()
.call()
.await
.context("failed to call greet")?;
assert_eq!(greeting, GREETING1);
let logs: Vec<(GreetingSetFilter, LogMeta)> = contract
.greeting_set_filter()
.address(contract.address().into())
.from_block(deploy_height)
.query_with_meta()
.await
.context("failed to query logs")?;
assert_eq!(logs.len(), 2, "events: constructor + invocation");
assert_eq!(logs[0].0.greeting, GREETING0);
assert_eq!(logs[1].0.greeting, GREETING1);
if let Some(ref out) = opts.out {
// Print some metadata so that we can configure The Graph:
// `subgraph.template.yaml` requires the `address` and `startBlock` to be configured.
let output = json!({
"address": format!("{:?}", contract.address()),
"deploy_height": deploy_height.as_u64(),
});
let json = serde_json::to_string_pretty(&output).unwrap();
std::fs::write(out, json).expect("failed to write metadata");
}
Ok(())
}
/// The HTTP interface provides JSON-RPC request/response endpoints.
async fn run_http(mut provider: Provider<Http>, opts: &Options) -> anyhow::Result<()> {
tracing::info!("Running the tests over HTTP...");
adjust_provider(&mut provider);
run(&provider, opts).await?;
tracing::info!("HTTP tests finished");
Ok(())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/api/examples/common/mod.rs | fendermint/eth/api/examples/common/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Example of using the Ethereum JSON-RPC facade with the Ethers provider.
//!
//! The example assumes that the following has been started and running in the background:
//! 1. Fendermint ABCI application
//! 2. Tendermint Core / Comet BFT
//! 3. Fendermint Ethereum API facade
//!
//! # Usage
//! ```text
//! cargo run -p fendermint_eth_api --release --example GREETER --
//! ```
use std::{fmt::Debug, fmt::Display};
use std::{path::Path, time::Duration};
use anyhow::{anyhow, Context};
use ethers::{
prelude::{ContractCall, SignerMiddleware},
providers::{JsonRpcClient, Middleware, Provider},
signers::{Signer, Wallet},
};
use ethers_core::{
k256::ecdsa::SigningKey,
types::{
transaction::eip2718::TypedTransaction, Address, BlockId, BlockNumber, TransactionReceipt,
H160,
},
};
use fendermint_crypto::SecretKey;
use fendermint_rpc::message::SignedMessageFactory;
use fendermint_vm_actor_interface::eam::EthAddress;
pub type TestMiddleware<C> = SignerMiddleware<Provider<C>, Wallet<SigningKey>>;
pub type TestContractCall<C, T> = ContractCall<TestMiddleware<C>, T>;
/// Gas limit to set for transactions.
pub const ENOUGH_GAS: u64 = 10_000_000_000u64;
pub struct TestAccount {
pub secret_key: SecretKey,
pub eth_addr: H160,
}
impl TestAccount {
pub fn new(sk: &Path) -> anyhow::Result<Self> {
let sk = SignedMessageFactory::read_secret_key(sk)?;
let ea = EthAddress::from(sk.public_key());
let h = Address::from_slice(&ea.0);
Ok(Self {
secret_key: sk,
eth_addr: h,
})
}
}
pub fn adjust_provider<C>(provider: &mut Provider<C>)
where
C: JsonRpcClient,
{
// Tendermint block interval is lower.
provider.set_interval(Duration::from_secs(2));
}
/// Send a transaction and await the receipt.
pub async fn send_transaction<C>(
mw: &TestMiddleware<C>,
tx: TypedTransaction,
label: &str,
) -> anyhow::Result<TransactionReceipt>
where
C: JsonRpcClient + 'static,
{
// `send_transaction` will fill in the missing fields like `from` and `nonce` (which involves querying the API).
let receipt = mw
.send_transaction(tx, None)
.await
.context("failed to send transaction")?
.log_msg(format!("Pending transaction: {label}"))
.retries(5)
.await?
.context("Missing receipt")?;
Ok(receipt)
}
/// Create a middleware that will assign nonces and sign the message.
pub fn make_middleware<C>(
provider: Provider<C>,
chain_id: u64,
sender: &TestAccount,
) -> anyhow::Result<TestMiddleware<C>>
where
C: JsonRpcClient,
{
// We have to use Ethereum's signing scheme, beause the `from` is not part of the RLP representation,
// it is inferred from the public key recovered from the signature. We could potentially hash the
// transaction in a different way, but we can't for example use the actor ID in the hash, because
// we have no way of sending it along with the message.
let wallet: Wallet<SigningKey> =
Wallet::from_bytes(&sender.secret_key.serialize().as_ref())?.with_chain_id(chain_id);
Ok(SignerMiddleware::new(provider, wallet))
}
/// Fill the transaction fields such as gas and nonce.
pub async fn prepare_call<C, T>(
mw: &TestMiddleware<C>,
mut call: TestContractCall<C, T>,
prevent_estimation: bool,
) -> anyhow::Result<TestContractCall<C, T>>
where
C: JsonRpcClient + 'static,
{
if prevent_estimation {
// Set the gas based on the testkit so it doesn't trigger estimation.
let tx = call.tx.as_eip1559_mut();
let tx = tx.expect("eip1559");
tx.gas = Some(ENOUGH_GAS.into());
tx.max_fee_per_gas = Some(0.into());
tx.max_priority_fee_per_gas = Some(0.into());
}
// Fill in the missing fields like `from` and `nonce` (which involves querying the API).
mw.fill_transaction(&mut call.tx, Some(BlockId::Number(BlockNumber::Latest)))
.await
.context("failed to fill transaction")?;
Ok(call)
}
pub trait CheckResult {
fn check_result(&self) -> anyhow::Result<()>;
}
impl CheckResult for bool {
fn check_result(&self) -> anyhow::Result<()> {
if *self {
Ok(())
} else {
Err(anyhow!("expected true; got false"))
}
}
}
pub fn request<T, E, F, C>(method: &str, res: Result<T, E>, check: F) -> anyhow::Result<T>
where
T: Debug,
F: FnOnce(&T) -> C,
C: CheckResult,
E: Display,
{
tracing::debug!("checking request {method}...");
match res {
Ok(value) => match check(&value).check_result() {
Ok(()) => Ok(value),
Err(e) => Err(anyhow!("failed to check {method}: {e}:\n{value:?}")),
},
Err(e) => Err(anyhow!("failed to call {method}: {e:#}")),
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/eth/hardhat/src/lib.rs | fendermint/eth/hardhat/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, bail, Context};
use ethers_core::types as et;
use serde::Deserialize;
use std::{
cmp::Ord,
collections::{BTreeMap, HashMap, HashSet, VecDeque},
hash::Hash,
path::{Path, PathBuf},
};
/// Contract source as it appears in dependencies, e.g. `"src/lib/SubnetIDHelper.sol"`, or "Gateway.sol".
/// It is assumed to contain the file extension.
pub type ContractSource = PathBuf;
/// Contract name as it appears in dependencies, e.g. `"SubnetIDHelper"`.
pub type ContractName = String;
pub type ContractSourceAndName = (ContractSource, ContractName);
/// Fully Qualified Name of a contract, e.g. `"src/lib/SubnetIDHelper.sol:SubnetIDHelper"`.
pub type FQN = String;
/// Dependency tree for libraries.
///
/// Using a [BTreeMap] for deterministic ordering.
type DependencyTree<T> = BTreeMap<T, HashSet<T>>;
/// Utility to link bytecode from Hardhat build artifacts.
#[derive(Clone, Debug)]
pub struct Hardhat {
/// Directory with Hardhat build artifacts, the full-fat JSON files
/// that contain ABI, bytecode, link references, etc.
contracts_dir: PathBuf,
}
impl Hardhat {
pub fn new(contracts_dir: PathBuf) -> Self {
Self { contracts_dir }
}
/// Fully qualified name of a source and contract.
pub fn fqn(&self, contract_source: &Path, contract_name: &str) -> String {
format!("{}:{}", contract_source.to_string_lossy(), contract_name)
}
/// Read the bytecode of the contract and replace all links in it with library addresses,
/// similar to how the [hardhat-ethers](https://github.com/NomicFoundation/hardhat/blob/7cc06ab222be8db43265664c68416fdae3030418/packages/hardhat-ethers/src/internal/helpers.ts#L165C42-L165C42)
/// plugin does it.
///
/// The contract source is expected to be the logical path to a Solidity contract,
/// including the extension, ie. a [ContractSource].
pub fn bytecode(
&self,
contract_src: impl AsRef<Path>,
contract_name: &str,
libraries: &HashMap<FQN, et::Address>,
) -> anyhow::Result<Vec<u8>> {
let artifact = self.artifact(contract_src.as_ref(), contract_name)?;
// Get the bytecode which is in hex format with placeholders for library references.
let mut bytecode = artifact.bytecode.object.clone();
// Replace all library references with their address.
// Here we differ slightly from the TypeScript version in that we don't return an error
// for entries in the library address map that we end up not needing, so we can afford
// to know less about which contract needs which exact references when we call them,
for (lib_src, lib_name) in artifact.libraries_needed() {
// References can be given with Fully Qualified Name, or just the contract name,
// but they must be unique and unambiguous.
let fqn = self.fqn(&lib_src, &lib_name);
let lib_addr = match (libraries.get(&fqn), libraries.get(&lib_name)) {
(None, None) => {
bail!("failed to resolve library: {fqn}")
}
(Some(_), Some(_)) => bail!("ambiguous library: {fqn}"),
(Some(addr), None) => addr,
(None, Some(addr)) => addr,
};
let lib_addr = hex::encode(lib_addr.0);
for pos in artifact.library_positions(&lib_src, &lib_name) {
let start = 2 + pos.start * 2;
let end = start + pos.length * 2;
bytecode.replace_range(start..end, &lib_addr);
}
}
let bytecode = hex::decode(bytecode.trim_start_matches("0x"))
.context("failed to decode contract from hex")?;
Ok(bytecode)
}
/// Traverse the linked references and return the library contracts to be deployed in topological order.
///
/// The result will include the top contracts as well, and it's up to the caller to filter them out if
/// they have more complicated deployments including constructors. This is because there can be diamond
/// facets among them which aren't ABI visible dependencies but should be deployed as libraries.
pub fn dependencies(
&self,
root_contracts: &[(impl AsRef<Path>, &str)],
) -> anyhow::Result<Vec<ContractSourceAndName>> {
let mut deps: DependencyTree<ContractSourceAndName> = Default::default();
let mut queue = root_contracts
.iter()
.map(|(s, c)| (PathBuf::from(s.as_ref()), c.to_string()))
.collect::<VecDeque<_>>();
// Construct dependency tree by recursive traversal.
while let Some(sc) = queue.pop_front() {
if deps.contains_key(&sc) {
continue;
}
let artifact = self
.artifact(&sc.0, &sc.1)
.with_context(|| format!("failed to load dependency artifact: {}", sc.1))?;
let cds = deps.entry(sc).or_default();
for (ls, ln) in artifact.libraries_needed() {
cds.insert((ls.clone(), ln.clone()));
queue.push_back((ls, ln));
}
}
// Topo-sort the libraries in the order of deployment.
let sorted = topo_sort(deps)?;
Ok(sorted)
}
/// Concatenate the contracts directory with the expected layout to get
/// the path to the JSON file of a contract, which is under a directory
/// named after the Solidity file.
fn contract_path(&self, contract_src: &Path, contract_name: &str) -> anyhow::Result<PathBuf> {
// There is currently no example of a Solidity directory containing multiple JSON files,
// but it possible if there are multiple contracts in the file.
let base_name = contract_src
.file_name()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("failed to produce base name for {contract_src:?}"))?;
let path = self
.contracts_dir
.join(base_name)
.join(format!("{contract_name}.json"));
Ok(path)
}
/// Parse the Hardhat artifact of a contract.
fn artifact(&self, contract_src: &Path, contract_name: &str) -> anyhow::Result<Artifact> {
let contract_path = self.contract_path(contract_src, contract_name)?;
let json = std::fs::read_to_string(&contract_path)
.with_context(|| format!("failed to read {contract_path:?}"))?;
let artifact =
serde_json::from_str::<Artifact>(&json).context("failed to parse Hardhat artifact")?;
Ok(artifact)
}
}
#[derive(Deserialize)]
struct Artifact {
pub bytecode: Bytecode,
}
impl Artifact {
// Collect the libraries this contract needs.
pub fn libraries_needed(&self) -> Vec<(ContractSource, ContractName)> {
self.bytecode
.link_references
.iter()
.flat_map(|(lib_src, links)| {
links
.keys()
.map(|lib_name| (lib_src.to_owned(), lib_name.to_owned()))
})
.collect()
}
pub fn library_positions(
&self,
lib_src: &ContractSource,
lib_name: &ContractName,
) -> impl Iterator<Item = &Position> {
match self
.bytecode
.link_references
.get(lib_src)
.and_then(|links| links.get(lib_name))
{
Some(ps) => ps.iter(),
None => [].iter(),
}
}
}
/// Match the `"bytecode"` entry in the Hardhat build artifact.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct Bytecode {
/// Hexadecimal format with placeholders for links.
pub object: String,
pub link_references: HashMap<ContractSource, HashMap<ContractName, Vec<Position>>>,
}
/// Indicate where a placeholder appears in the bytecode object.
#[derive(Deserialize)]
struct Position {
pub start: usize,
pub length: usize,
}
/// Return elements of a dependency tree in topological order.
fn topo_sort<T>(mut dependency_tree: DependencyTree<T>) -> anyhow::Result<Vec<T>>
where
T: Eq + PartialEq + Hash + Ord + Clone,
{
let mut sorted = Vec::new();
while !dependency_tree.is_empty() {
let leaf = match dependency_tree.iter().find(|(_, ds)| ds.is_empty()) {
Some((k, _)) => k.clone(),
None => bail!("circular reference in the dependencies"),
};
dependency_tree.remove(&leaf);
for (_, ds) in dependency_tree.iter_mut() {
ds.remove(&leaf);
}
sorted.push(leaf);
}
Ok(sorted)
}
#[cfg(test)]
mod tests {
use ethers_core::types as et;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use crate::{topo_sort, DependencyTree};
use super::Hardhat;
fn workspace_dir() -> PathBuf {
let output = std::process::Command::new(env!("CARGO"))
.arg("locate-project")
.arg("--workspace")
.arg("--message-format=plain")
.output()
.unwrap()
.stdout;
let cargo_path = Path::new(std::str::from_utf8(&output).unwrap().trim());
cargo_path.parent().unwrap().to_path_buf()
}
/// Path to the Solidity contracts, indended to be used in tests.
fn contracts_path() -> PathBuf {
let contracts_path = std::env::var("FM_CONTRACTS_DIR").unwrap_or_else(|_| {
workspace_dir()
.join("contracts/out")
.to_string_lossy()
.into_owned()
});
PathBuf::from_str(&contracts_path).expect("malformed contracts path")
}
fn test_hardhat() -> Hardhat {
Hardhat::new(contracts_path())
}
// These are all the libraries based on the `scripts/deploy-libraries.ts` in `ipc-solidity-actors`.
const IPC_DEPS: [&str; 4] = [
"AccountHelper",
"SubnetIDHelper",
"CrossMsgHelper",
"LibQuorum",
];
#[test]
fn bytecode_linking() {
let hardhat = test_hardhat();
let mut libraries = HashMap::new();
for lib in IPC_DEPS {
libraries.insert(lib.to_owned(), et::Address::default());
}
// This one requires a subset of above libraries.
let _bytecode = hardhat
.bytecode("GatewayManagerFacet.sol", "GatewayManagerFacet", &libraries)
.unwrap();
}
#[test]
fn bytecode_missing_link() {
let hardhat = test_hardhat();
// Not giving any dependency should result in a failure.
let result = hardhat.bytecode(
"SubnetActorDiamond.sol",
"SubnetActorDiamond",
&Default::default(),
);
assert!(result.is_err());
assert!(result
.unwrap_err()
.to_string()
.contains("failed to resolve library"));
}
#[test]
fn library_dependencies() {
let hardhat = test_hardhat();
let root_contracts: Vec<(String, &str)> = vec![
"GatewayDiamond",
"GatewayManagerFacet",
"CheckpointingFacet",
"TopDownFinalityFacet",
"XnetMessagingFacet",
"GatewayGetterFacet",
"GatewayMessengerFacet",
"SubnetActorGetterFacet",
"SubnetActorManagerFacet",
"SubnetActorRewardFacet",
"SubnetActorCheckpointingFacet",
"SubnetActorPauseFacet",
]
.into_iter()
.map(|c| (format!("{c}.sol"), c))
.collect();
// Name our top level contracts and gather all required libraries.
let mut lib_deps = hardhat
.dependencies(&root_contracts)
.expect("failed to compute dependencies");
// For the sake of testing, let's remove top libraries from the dependency list.
lib_deps.retain(|(_, d)| !root_contracts.iter().any(|(_, c)| c == d));
eprintln!("IPC dependencies: {lib_deps:?}");
assert_eq!(
lib_deps.len(),
IPC_DEPS.len(),
"should discover the same dependencies as expected"
);
let mut libs = HashMap::default();
for (s, c) in lib_deps {
hardhat.bytecode(&s, &c, &libs).unwrap_or_else(|e| {
panic!("failed to produce library bytecode in topo order for {c}: {e}")
});
// Pretend that we deployed it.
libs.insert(hardhat.fqn(&s, &c), et::Address::default());
}
for (src, name) in root_contracts {
hardhat
.bytecode(src, name, &libs)
.expect("failed to produce contract bytecode in topo order");
}
}
#[test]
fn topo_sorting() {
let mut tree: DependencyTree<u8> = Default::default();
for (k, ds) in [
(1, vec![]),
(2, vec![1]),
(3, vec![1, 2]),
(4, vec![3]),
(5, vec![4, 2]),
] {
tree.entry(k).or_default().extend(ds);
}
let sorted = topo_sort(tree.clone()).unwrap();
assert_eq!(sorted.len(), 5);
for (i, k) in sorted.iter().enumerate() {
for d in &tree[k] {
let j = sorted.iter().position(|x| x == d).unwrap();
assert!(j < i);
}
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/ethaccount.rs | fendermint/vm/actor_interface/src/ethaccount.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
define_code!(ETHACCOUNT { code_id: 16 });
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/evm.rs | fendermint/vm/actor_interface/src/evm.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::METHOD_CONSTRUCTOR;
use serde_tuple::{Deserialize_tuple, Serialize_tuple};
pub use fil_actors_evm_shared::uints;
use crate::eam::EthAddress;
define_code!(EVM { code_id: 14 });
#[repr(u64)]
pub enum Method {
Constructor = METHOD_CONSTRUCTOR,
Resurrect = 2,
GetBytecode = 3,
GetBytecodeHash = 4,
GetStorageAt = 5,
InvokeContractDelegate = 6,
// This hardcoded value is taken from https://github.com/filecoin-project/ref-fvm/blob/f4f3f340ba29b3800cd8272e34023606def23855/testing/integration/src/testkit/fevm.rs#L88-L89
// where it's used because of a ciruclar dependency (frc42_dispatch needs fvm_shared).
// Here we can use it if we want, however the release cycle is a bit lagging, preventing us from using the latest ref-fvm at the moment.
//InvokeContract = frc42_dispatch::method_hash!("InvokeEVM"),
InvokeContract = 3844450837,
}
// XXX: I don't know why the following arent' part of `fil_actors_evm_shared` :(
#[derive(Serialize_tuple, Deserialize_tuple)]
#[serde(transparent)]
pub struct BytecodeReturn {
pub code: Option<Cid>,
}
#[derive(Serialize_tuple, Deserialize_tuple)]
pub struct GetStorageAtParams {
pub storage_key: uints::U256,
}
#[derive(Serialize_tuple, Deserialize_tuple)]
#[serde(transparent)]
pub struct GetStorageAtReturn {
pub storage: uints::U256,
}
#[derive(Serialize_tuple, Deserialize_tuple)]
pub struct ConstructorParams {
/// The actor's "creator" (specified by the EAM).
pub creator: EthAddress,
/// The initcode that will construct the new EVM actor.
pub initcode: RawBytes,
}
/// Define an error type that implements [ContractRevert] and is a union
/// of multiple other such types. Intended to be used when a contract
/// calls other contracts that can also revert with known custom error
/// types, so that we can get something readable even if the error doesn't
/// directly come from the contract we call.
///
/// # Example
/// ```ignore
/// revert_errors! {
/// SubnetActorErrors {
/// SubnetActorManagerFacetErrors,
/// GatewayManagerFacetErrors
/// }
/// }
/// ```
#[macro_export]
macro_rules! revert_errors {
($typ:ident {$($elem:ident),+}) => {
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub enum $typ {
$($elem($elem),)+
}
impl ::ethers::core::abi::AbiDecode for $typ {
fn decode(data: impl AsRef<[u8]>) -> ::core::result::Result<Self, ::ethers::core::abi::AbiError> {
let data = data.as_ref();
$(
if let Ok(decoded) =
<$elem as ::ethers::core::abi::AbiDecode>::decode(data)
{
return Ok(Self::$elem(decoded));
}
)+
Err(::ethers::core::abi::Error::InvalidData.into())
}
}
impl ::ethers::core::abi::AbiEncode for $typ {
fn encode(self) -> ::std::vec::Vec<u8> {
match self {
$(
Self::$elem(element) => ::ethers::core::abi::AbiEncode::encode(element),
)+
}
}
}
impl ::ethers::contract::ContractRevert for $typ {
fn valid_selector(selector: [u8; 4]) -> bool {
$(
if <$elem as ::ethers::contract::ContractRevert>::valid_selector(selector) {
return true;
}
)+
false
}
}
impl ::core::fmt::Display for $typ {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
match self {
$(
Self::$elem(element) => ::core::fmt::Display::fmt(element, f),
)+
}
}
}
};
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/placeholder.rs | fendermint/vm/actor_interface/src/placeholder.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Placeholders can be used for delegated address types.
//! The FVM automatically creates one if the recipient of a transaction
//! doesn't exist. Then, the executor replaces the code later based on
//! the namespace in the delegated address.
define_code!(PLACEHOLDER { code_id: 13 });
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/eam.rs | fendermint/vm/actor_interface/src/eam.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::fmt::{Debug, Display};
use cid::multihash::MultihashDigest;
use fendermint_crypto::PublicKey;
use fvm_ipld_encoding::{
strict_bytes,
tuple::{Deserialize_tuple, Serialize_tuple},
};
use fvm_shared::{
address::{Address, Error, SECP_PUB_LEN},
ActorID, METHOD_CONSTRUCTOR,
};
define_singleton!(EAM {
id: 10,
code_id: 15
});
pub const EAM_ACTOR_NAME: &str = "eam";
/// Ethereum Address Manager actor methods available.
#[repr(u64)]
pub enum Method {
Constructor = METHOD_CONSTRUCTOR,
Create = 2,
Create2 = 3,
CreateExternal = 4,
}
// TODO: We could re-export `fil_evm_actor_shared::address::EvmAddress`.
#[derive(
serde::Deserialize, serde::Serialize, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default,
)]
pub struct EthAddress(#[serde(with = "strict_bytes")] pub [u8; 20]);
impl EthAddress {
/// Returns an EVM-form ID address from actor ID.
///
/// This is copied from the `evm` actor library.
pub fn from_id(id: u64) -> Self {
let mut bytes = [0u8; 20];
bytes[0] = 0xff;
bytes[12..].copy_from_slice(&id.to_be_bytes());
Self(bytes)
}
/// Hash the public key according to the Ethereum convention.
pub fn new_secp256k1(pubkey: &[u8]) -> Result<Self, Error> {
if pubkey.len() != SECP_PUB_LEN {
return Err(Error::InvalidSECPLength(pubkey.len()));
}
let mut hash20 = [0u8; 20];
// Based on [ethers_core::utils::secret_key_to_address]
let hash32 = cid::multihash::Code::Keccak256.digest(&pubkey[1..]);
hash20.copy_from_slice(&hash32.digest()[12..]);
Ok(Self(hash20))
}
/// Indicate whether this hash is really an actor ID.
pub fn is_masked_id(&self) -> bool {
self.0[0] == 0xff && self.0[1..].starts_with(&[0u8; 11])
}
}
impl Display for EthAddress {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Display::fmt(ðers::types::Address::from(self.0), f)
}
}
impl Debug for EthAddress {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(ðers::types::Address::from(self.0), f)
}
}
impl From<EthAddress> for Address {
fn from(value: EthAddress) -> Address {
if value.is_masked_id() {
let mut bytes = [0u8; 8];
bytes.copy_from_slice(&value.0[12..]);
let id = u64::from_be_bytes(bytes);
Address::new_id(id)
} else {
Address::new_delegated(EAM_ACTOR_ID, &value.0).expect("EthAddress is delegated")
}
}
}
impl From<EthAddress> for ethers::types::Address {
fn from(value: EthAddress) -> Self {
Self(value.0)
}
}
impl From<&EthAddress> for ethers::types::Address {
fn from(value: &EthAddress) -> Self {
Self(value.0)
}
}
impl From<ethers::types::Address> for EthAddress {
fn from(value: ethers::types::Address) -> Self {
Self(value.0)
}
}
impl From<PublicKey> for EthAddress {
fn from(value: PublicKey) -> Self {
Self::new_secp256k1(&value.serialize()).expect("length is 65")
}
}
impl AsRef<[u8]> for EthAddress {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
/// Helper to read return value from contract creation.
#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)]
pub struct CreateReturn {
pub actor_id: ActorID,
pub robust_address: Option<Address>,
pub eth_address: EthAddress,
}
impl CreateReturn {
/// Delegated EAM address of the EVM actor, which can be used to invoke the contract.
pub fn delegated_address(&self) -> Address {
Address::new_delegated(EAM_ACTOR_ID, &self.eth_address.0).expect("ETH address should work")
}
}
#[cfg(test)]
mod tests {
use ethers_core::k256::ecdsa::SigningKey;
use fendermint_crypto::SecretKey;
use quickcheck_macros::quickcheck;
use rand::rngs::StdRng;
use rand::SeedableRng;
use super::EthAddress;
#[quickcheck]
fn prop_new_secp256k1(seed: u64) -> bool {
let mut rng = StdRng::seed_from_u64(seed);
let sk = SecretKey::random(&mut rng);
let signing_key = SigningKey::from_slice(sk.serialize().as_ref()).unwrap();
let address = ethers_core::utils::secret_key_to_address(&signing_key);
let eth_address = EthAddress::from(sk.public_key());
address.0 == eth_address.0
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/diamond.rs | fendermint/vm/actor_interface/src/diamond.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Helper data structures to declare diamond pattern contracts.
// See https://medium.com/@MarqyMarq/how-to-implement-the-diamond-standard-69e87dae44e6
use std::collections::HashMap;
use ethers::abi::Abi;
use fvm_shared::ActorID;
#[derive(Clone, Debug)]
pub struct EthFacet {
pub name: &'static str,
pub abi: Abi,
}
/// Top level Ethereum contract with a pre-determined ID.
#[derive(Clone, Debug)]
pub struct EthContract {
/// Pre-determined ID for the contract.
///
/// 0 means the contract will get a dynamic ID.
pub actor_id: ActorID,
pub abi: Abi,
/// List of facets if the contract is using the diamond pattern.
pub facets: Vec<EthFacet>,
}
pub type EthContractMap = HashMap<&'static str, EthContract>;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/lib.rs | fendermint/vm/actor_interface/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! The modules in this crate a thin interfaces to builtin-actors,
//! so that the rest of the system doesn't have to copy-paste things
//! such as actor IDs, method numbers, method parameter data types.
//!
//! This is similar to how the FVM library contains copies for actors
//! it assumes to be deployed, like the init-actor. There, it's to avoid
//! circular project dependencies. Here, we have the option to reference
//! the actor projects directly and re-export what we need, or to copy
//! the relevant pieces of code. By limiting this choice to this crate,
//! the rest of the application can avoid ad-hoc magic numbers.
//!
//! The actor IDs can be found in [singletons](https://github.com/filecoin-project/builtin-actors/blob/master/runtime/src/builtin/singletons.rs),
//! while the code IDs are in [builtins](https://github.com/filecoin-project/builtin-actors/blob/master/runtime/src/runtime/builtins.rs)
/// Something we can use for empty state, similar to how the FVM uses `EMPTY_ARR_CID`.
pub const EMPTY_ARR: [(); 0] = [(); 0]; // Based on how it's done in `Tester`.
macro_rules! define_code {
($name:ident { code_id: $code_id:literal }) => {
paste::paste! {
/// Position of the actor in the builtin actor bundle manifest.
pub const [<$name _ACTOR_CODE_ID>]: u32 = $code_id;
}
};
}
macro_rules! define_id {
($name:ident { id: $id:literal }) => {
paste::paste! {
pub const [<$name _ACTOR_ID>]: fvm_shared::ActorID = $id;
pub const [<$name _ACTOR_ADDR>]: fvm_shared::address::Address = fvm_shared::address::Address::new_id([<$name _ACTOR_ID>]);
}
};
}
macro_rules! define_singleton {
($name:ident { id: $id:literal, code_id: $code_id:literal }) => {
define_id!($name { id: $id });
define_code!($name { code_id: $code_id });
};
}
pub mod account;
pub mod burntfunds;
pub mod cetf;
pub mod chainmetadata;
pub mod cron;
pub mod diamond;
pub mod eam;
pub mod ethaccount;
pub mod evm;
pub mod init;
pub mod ipc;
pub mod multisig;
pub mod placeholder;
pub mod reward;
pub mod system;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/cron.rs | fendermint/vm/actor_interface/src/cron.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_ipld_encoding::tuple::*;
use fvm_shared::address::Address;
use fvm_shared::MethodNum;
use fvm_shared::METHOD_CONSTRUCTOR;
define_singleton!(CRON { id: 3, code_id: 3 });
/// Cron actor methods available.
#[repr(u64)]
pub enum Method {
Constructor = METHOD_CONSTRUCTOR,
EpochTick = 2,
}
/// Cron actor state which holds entries to call during epoch tick
#[derive(Default, Serialize_tuple, Deserialize_tuple, Clone, Debug)]
pub struct State {
/// Entries is a set of actors (and corresponding methods) to call during EpochTick.
pub entries: Vec<Entry>,
}
#[derive(Clone, PartialEq, Eq, Debug, Serialize_tuple, Deserialize_tuple)]
pub struct Entry {
/// The actor to call (ID address)
pub receiver: Address,
/// The method number to call (must accept empty parameters)
pub method_num: MethodNum,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/chainmetadata.rs | fendermint/vm/actor_interface/src/chainmetadata.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
define_id!(CHAINMETADATA { id: 48 });
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/cetf.rs | fendermint/vm/actor_interface/src/cetf.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
define_id!(CETFSYSCALL { id: 49 });
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/system.rs | fendermint/vm/actor_interface/src/system.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use fvm_ipld_encoding::tuple::*;
use fvm_shared::address::Address;
use lazy_static::lazy_static;
use crate::eam::EthAddress;
define_singleton!(SYSTEM { id: 0, code_id: 1 });
lazy_static! {
/// The Ethereum null-address 0x00..00 can also be used to identify the system actor.
pub static ref SYSTEM_ACTOR_ETH_ADDR: Address = EthAddress::default().into();
}
/// Check whether the address is one of those identifying the system actor.
pub fn is_system_addr(addr: &Address) -> bool {
*addr == SYSTEM_ACTOR_ADDR || *addr == *SYSTEM_ACTOR_ETH_ADDR
}
/// System actor state.
#[derive(Default, Deserialize_tuple, Serialize_tuple, Debug, Clone)]
pub struct State {
// builtin actor registry: Vec<(String, Cid)>
pub builtin_actors: Cid,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/init.rs | fendermint/vm/actor_interface/src/init.rs | use std::collections::{BTreeMap, BTreeSet};
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::Context;
use cid::multihash::MultihashDigest;
use cid::Cid;
use fendermint_vm_genesis::{Actor, ActorMeta};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::tuple::*;
use fvm_ipld_hamt::Hamt;
use fvm_shared::{address::Address, ActorID, HAMT_BIT_WIDTH};
use crate::{eam::EthAddress, system};
/// Defines first available ID address after builtin actors
pub const FIRST_NON_SINGLETON_ADDR: ActorID = 100;
define_singleton!(INIT { id: 1, code_id: 2 });
pub type AddressMap = BTreeMap<Address, ActorID>;
/// Delegated address of an Ethereum built-in actor.
///
/// This is based on what seems to be going on in the `CREATE_EXTERNAL` method
/// of the EAM actor when it determines a robust address for an account ID,
/// in that we take something known (a public key, or in this case the ID),
/// hash it and truncate the results to 20 bytes.
///
/// But it's not a general rule of turning actor IDs into Ethereum addresses!
/// It's just something we do to assign an address that looks like an Ethereum one.
pub fn builtin_actor_eth_addr(id: ActorID) -> EthAddress {
// The EVM actor would reject a delegated address that looks like an ID address, so let's hash it.
// Based on `hash20` in the EAM actor:
// https://github.com/filecoin-project/builtin-actors/blob/v11.0.0/actors/eam/src/lib.rs#L213-L216
let eth_addr = EthAddress::from_id(id);
let eth_addr = cid::multihash::Code::Keccak256.digest(ð_addr.0);
let eth_addr: [u8; 20] = eth_addr.digest()[12..32].try_into().unwrap();
EthAddress(eth_addr)
}
#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)]
pub struct State {
pub address_map: Cid,
pub next_id: ActorID,
pub network_name: String,
#[cfg(feature = "m2-native")]
pub installed_actors: Cid,
}
// TODO: Not happy about having to copy this. Maybe we should use project references after all.
impl State {
/// Create new state instance.
pub fn new<BS: Blockstore>(
store: &BS,
network_name: String,
// Accounts from the Genesis file.
accounts: &[Actor],
// Pre-defined IDs for top-level EVM contracts.
eth_builtin_ids: &BTreeSet<ActorID>,
// Number of dynamically deployed EVM library contracts.
eth_library_count: u64,
) -> anyhow::Result<(Self, AddressMap)> {
// Returning only the addreses that belong to user accounts.
let mut allocated_ids = AddressMap::new();
// Inserting both user accounts and built-in EVM actors.
let mut address_map = Hamt::<&BS, ActorID>::new_with_bit_width(store, HAMT_BIT_WIDTH);
let mut set_address = |addr: Address, id: ActorID| {
tracing::debug!(
addr = addr.to_string(),
actor_id = id,
"setting init address"
);
address_map.set(addr.to_bytes().into(), id)
};
let addresses = accounts.iter().flat_map(|a| match &a.meta {
ActorMeta::Account(acc) => {
vec![acc.owner.0]
}
ActorMeta::Multisig(ms) => ms.signers.iter().map(|a| a.0).collect(),
});
let mut next_id = FIRST_NON_SINGLETON_ADDR;
for addr in addresses {
if allocated_ids.contains_key(&addr) {
continue;
}
allocated_ids.insert(addr, next_id);
set_address(addr, next_id).context("cannot set ID of account address")?;
next_id += 1;
}
// We will need to allocate an ID for each multisig account, however,
// these do not have to be recorded in the map, because their addr->ID
// mapping is trivial (it's an ID type address). To avoid the init actor
// using the same ID for something else, give it a higher ID to use next.
for a in accounts.iter() {
if let ActorMeta::Multisig { .. } = a.meta {
next_id += 1;
}
}
// Insert top-level EVM contracts which have fixed IDs.
for id in eth_builtin_ids {
let addr = Address::from(builtin_actor_eth_addr(*id));
set_address(addr, *id).context("cannot set ID of eth contract address")?;
}
// Insert dynamic EVM library contracts.
for _ in 0..eth_library_count {
let addr = Address::from(builtin_actor_eth_addr(next_id));
set_address(addr, next_id).context("cannot set ID of eth library address")?;
next_id += 1;
}
// Insert the null-Ethereum address to equal the system actor,
// so the system actor can be identified by 0xff00..00 as well as 0x00..00
set_address(*system::SYSTEM_ACTOR_ETH_ADDR, system::SYSTEM_ACTOR_ID)
.context("cannot set ID of system eth address")?;
#[cfg(feature = "m2-native")]
let installed_actors = store.put_cbor(&Vec::<Cid>::new(), Code::Blake2b256)?;
let state = Self {
address_map: address_map.flush()?,
next_id,
network_name,
#[cfg(feature = "m2-native")]
installed_actors,
};
tracing::debug!(?state, "init actor state");
Ok((state, allocated_ids))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/burntfunds.rs | fendermint/vm/actor_interface/src/burntfunds.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
// The burnt funds actor is just an Account actor.
define_id!(BURNT_FUNDS { id: 99 });
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/reward.rs | fendermint/vm/actor_interface/src/reward.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
// The reward actor is a singleton, but for now let's just use a
// simple account, instead of the one in the built-in actors library,
// because that has too many Filecoin mainnet specific things.
define_id!(REWARD { id: 2 });
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/multisig.rs | fendermint/vm/actor_interface/src/multisig.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::tuple::*;
use fvm_ipld_hamt::Hamt;
use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount, ActorID, HAMT_BIT_WIDTH};
use serde::{Deserialize, Serialize};
define_code!(MULTISIG { code_id: 9 });
/// Transaction ID type
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, Hash, Eq, PartialEq, PartialOrd)]
#[serde(transparent)]
pub struct TxnID(pub i64);
/// Multisig actor state
#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)]
pub struct State {
pub signers: Vec<Address>,
pub num_approvals_threshold: u64,
pub next_tx_id: TxnID,
// Linear unlock
pub initial_balance: TokenAmount,
pub start_epoch: ChainEpoch,
pub unlock_duration: ChainEpoch,
pub pending_txs: Cid,
}
impl State {
pub fn new<BS: Blockstore>(
store: &BS,
signers: Vec<ActorID>,
threshold: u64,
start: ChainEpoch,
duration: ChainEpoch,
balance: TokenAmount,
) -> anyhow::Result<Self> {
let empty_map_cid = Hamt::<_, ()>::new_with_bit_width(store, HAMT_BIT_WIDTH).flush()?;
let state = Self {
signers: signers.into_iter().map(Address::new_id).collect(),
num_approvals_threshold: threshold,
next_tx_id: Default::default(),
initial_balance: balance,
start_epoch: start,
unlock_duration: duration,
pending_txs: empty_map_cid,
};
Ok(state)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/ipc.rs | fendermint/vm/actor_interface/src/ipc.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
// The IPC actors have bindings in `ipc_actors_abis`.
// Here we define stable IDs for them, so we can deploy the
// Solidity contracts during genesis.
use anyhow::Context;
use ethers::core::abi::Tokenize;
use ethers::core::types as et;
use ethers::core::utils::keccak256;
use fendermint_vm_genesis::{Power, Validator};
use fvm_shared::address::Error as AddressError;
use fvm_shared::address::Payload;
use ipc_actors_abis as ia;
pub use ipc_actors_abis::checkpointing_facet::BottomUpCheckpoint;
use ipc_api::subnet_id::SubnetID;
use lazy_static::lazy_static;
use merkle_tree_rs::{
core::{process_proof, Hash},
format::Raw,
standard::{standard_leaf_hash, LeafType, StandardMerkleTree},
};
use crate::{
diamond::{EthContract, EthContractMap, EthFacet},
eam::{EthAddress, EAM_ACTOR_ID},
};
define_id!(GATEWAY { id: 64 });
define_id!(SUBNETREGISTRY { id: 65 });
lazy_static! {
/// Contracts deployed at genesis with well-known IDs.
pub static ref IPC_CONTRACTS: EthContractMap = {
[
(
gateway::CONTRACT_NAME,
EthContract {
actor_id: GATEWAY_ACTOR_ID,
abi: ia::gateway_diamond::GATEWAYDIAMOND_ABI.to_owned(),
facets: vec![
EthFacet {
name: "GatewayGetterFacet",
abi: ia::gateway_getter_facet::GATEWAYGETTERFACET_ABI.to_owned(),
},
EthFacet {
name: "GatewayManagerFacet",
abi: ia::gateway_manager_facet::GATEWAYMANAGERFACET_ABI.to_owned(),
},
EthFacet {
name: "TopDownFinalityFacet",
abi: ia::top_down_finality_facet::TOPDOWNFINALITYFACET_ABI.to_owned(),
},
EthFacet {
name: "CheckpointingFacet",
abi: ia::checkpointing_facet::CHECKPOINTINGFACET_ABI.to_owned(),
},
EthFacet {
name: "GatewayMessengerFacet",
abi: ia::gateway_messenger_facet::GATEWAYMESSENGERFACET_ABI.to_owned(),
},
EthFacet {
name: "XnetMessagingFacet",
abi: ia::xnet_messaging_facet::XNETMESSAGINGFACET_ABI.to_owned(),
},
EthFacet {
name: "DiamondLoupeFacet",
abi: ia::diamond_loupe_facet::DIAMONDLOUPEFACET_ABI.to_owned(),
},
EthFacet {
name: "DiamondCutFacet",
abi: ia::diamond_cut_facet::DIAMONDCUTFACET_ABI.to_owned(),
},
EthFacet {
name: "OwnershipFacet",
abi: ia::ownership_facet::OWNERSHIPFACET_ABI.to_owned(),
},
],
},
),
(
registry::CONTRACT_NAME,
EthContract {
actor_id: SUBNETREGISTRY_ACTOR_ID,
abi: ia::subnet_registry_diamond::SUBNETREGISTRYDIAMOND_ABI.to_owned(),
facets: vec![
// The registry incorporates the SubnetActor facets, although these aren't expected differently in the constructor.
EthFacet {
name: "SubnetActorGetterFacet",
abi: ia::subnet_actor_getter_facet::SUBNETACTORGETTERFACET_ABI.to_owned(),
},
EthFacet {
name: "SubnetActorManagerFacet",
abi: ia::subnet_actor_manager_facet::SUBNETACTORMANAGERFACET_ABI.to_owned(),
},
EthFacet {
name: "SubnetActorRewardFacet",
abi: ia::subnet_actor_reward_facet::SUBNETACTORREWARDFACET_ABI.to_owned(),
},
EthFacet {
name: "SubnetActorCheckpointingFacet",
abi: ia::subnet_actor_checkpointing_facet::SUBNETACTORCHECKPOINTINGFACET_ABI.to_owned(),
},
EthFacet {
name: "SubnetActorPauseFacet",
abi: ia::subnet_actor_pause_facet::SUBNETACTORPAUSEFACET_ABI.to_owned(),
},
EthFacet {
name: "DiamondLoupeFacet",
abi: ia::diamond_loupe_facet::DIAMONDLOUPEFACET_ABI.to_owned(),
},
EthFacet {
name: "DiamondCutFacet",
abi: ia::diamond_cut_facet::DIAMONDCUTFACET_ABI.to_owned(),
},
EthFacet {
name: "OwnershipFacet",
abi: ia::ownership_facet::OWNERSHIPFACET_ABI.to_owned(),
},
// The registry has its own facets:
// https://github.com/consensus-shipyard/ipc-solidity-actors/blob/b01a2dffe367745f55111a65536a3f6fea9165f5/scripts/deploy-registry.template.ts#L58-L67
EthFacet {
name: "RegisterSubnetFacet",
abi: ia::register_subnet_facet::REGISTERSUBNETFACET_ABI
.to_owned(),
},
EthFacet {
name: "SubnetGetterFacet",
abi: ia::subnet_getter_facet::SUBNETGETTERFACET_ABI.to_owned(),
},
],
},
),
]
.into_iter()
.collect()
};
/// Contracts that need to be deployed afresh for each subnet.
///
/// See [deploy-sa-diamond.ts](https://github.com/consensus-shipyard/ipc-solidity-actors/blob/dev/scripts/deploy-sa-diamond.ts)
///
/// But it turns out that the [SubnetRegistry](https://github.com/consensus-shipyard/ipc-solidity-actors/blob/3b0f3528b79e53e3c90f15016a40892122938ef0/src/SubnetRegistry.sol#L67)
/// actor has this `SubnetActorDiamond` and its facets baked into it, and able to deploy without further ado.
pub static ref SUBNET_CONTRACTS: EthContractMap = {
[
(
subnet::CONTRACT_NAME,
EthContract {
actor_id: 0,
abi: ia::subnet_actor_diamond::SUBNETACTORDIAMOND_ABI.to_owned(),
facets: vec![
EthFacet {
name: "SubnetActorGetterFacet",
abi: ia::subnet_actor_getter_facet::SUBNETACTORGETTERFACET_ABI.to_owned(),
},
EthFacet {
name: "SubnetActorManagerFacet",
abi: ia::subnet_actor_manager_facet::SUBNETACTORMANAGERFACET_ABI.to_owned(),
},
EthFacet {
name: "SubnetActorRewardFacet",
abi: ia::subnet_actor_reward_facet::SUBNETACTORREWARDFACET_ABI.to_owned(),
},
EthFacet {
name: "SubnetActorCheckpointingFacet",
abi: ia::subnet_actor_checkpointing_facet::SUBNETACTORCHECKPOINTINGFACET_ABI.to_owned(),
},
EthFacet {
name: "SubnetActorPauseFacet",
abi: ia::subnet_actor_pause_facet::SUBNETACTORPAUSEFACET_ABI.to_owned(),
},
EthFacet {
name: "DiamondLoupeFacet",
abi: ia::diamond_loupe_facet::DIAMONDLOUPEFACET_ABI.to_owned(),
},
EthFacet {
name: "DiamondCutFacet",
abi: ia::diamond_cut_facet::DIAMONDCUTFACET_ABI.to_owned(),
},
EthFacet {
name: "OwnershipFacet",
abi: ia::ownership_facet::OWNERSHIPFACET_ABI.to_owned(),
},
],
},
),
]
.into_iter()
.collect()
};
/// ABI types of the Merkle tree which contains validator addresses and their voting power.
pub static ref VALIDATOR_TREE_FIELDS: Vec<String> =
vec!["address".to_owned(), "uint256".to_owned()];
}
/// Construct a Merkle tree from the power table in a format which can be validated by
/// https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/utils/cryptography/MerkleProof.sol
///
/// The reference implementation is https://github.com/OpenZeppelin/merkle-tree/
pub struct ValidatorMerkleTree {
tree: StandardMerkleTree<Raw>,
}
impl ValidatorMerkleTree {
pub fn new(validators: &[Validator<Power>]) -> anyhow::Result<Self> {
// Using the 20 byte address for keys because that's what the Solidity library returns
// when recovering a public key from a signature.
let values = validators
.iter()
.map(Self::validator_to_vec)
.collect::<anyhow::Result<Vec<_>>>()?;
let tree = StandardMerkleTree::of(&values, &VALIDATOR_TREE_FIELDS)
.context("failed to construct Merkle tree")?;
Ok(Self { tree })
}
pub fn root_hash(&self) -> Hash {
self.tree.root()
}
/// Create a Merkle proof for a validator.
pub fn prove(&self, validator: &Validator<Power>) -> anyhow::Result<Vec<Hash>> {
let v = Self::validator_to_vec(validator)?;
let proof = self
.tree
.get_proof(LeafType::LeafBytes(v))
.context("failed to produce Merkle proof")?;
Ok(proof)
}
/// Validate a proof against a known root hash.
pub fn validate(
validator: &Validator<Power>,
root: &Hash,
proof: &[Hash],
) -> anyhow::Result<bool> {
let v = Self::validator_to_vec(validator)?;
let h = standard_leaf_hash(v, &VALIDATOR_TREE_FIELDS)?;
let r = process_proof(&h, proof).context("failed to process Merkle proof")?;
Ok(*root == r)
}
/// Convert a validator to what we can pass to the tree.
fn validator_to_vec(validator: &Validator<Power>) -> anyhow::Result<Vec<String>> {
let addr = EthAddress::from(validator.public_key.0);
let addr = et::Address::from_slice(&addr.0);
let addr = format!("{addr:?}");
let power = et::U256::from(validator.power.0);
let power = power.to_string();
Ok(vec![addr, power])
}
}
/// Decompose a subnet ID into a root ID and a route of Ethereum addresses
pub fn subnet_id_to_eth(subnet_id: &SubnetID) -> Result<(u64, Vec<et::Address>), AddressError> {
// Every step along the way in the subnet ID we have an Ethereum address.
let mut route = Vec::new();
for addr in subnet_id.children() {
let addr = match addr.payload() {
Payload::ID(id) => EthAddress::from_id(*id),
Payload::Delegated(da)
if da.namespace() == EAM_ACTOR_ID && da.subaddress().len() == 20 =>
{
EthAddress(da.subaddress().try_into().expect("checked length"))
}
_ => return Err(AddressError::InvalidPayload),
};
route.push(et::H160::from(addr.0))
}
Ok((subnet_id.root_id(), route))
}
/// Hash some value in the same way we'd hash it in Solidity.
///
/// Be careful that if we have to hash a single struct,
/// Solidity's `abi.encode` function will treat it as a tuple,
/// so it has to be passed as a tuple in Rust. Vectors are fine.
pub fn abi_hash<T: Tokenize>(value: T) -> [u8; 32] {
keccak256(ethers::abi::encode(&value.into_tokens()))
}
/// Types where we need to match the way we sign them in Solidity and Rust.
pub trait AbiHash {
/// Hash the item the way we would in Solidity.
fn abi_hash(self) -> [u8; 32];
}
macro_rules! abi_hash {
(struct $name:ty) => {
// Structs have to be hashed as a tuple.
impl AbiHash for $name {
fn abi_hash(self) -> [u8; 32] {
abi_hash((self,))
}
}
};
(Vec < $name:ty >) => {
// Vectors can be hashed as-is
impl AbiHash for Vec<$name> {
fn abi_hash(self) -> [u8; 32] {
abi_hash(self)
}
}
};
}
abi_hash!(struct ipc_actors_abis::checkpointing_facet::BottomUpCheckpoint);
abi_hash!(struct ipc_actors_abis::subnet_actor_checkpointing_facet::BottomUpCheckpoint);
abi_hash!(Vec<ipc_actors_abis::gateway_getter_facet::IpcEnvelope>);
abi_hash!(Vec<ipc_actors_abis::subnet_actor_checkpointing_facet::IpcEnvelope>);
abi_hash!(Vec<ipc_actors_abis::subnet_actor_getter_facet::IpcEnvelope>);
pub mod gateway {
use super::subnet_id_to_eth;
use ethers::contract::{EthAbiCodec, EthAbiType};
use ethers::core::types::{Bytes, H160, U256};
use fendermint_vm_genesis::ipc::GatewayParams;
use fendermint_vm_genesis::{Collateral, Validator};
use fvm_shared::address::Error as AddressError;
use fvm_shared::econ::TokenAmount;
use ipc_actors_abis::gateway_diamond::SubnetID as GatewaySubnetID;
pub use ipc_actors_abis::gateway_getter_facet::Validator as GatewayValidator;
use crate::eam::EthAddress;
pub const CONTRACT_NAME: &str = "GatewayDiamond";
pub const METHOD_INVOKE_CONTRACT: u64 = crate::evm::Method::InvokeContract as u64;
// Constructor parameters aren't generated as part of the Rust bindings.
// TODO: Remove these once https://github.com/gakonst/ethers-rs/pull/2631 is merged.
/// Container type `ConstructorParameters`.
///
/// See [GatewayDiamond.sol](https://github.com/consensus-shipyard/ipc/blob/bc3512fc7c4b0dfcdaac89f297f99cafae68f097/contracts/src/GatewayDiamond.sol#L28-L36)
#[derive(Clone, EthAbiType, EthAbiCodec, Default, Debug, PartialEq, Eq, Hash)]
pub struct ConstructorParameters {
pub bottom_up_check_period: U256,
pub active_validators_limit: u16,
pub majority_percentage: u8,
pub network_name: GatewaySubnetID,
pub validators: Vec<GatewayValidator>,
}
impl ConstructorParameters {
pub fn new(
params: GatewayParams,
validators: Vec<Validator<Collateral>>,
) -> anyhow::Result<Self> {
// Every validator has an Ethereum address.
let validators = validators
.into_iter()
.map(|v| {
let pk = v.public_key.0.serialize();
let addr = EthAddress::new_secp256k1(&pk)?;
let collateral = tokens_to_u256(v.power.0);
Ok(GatewayValidator {
addr: H160::from(addr.0),
weight: collateral,
metadata: Bytes::from(pk),
})
})
.collect::<Result<Vec<_>, AddressError>>()?;
let (root, route) = subnet_id_to_eth(¶ms.subnet_id)?;
Ok(Self {
bottom_up_check_period: U256::from(params.bottom_up_check_period),
active_validators_limit: params.active_validators_limit,
majority_percentage: params.majority_percentage,
network_name: GatewaySubnetID { root, route },
validators,
})
}
}
fn tokens_to_u256(value: TokenAmount) -> U256 {
// XXX: Ignoring any error resulting from larger fee than what fits into U256. This is in genesis after all.
U256::from_big_endian(&value.atto().to_bytes_be().1)
}
#[cfg(test)]
mod tests {
use ethers::core::types::{Selector, U256};
use ethers_core::{
abi::Tokenize,
types::{Bytes, H160},
};
use fvm_shared::{bigint::BigInt, econ::TokenAmount};
use ipc_actors_abis::gateway_diamond::SubnetID as GatewaySubnetID;
use ipc_actors_abis::gateway_getter_facet::Validator as GatewayValidator;
use std::str::FromStr;
use crate::ipc::tests::{check_param_types, constructor_param_types};
use super::{tokens_to_u256, ConstructorParameters};
#[test]
fn tokenize_constructor_params() {
let cp = ConstructorParameters {
network_name: GatewaySubnetID {
root: 0,
route: Vec::new(),
},
bottom_up_check_period: U256::from(100),
majority_percentage: 67,
validators: vec![GatewayValidator {
addr: H160::zero(),
weight: U256::zero(),
metadata: Bytes::new(),
}],
active_validators_limit: 100,
};
// It looks like if we pass just the record then it will be passed as 5 tokens,
// but the constructor only expects one parameter, and it has to be a tuple.
let cp = (Vec::<Selector>::new(), cp);
let tokens = cp.into_tokens();
let cons = ipc_actors_abis::gateway_diamond::GATEWAYDIAMOND_ABI
.constructor()
.expect("Gateway has a constructor");
let param_types = constructor_param_types(cons);
check_param_types(&tokens, ¶m_types).unwrap();
cons.encode_input(vec![], &tokens)
.expect("should encode constructor input");
}
#[test]
#[should_panic]
fn max_fee_exceeded() {
let mut value = BigInt::from_str(&U256::MAX.to_string()).unwrap();
value += 1;
let value = TokenAmount::from_atto(value);
let _ = tokens_to_u256(value);
}
}
}
pub mod registry {
use ethers::contract::{EthAbiCodec, EthAbiType};
use ethers::core::types::Address;
type FunctionSelector = [u8; 4];
pub const CONTRACT_NAME: &str = "SubnetRegistryDiamond";
/// Container type `ConstructorParameters`.
///
/// See [SubnetRegistry.sol](https://github.com/consensus-shipyard/ipc/blob/62f0d64fea993196cd3f148498c25a108b0069c8/contracts/src/SubnetRegistryDiamond.sol#L16-L28)
#[derive(Clone, EthAbiType, EthAbiCodec, Default, Debug, PartialEq, Eq, Hash)]
pub struct ConstructorParameters {
pub gateway: Address,
pub getter_facet: Address,
pub manager_facet: Address,
pub rewarder_facet: Address,
pub pauser_facet: Address,
pub checkpointer_facet: Address,
pub diamond_cut_facet: Address,
pub diamond_loupe_facet: Address,
pub ownership_facet: Address,
pub subnet_getter_selectors: Vec<FunctionSelector>,
pub subnet_manager_selectors: Vec<FunctionSelector>,
pub subnet_rewarder_selectors: Vec<FunctionSelector>,
pub subnet_pauser_selectors: Vec<FunctionSelector>,
pub subnet_checkpointer_selectors: Vec<FunctionSelector>,
pub subnet_actor_diamond_cut_selectors: Vec<FunctionSelector>,
pub subnet_actor_diamond_loupe_selectors: Vec<FunctionSelector>,
pub subnet_actor_ownership_selectors: Vec<FunctionSelector>,
pub creation_privileges: u8, // 0 = Unrestricted, 1 = Owner.
}
}
pub mod subnet {
use crate::revert_errors;
use ipc_actors_abis::checkpointing_facet::CheckpointingFacetErrors;
use ipc_actors_abis::gateway_manager_facet::GatewayManagerFacetErrors;
use ipc_actors_abis::subnet_actor_checkpointing_facet::SubnetActorCheckpointingFacetErrors;
use ipc_actors_abis::subnet_actor_manager_facet::SubnetActorManagerFacetErrors;
use ipc_actors_abis::subnet_actor_pause_facet::SubnetActorPauseFacetErrors;
use ipc_actors_abis::subnet_actor_reward_facet::SubnetActorRewardFacetErrors;
use ipc_actors_abis::top_down_finality_facet::TopDownFinalityFacetErrors;
pub const CONTRACT_NAME: &str = "SubnetActorDiamond";
// The subnet actor has its own errors, but it also invokes the gateway, which might revert for its own reasons.
revert_errors! {
SubnetActorErrors {
SubnetActorManagerFacetErrors,
SubnetActorRewardFacetErrors,
SubnetActorPauseFacetErrors,
SubnetActorCheckpointingFacetErrors,
GatewayManagerFacetErrors,
CheckpointingFacetErrors,
TopDownFinalityFacetErrors
}
}
#[cfg(test)]
mod tests {
use ethers::abi::{AbiType, Tokenize};
use ethers::core::types::Bytes;
use ipc_actors_abis::subnet_actor_checkpointing_facet::{BottomUpCheckpoint, SubnetID};
#[test]
fn checkpoint_abi() {
// Some random checkpoint printed in a test that failed because the Rust ABI was different then the Solidity ABI.
let checkpoint = BottomUpCheckpoint {
subnet_id: SubnetID {
root: 12378393254986206693,
route: vec![
"0x7b11cf9ca8ccee13bb3d003c97af5c18434067a9",
"0x3d9019b8bf3bfd5e979ddc3b2761be54af867c47",
]
.into_iter()
.map(|h| h.parse().unwrap())
.collect(),
},
block_height: ethers::types::U256::from(21),
block_hash: [
107, 115, 111, 52, 42, 179, 77, 154, 254, 66, 52, 169, 43, 219, 25, 12, 53,
178, 232, 216, 34, 217, 96, 27, 0, 185, 215, 8, 155, 25, 15, 1,
],
next_configuration_number: 1,
msgs: vec![],
};
let param_type = BottomUpCheckpoint::param_type();
// Captured value of `abi.encode` in Solidity.
let expected_abi: Bytes = "0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000156b736f342ab34d9afe4234a92bdb190c35b2e8d822d9601b00b9d7089b190f0100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000abc8e314f58b4de5000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000020000000000000000000000007b11cf9ca8ccee13bb3d003c97af5c18434067a90000000000000000000000003d9019b8bf3bfd5e979ddc3b2761be54af867c470000000000000000000000000000000000000000000000000000000000000000".parse().unwrap();
// XXX: It doesn't work with `decode_whole`.
let expected_tokens =
ethers::abi::decode(&[param_type], &expected_abi).expect("invalid Solidity ABI");
// The data needs to be wrapped into a tuple.
let observed_tokens = (checkpoint,).into_tokens();
let observed_abi: Bytes = ethers::abi::encode(&observed_tokens).into();
assert_eq!(observed_tokens, expected_tokens);
assert_eq!(observed_abi, expected_abi);
}
}
}
#[cfg(test)]
mod tests {
use anyhow::bail;
use ethers_core::abi::{Constructor, ParamType, Token};
use fendermint_vm_genesis::{Power, Validator};
use quickcheck_macros::quickcheck;
use super::ValidatorMerkleTree;
/// Check all tokens against expected parameters; return any offending one.
///
/// Based on [Tokens::types_check]
pub fn check_param_types(tokens: &[Token], param_types: &[ParamType]) -> anyhow::Result<()> {
if param_types.len() != tokens.len() {
bail!(
"different number of parameters; expected {}, got {}",
param_types.len(),
tokens.len()
);
}
for (i, (pt, t)) in param_types.iter().zip(tokens).enumerate() {
if !t.type_check(pt) {
bail!("parameter {i} didn't type check: expected {pt:?}, got {t:?}");
}
}
Ok(())
}
/// Returns all input params of given constructor.
///
/// Based on [Constructor::param_types]
pub fn constructor_param_types(cons: &Constructor) -> Vec<ParamType> {
cons.inputs.iter().map(|p| p.kind.clone()).collect()
}
#[quickcheck]
fn merkleize_validators(validators: Vec<Validator<Power>>) {
if validators.is_empty() {
return;
}
let tree = ValidatorMerkleTree::new(&validators).expect("failed to create tree");
let root = tree.root_hash();
let validator = validators.first().unwrap();
let proof = tree.prove(validator).expect("failed to prove");
assert!(ValidatorMerkleTree::validate(validator, &root, &proof).expect("failed to validate"))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/actor_interface/src/account.rs | fendermint/vm/actor_interface/src/account.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_ipld_encoding::tuple::*;
use fvm_shared::address::Address;
define_code!(ACCOUNT { code_id: 4 });
/// State includes the address for the actor
#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)]
pub struct State {
pub address: Address,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/encoding/src/lib.rs | fendermint/vm/encoding/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fvm_shared::address::Address;
use fvm_shared::bigint::BigInt;
use fvm_shared::econ::TokenAmount;
use ipc_api::subnet_id::SubnetID;
use num_traits::Num;
use serde::de::{DeserializeOwned, Error};
use serde::{de, Deserialize, Serialize, Serializer};
use serde_with::{DeserializeAs, SerializeAs};
use std::any::type_name;
use std::fmt::Display;
use std::str::FromStr;
use cid::Cid;
/// Serializer which can be used together with the [`serde_with`] crate to annotate
/// fields that we want to appear as strings in human readable formats like JSON,
/// and leave as their default serialization formats otherwise (ie. bytes, which
/// would appear as array of numbers in JSON).
///
/// Implementations for common chain types are provided further down in this
/// module; the `human_readable_str!` and `human_readable_delegate!` macros
/// generate additional ones.
///
/// # Example
///
/// ```ignore
/// #[serde_as(as = "Option<IsHumanReadable>")]
/// pub delegated_address: Option<Address>,
/// ```
pub struct IsHumanReadable;
/// Serialize `source` as its `ToString` representation for human readable
/// formats (e.g. JSON), otherwise fall back to the type's own serialization.
pub fn serialize_str<T, S>(source: &T, serializer: S) -> Result<S::Ok, S::Error>
where
    T: ToString + Serialize,
    S: Serializer,
{
    match serializer.is_human_readable() {
        true => source.to_string().serialize(serializer),
        false => source.serialize(serializer),
    }
}
/// Deserialize a value from its string form in human readable formats,
/// otherwise delegate to the type's own deserialization.
pub fn deserialize_str<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
    T: FromStr + DeserializeOwned,
    <T as FromStr>::Err: Display,
    D: de::Deserializer<'de>,
{
    if !deserializer.is_human_readable() {
        return T::deserialize(deserializer);
    }
    let repr = String::deserialize(deserializer)?;
    T::from_str(&repr).map_err(|e| {
        D::Error::custom(format!(
            "error deserializing {}: {}",
            type_name::<T>(),
            e
        ))
    })
}
/// Create [`SerializeAs`] and [`DeserializeAs`] instances for `IsHumanReadable` for the
/// given type assuming it implements [`ToString`] and [`FromStr`].
///
/// # Example
///
/// ```ignore
/// struct IsHumanReadable;
///
/// human_readable_str!(Address);
///
/// // Or in full form:
/// human_readable_str!(Address: IsHumanReadable);
///
/// #[serde_as]
/// #[derive(Serialize, Deserialize)]
/// struct MyStruct {
/// #[serde_as(as = "Option<IsHumanReadable>")]
/// pub delegated_address: Option<Address>,
/// }
/// ```
#[macro_export]
macro_rules! human_readable_str {
    // Full form: implement both serde_with adapters for `$typ` on marker `$mark`,
    // delegating to this crate's string-based (de)serializers.
    ($typ:ty : $mark:ty) => {
        impl serde_with::SerializeAs<$typ> for $mark {
            fn serialize_as<S>(value: &$typ, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::Serializer,
            {
                $crate::serialize_str(value, serializer)
            }
        }
        impl<'de> serde_with::DeserializeAs<'de, $typ> for $mark {
            fn deserialize_as<D>(deserializer: D) -> Result<$typ, D::Error>
            where
                D: serde::de::Deserializer<'de>,
            {
                $crate::deserialize_str(deserializer)
            }
        }
    };
    // Short form: default the marker type to `IsHumanReadable`.
    ($typ: ty) => {
        human_readable_str!($typ: IsHumanReadable);
    };
}
/// Delegate [`SerializeAs`] and [`DeserializeAs`] to another instance.
///
/// # Example
///
/// ```ignore
/// struct IsHumanReadable;
///
/// human_readable_delegate!(Address);
///
/// // Or in full form:
/// human_readable_delegate!(Address: IsHumanReadable => fendermint_vm_encoding::IsHumanReadable);
///
/// #[serde_as]
/// #[derive(Serialize, Deserialize)]
/// struct MyStruct {
/// #[serde_as(as = "Option<IsHumanReadable>")]
/// pub delegated_address: Option<Address>,
/// }
/// ```
#[macro_export]
macro_rules! human_readable_delegate {
    // Full form: forward both serde_with adapters for `$typ` on marker `$mark`
    // to an existing implementation on `$deleg`.
    ($typ:ty : $mark:ty => $deleg:ty) => {
        impl serde_with::SerializeAs<$typ> for $mark {
            fn serialize_as<S>(value: &$typ, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::Serializer,
                $deleg: serde_with::SerializeAs<$typ>,
            {
                <$deleg>::serialize_as::<S>(value, serializer)
            }
        }
        impl<'de> serde_with::DeserializeAs<'de, $typ> for $mark {
            fn deserialize_as<D>(deserializer: D) -> Result<$typ, D::Error>
            where
                D: serde::de::Deserializer<'de>,
                $deleg: serde_with::DeserializeAs<'de, $typ>,
            {
                <$deleg>::deserialize_as::<D>(deserializer)
            }
        }
    };
    // Default the delegate to this crate's `IsHumanReadable`.
    ($typ: ty : $mark:ty) => {
        human_readable_delegate!($typ : $mark => $crate::IsHumanReadable);
    };
    // Default both the marker and the delegate.
    ($typ: ty) => {
        human_readable_delegate!($typ : IsHumanReadable => $crate::IsHumanReadable);
    };
}
// Defining these here so we don't have to wrap them in structs where we want to use them.
// Each invocation implements SerializeAs/DeserializeAs<_> for IsHumanReadable.
human_readable_str!(Address);
human_readable_str!(Cid);
human_readable_str!(SubnetID);
impl SerializeAs<TokenAmount> for IsHumanReadable {
    /// Serialize tokens as human readable decimal string.
    fn serialize_as<S>(tokens: &TokenAmount, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match serializer.is_human_readable() {
            // Decimal string of the atto-denominated amount.
            true => tokens.atto().to_str_radix(10).serialize(serializer),
            false => tokens.serialize(serializer),
        }
    }
}
impl<'de> DeserializeAs<'de, TokenAmount> for IsHumanReadable {
    /// Deserialize tokens from human readable decimal format.
    fn deserialize_as<D>(deserializer: D) -> Result<TokenAmount, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        if !deserializer.is_human_readable() {
            return TokenAmount::deserialize(deserializer);
        }
        let repr = String::deserialize(deserializer)?;
        BigInt::from_str_radix(&repr, 10)
            .map(TokenAmount::from_atto)
            .map_err(|e| D::Error::custom(format!("error deserializing tokens: {}", e)))
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/lib.rs | fendermint/vm/topdown/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod cache;
mod error;
mod finality;
pub mod sync;
pub mod convert;
pub mod proxy;
mod toggle;
pub mod voting;
use async_stm::Stm;
use async_trait::async_trait;
use ethers::utils::hex;
use fvm_shared::clock::ChainEpoch;
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::StakingChangeRequest;
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
use std::time::Duration;
pub use crate::cache::{SequentialAppendError, SequentialKeyCache, ValueIter};
pub use crate::error::Error;
pub use crate::finality::CachedFinalityProvider;
pub use crate::toggle::Toggle;
pub type BlockHeight = u64;
pub type Bytes = Vec<u8>;
pub type BlockHash = Bytes;
/// The null round error message
pub(crate) const NULL_ROUND_ERR_MSG: &str = "requested epoch was a null round";
/// Default topdown proposal height range
pub(crate) const DEFAULT_MAX_PROPOSAL_RANGE: BlockHeight = 100;
pub(crate) const DEFAULT_MAX_CACHE_BLOCK: BlockHeight = 500;
pub(crate) const DEFAULT_PROPOSAL_DELAY: BlockHeight = 2;
/// Configuration for the top-down parent syncer and proposal logic.
#[derive(Debug, Clone, Deserialize)]
pub struct Config {
    /// The number of blocks to delay before reporting a height as final on the parent chain.
    /// To propose a certain number of epochs delayed from the latest height, we aim to be
    /// conservative and avoid others rejecting the proposal because they don't see the
    /// height as final yet.
    pub chain_head_delay: BlockHeight,
    /// Parent syncing cron period, in seconds
    pub polling_interval: Duration,
    /// Top down exponential back off retry base
    pub exponential_back_off: Duration,
    /// The max number of retries for exponential backoff before giving up
    pub exponential_retry_limit: usize,
    /// The max number of blocks one should make the topdown proposal
    /// (defaults to `DEFAULT_MAX_PROPOSAL_RANGE` when unset)
    pub max_proposal_range: Option<BlockHeight>,
    /// Max number of blocks that should be stored in cache
    /// (defaults to `DEFAULT_MAX_CACHE_BLOCK` when unset)
    pub max_cache_blocks: Option<BlockHeight>,
    // Extra delay, in blocks, applied before proposing;
    // defaults to `DEFAULT_PROPOSAL_DELAY` when unset.
    pub proposal_delay: Option<BlockHeight>,
}
impl Config {
    /// Create a config with the mandatory fields; the optional limits are left
    /// unset and fall back to the module's `DEFAULT_*` constants via the getters.
    pub fn new(
        chain_head_delay: BlockHeight,
        polling_interval: Duration,
        exponential_back_off: Duration,
        exponential_retry_limit: usize,
    ) -> Self {
        Self {
            chain_head_delay,
            polling_interval,
            exponential_back_off,
            exponential_retry_limit,
            max_proposal_range: None,
            max_cache_blocks: None,
            proposal_delay: None,
        }
    }
    /// Builder-style setter for the maximum proposal height range.
    pub fn with_max_proposal_range(mut self, max_proposal_range: BlockHeight) -> Self {
        self.max_proposal_range = Some(max_proposal_range);
        self
    }
    /// Builder-style setter for the proposal delay.
    pub fn with_proposal_delay(mut self, proposal_delay: BlockHeight) -> Self {
        self.proposal_delay = Some(proposal_delay);
        self
    }
    /// Builder-style setter for the maximum number of cached blocks.
    pub fn with_max_cache_blocks(mut self, max_cache_blocks: BlockHeight) -> Self {
        self.max_cache_blocks = Some(max_cache_blocks);
        self
    }
    /// Configured max proposal range, or `DEFAULT_MAX_PROPOSAL_RANGE`.
    pub fn max_proposal_range(&self) -> BlockHeight {
        self.max_proposal_range
            .unwrap_or(DEFAULT_MAX_PROPOSAL_RANGE)
    }
    /// Configured proposal delay, or `DEFAULT_PROPOSAL_DELAY`.
    pub fn proposal_delay(&self) -> BlockHeight {
        self.proposal_delay.unwrap_or(DEFAULT_PROPOSAL_DELAY)
    }
    /// Configured cache block limit, or `DEFAULT_MAX_CACHE_BLOCK`.
    pub fn max_cache_blocks(&self) -> BlockHeight {
        self.max_cache_blocks.unwrap_or(DEFAULT_MAX_CACHE_BLOCK)
    }
}
/// The finality view for IPC parent at certain height.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct IPCParentFinality {
    /// The latest chain height
    pub height: BlockHeight,
    /// The block hash. For FVM, it is a Cid. For Evm, it is bytes32 as one can now potentially
    /// deploy a subnet on EVM.
    pub block_hash: BlockHash,
}
impl IPCParentFinality {
    /// Build a finality view from a parent chain epoch and its block hash.
    pub fn new(height: ChainEpoch, hash: BlockHash) -> Self {
        // ChainEpoch is signed; reinterpret it as the unsigned BlockHeight alias.
        let height = height as BlockHeight;
        Self {
            height,
            block_hash: hash,
        }
    }
}
impl Display for IPCParentFinality {
    // Human-readable rendering for logs: height plus hex-encoded block hash.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "IPCParentFinality(height: {}, block_hash: {})",
            self.height,
            hex::encode(&self.block_hash)
        )
    }
}
#[async_trait]
pub trait ParentViewProvider {
    /// Obtain the genesis epoch of the current subnet in the parent
    fn genesis_epoch(&self) -> anyhow::Result<BlockHeight>;
    /// Get the validator changes from and to height.
    // NOTE(review): whether `from`/`to` are inclusive is determined by the
    // implementation — confirm against `CachedFinalityProvider`.
    async fn validator_changes_from(
        &self,
        from: BlockHeight,
        to: BlockHeight,
    ) -> anyhow::Result<Vec<StakingChangeRequest>>;
    /// Get the top down messages from and to height.
    async fn top_down_msgs_from(
        &self,
        from: BlockHeight,
        to: BlockHeight,
    ) -> anyhow::Result<Vec<IpcEnvelope>>;
}
/// Extends [`ParentViewProvider`] with the proposal life-cycle: produce,
/// validate and commit parent finality proposals.
pub trait ParentFinalityProvider: ParentViewProvider {
    /// Latest proposal for parent finality
    fn next_proposal(&self) -> Stm<Option<IPCParentFinality>>;
    /// Check if the target proposal is valid
    fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm<bool>;
    /// Called when finality is committed
    fn set_new_finality(
        &self,
        finality: IPCParentFinality,
        previous_finality: Option<IPCParentFinality>,
    ) -> Stm<()>;
}
/// If res is null round error, returns the default value from f()
/// If `res` failed because the parent reported a null round, substitute the
/// default produced by `f()`; any other outcome passes through untouched.
pub(crate) fn handle_null_round<T, F: FnOnce() -> T>(
    res: anyhow::Result<T>,
    f: F,
) -> anyhow::Result<T> {
    match res {
        // Null rounds are expected on Filecoin rootnet; treat them as empty results.
        Err(e) if is_null_round_error(&e) => Ok(f()),
        other => other,
    }
}
/// True when the error's string form indicates a null round (see `NULL_ROUND_ERR_MSG`).
pub(crate) fn is_null_round_error(err: &anyhow::Error) -> bool {
    is_null_round_str(&err.to_string())
}
/// Substring match against the canonical null-round error message.
pub(crate) fn is_null_round_str(s: &str) -> bool {
    s.contains(NULL_ROUND_ERR_MSG)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/convert.rs | fendermint/vm/topdown/src/convert.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Handles the type conversion to ethers contract types
use crate::IPCParentFinality;
use anyhow::anyhow;
use ethers::types::U256;
use ipc_actors_abis::{gateway_getter_facet, top_down_finality_facet};
impl TryFrom<IPCParentFinality> for top_down_finality_facet::ParentFinality {
    type Error = anyhow::Error;
    /// Convert to the Solidity facet type; fails unless the hash is exactly 32 bytes.
    fn try_from(value: IPCParentFinality) -> Result<Self, Self::Error> {
        let hash_bytes = value.block_hash;
        if hash_bytes.len() != 32 {
            return Err(anyhow!("invalid block hash length, expecting 32"));
        }
        let mut block_hash = [0u8; 32];
        block_hash.copy_from_slice(&hash_bytes);
        Ok(Self {
            height: U256::from(value.height),
            block_hash,
        })
    }
}
impl From<gateway_getter_facet::ParentFinality> for IPCParentFinality {
    /// Lossless conversion from the gateway getter facet representation.
    fn from(value: gateway_getter_facet::ParentFinality) -> Self {
        Self {
            height: value.height.as_u64(),
            block_hash: value.block_hash.to_vec(),
        }
    }
}
impl From<top_down_finality_facet::ParentFinality> for IPCParentFinality {
    /// Lossless conversion from the top-down finality facet representation.
    fn from(value: top_down_finality_facet::ParentFinality) -> Self {
        Self {
            height: value.height.as_u64(),
            block_hash: value.block_hash.to_vec(),
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/toggle.rs | fendermint/vm/topdown/src/toggle.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::finality::ParentViewPayload;
use crate::{
BlockHash, BlockHeight, CachedFinalityProvider, Error, IPCParentFinality,
ParentFinalityProvider, ParentViewProvider,
};
use anyhow::anyhow;
use async_stm::{Stm, StmResult};
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::StakingChangeRequest;
/// The parent finality provider could have all functionalities disabled.
#[derive(Clone)]
pub struct Toggle<P> {
    // `None` = disabled: queries return defaults, side effects become no-ops.
    inner: Option<P>,
}
impl<P> Toggle<P> {
    /// A toggle with no inner provider.
    pub fn disabled() -> Self {
        Self { inner: None }
    }
    /// A toggle that delegates everything to `inner`.
    pub fn enabled(inner: P) -> Self {
        Self { inner: Some(inner) }
    }
    /// Whether an inner provider is present.
    pub fn is_enabled(&self) -> bool {
        self.inner.is_some()
    }
    /// Apply `f` to the inner provider when enabled, otherwise succeed with `other`.
    fn perform_or_else<F, T, E>(&self, f: F, other: T) -> Result<T, E>
    where
        F: FnOnce(&P) -> Result<T, E>,
    {
        self.inner.as_ref().map_or(Ok(other), f)
    }
}
#[async_trait::async_trait]
impl<P: ParentViewProvider + Send + Sync + 'static> ParentViewProvider for Toggle<P> {
    /// Delegates to the wrapped provider; errors when the toggle is disabled.
    fn genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
        let provider = self
            .inner
            .as_ref()
            .ok_or_else(|| anyhow!("provider is toggled off"))?;
        provider.genesis_epoch()
    }
    /// Delegates to the wrapped provider; errors when the toggle is disabled.
    async fn validator_changes_from(
        &self,
        from: BlockHeight,
        to: BlockHeight,
    ) -> anyhow::Result<Vec<StakingChangeRequest>> {
        let provider = self
            .inner
            .as_ref()
            .ok_or_else(|| anyhow!("provider is toggled off"))?;
        provider.validator_changes_from(from, to).await
    }
    /// Delegates to the wrapped provider; errors when the toggle is disabled.
    async fn top_down_msgs_from(
        &self,
        from: BlockHeight,
        to: BlockHeight,
    ) -> anyhow::Result<Vec<IpcEnvelope>> {
        let provider = self
            .inner
            .as_ref()
            .ok_or_else(|| anyhow!("provider is toggled off"))?;
        provider.top_down_msgs_from(from, to).await
    }
}
impl<P: ParentFinalityProvider + Send + Sync + 'static> ParentFinalityProvider for Toggle<P> {
    // When disabled: no proposal is produced.
    fn next_proposal(&self) -> Stm<Option<IPCParentFinality>> {
        self.perform_or_else(|p| p.next_proposal(), None)
    }
    // When disabled: every proposal is rejected.
    fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm<bool> {
        self.perform_or_else(|p| p.check_proposal(proposal), false)
    }
    // When disabled: committing finality is a no-op.
    fn set_new_finality(
        &self,
        finality: IPCParentFinality,
        previous_finality: Option<IPCParentFinality>,
    ) -> Stm<()> {
        self.perform_or_else(|p| p.set_new_finality(finality, previous_finality), ())
    }
}
/// Pass-through helpers that are only available when the inner provider is the
/// cached one; each returns a neutral default when the toggle is disabled.
impl<P> Toggle<CachedFinalityProvider<P>> {
    pub fn block_hash(&self, height: BlockHeight) -> Stm<Option<BlockHash>> {
        self.perform_or_else(|p| p.block_hash(height), None)
    }
    pub fn latest_height_in_cache(&self) -> Stm<Option<BlockHeight>> {
        self.perform_or_else(|p| p.latest_height_in_cache(), None)
    }
    pub fn latest_height(&self) -> Stm<Option<BlockHeight>> {
        self.perform_or_else(|p| p.latest_height(), None)
    }
    pub fn last_committed_finality(&self) -> Stm<Option<IPCParentFinality>> {
        self.perform_or_else(|p| p.last_committed_finality(), None)
    }
    pub fn new_parent_view(
        &self,
        height: BlockHeight,
        maybe_payload: Option<ParentViewPayload>,
    ) -> StmResult<(), Error> {
        self.perform_or_else(|p| p.new_parent_view(height, maybe_payload), ())
    }
    pub fn reset(&self, finality: IPCParentFinality) -> Stm<()> {
        self.perform_or_else(|p| p.reset(finality), ())
    }
    // NOTE(review): when disabled this reports `BlockHeight::MAX` — presumably so
    // callers treat the cache as "full" and never wait for it; confirm at call sites.
    pub fn cached_blocks(&self) -> Stm<BlockHeight> {
        self.perform_or_else(|p| p.cached_blocks(), BlockHeight::MAX)
    }
    pub fn first_non_null_block(&self, height: BlockHeight) -> Stm<Option<BlockHeight>> {
        self.perform_or_else(|p| p.first_non_null_block(height), None)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/error.rs | fendermint/vm/topdown/src/error.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::{BlockHeight, SequentialAppendError};
use thiserror::Error;
/// The errors for top down checkpointing
#[derive(Error, Debug, Eq, PartialEq, Clone)]
pub enum Error {
#[error("Incoming items are not order sequentially")]
NotSequential,
#[error("The parent view update with block height is not sequential: {0:?}")]
NonSequentialParentViewInsert(SequentialAppendError),
#[error("Parent chain reorg detected")]
ParentChainReorgDetected,
#[error("Cannot query parent at height {1}: {0}")]
CannotQueryParent(String, BlockHeight),
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/voting.rs | fendermint/vm/topdown/src/voting.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use async_stm::{abort, atomically_or_err, retry, Stm, StmResult, TVar};
use serde::{de::DeserializeOwned, Serialize};
use std::hash::Hash;
use std::{fmt::Debug, time::Duration};
use crate::{BlockHash, BlockHeight};
// Using this type because it's `Hash`, unlike the normal `libsecp256k1::PublicKey`.
pub use ipc_ipld_resolver::ValidatorKey;
use ipc_ipld_resolver::VoteRecord;
pub type Weight = u64;
/// Errors raised while tallying parent-finality votes.
#[derive(Debug, thiserror::Error)]
pub enum Error<K = ValidatorKey, V: AsRef<[u8]> = BlockHash> {
    #[error("the last finalized block has not been set")]
    Uninitialized,
    #[error("failed to extend chain; expected block height {0}, got {1}")]
    UnexpectedBlock(BlockHeight, BlockHeight),
    #[error("validator unknown or has no power: {0:?}")]
    UnpoweredValidator(K),
    // A validator voted for two different hashes at the same height.
    #[error(
        "equivocation by validator {0:?} at height {1}; {} != {}",
        hex::encode(.2),
        hex::encode(.3)
    )]
    Equivocation(K, BlockHeight, V, V),
}
/// Keep track of votes being gossiped about parent chain finality
/// and tally up the weights of the validators on the child subnet,
/// so that we can ask for proposals that are not going to be voted
/// down.
///
/// All state lives in STM `TVar`s so reads/writes compose into atomic transactions.
#[derive(Clone)]
pub struct VoteTally<K = ValidatorKey, V = BlockHash> {
    /// Current validator weights. These are the ones who will vote on the blocks,
    /// so these are the weights which need to form a quorum.
    power_table: TVar<im::HashMap<K, Weight>>,
    /// The *finalized mainchain* of the parent as observed by this node.
    ///
    /// These are assumed to be final because IIRC that's how the syncer works,
    /// only fetching the info about blocks which are already sufficiently deep.
    ///
    /// When we want to propose, all we have to do is walk back this chain and
    /// tally the votes we collected for the block hashes until we reach a quorum.
    ///
    /// The block hash is optional to allow for null blocks on Filecoin rootnet.
    chain: TVar<im::OrdMap<BlockHeight, Option<V>>>,
    /// Index votes received by height and hash, which makes it easy to look up
    /// all the votes for a given block hash and also to verify that a validator
    /// isn't equivocating by trying to vote for two different things at the
    /// same height.
    votes: TVar<im::OrdMap<BlockHeight, im::HashMap<V, im::HashSet<K>>>>,
    /// Adding votes can be paused if we observe that looking for a quorum takes too long
    /// and is often retried due to votes being added.
    pause_votes: TVar<bool>,
}
impl<K, V> VoteTally<K, V>
where
    K: Clone + Hash + Eq + Sync + Send + 'static + Debug,
    V: AsRef<[u8]> + Clone + Hash + Eq + Sync + Send + 'static,
{
    /// Create an uninitialized instance. Before blocks can be added to it
    /// we will have to set the last finalized block.
    ///
    /// The reason this exists is so that we can delay initialization until
    /// after the genesis block has been executed.
    pub fn empty() -> Self {
        Self {
            power_table: TVar::default(),
            chain: TVar::default(),
            votes: TVar::default(),
            pause_votes: TVar::new(false),
        }
    }
    /// Initialize the vote tally from the current power table
    /// and the last finalized block from the ledger.
    pub fn new(power_table: Vec<(K, Weight)>, last_finalized_block: (BlockHeight, V)) -> Self {
        let (height, hash) = last_finalized_block;
        Self {
            power_table: TVar::new(im::HashMap::from_iter(power_table)),
            chain: TVar::new(im::OrdMap::from_iter([(height, Some(hash))])),
            votes: TVar::default(),
            pause_votes: TVar::new(false),
        }
    }
    /// Check that a validator key is currently part of the power table.
    pub fn has_power(&self, validator_key: &K) -> Stm<bool> {
        let pt = self.power_table.read()?;
        // For consistency consider validators without power unknown.
        match pt.get(validator_key) {
            None => Ok(false),
            Some(weight) => Ok(*weight > 0),
        }
    }
    /// Calculate the minimum weight needed for a proposal to pass with the current membership.
    ///
    /// This is inclusive, that is, if the sum of weight is greater or equal to this, it should pass.
    /// The equivalent formula can be found in CometBFT [here](https://github.com/cometbft/cometbft/blob/a8991d63e5aad8be82b90329b55413e3a4933dc0/types/vote_set.go#L307).
    pub fn quorum_threshold(&self) -> Stm<Weight> {
        let total_weight: Weight = self.power_table.read().map(|pt| pt.values().sum())?;
        // Strictly more than 2/3: integer division truncates, the +1 restores strictness.
        Ok(total_weight * 2 / 3 + 1)
    }
    /// Return the height of the first entry in the chain.
    ///
    /// This is the block that was finalized *in the ledger*.
    pub fn last_finalized_height(&self) -> Stm<BlockHeight> {
        self.chain
            .read()
            .map(|c| c.get_min().map(|(h, _)| *h).unwrap_or_default())
    }
    /// Return the height of the last entry in the chain.
    ///
    /// This is the block that we can cast our vote on as final.
    pub fn latest_height(&self) -> Stm<BlockHeight> {
        self.chain
            .read()
            .map(|c| c.get_max().map(|(h, _)| *h).unwrap_or_default())
    }
    /// Get the hash of a block at the given height, if known.
    /// Returns `None` both for unknown heights and for null rounds.
    pub fn block_hash(&self, height: BlockHeight) -> Stm<Option<V>> {
        self.chain.read().map(|c| c.get(&height).cloned().flatten())
    }
    /// Add the next final block observed on the parent blockchain.
    ///
    /// Returns an error unless it's exactly the next expected height,
    /// so the caller has to call this in every epoch. If the parent
    /// chain produced no blocks in that epoch then pass `None` to
    /// represent that null-round in the tally.
    pub fn add_block(
        &self,
        block_height: BlockHeight,
        block_hash: Option<V>,
    ) -> StmResult<(), Error<K>> {
        let mut chain = self.chain.read_clone()?;
        // Check that we are extending the chain. We could also ignore existing heights.
        match chain.get_max() {
            None => {
                return abort(Error::Uninitialized);
            }
            Some((parent_height, _)) => {
                if block_height != parent_height + 1 {
                    return abort(Error::UnexpectedBlock(parent_height + 1, block_height));
                }
            }
        }
        chain.insert(block_height, block_hash);
        self.chain.write(chain)?;
        Ok(())
    }
    /// Add a vote we received.
    ///
    /// Returns `true` if this vote was added, `false` if it was ignored as a
    /// duplicate or a height we already finalized, and an error if it's an
    /// equivocation or from a validator we don't know.
    pub fn add_vote(
        &self,
        validator_key: K,
        block_height: BlockHeight,
        block_hash: V,
    ) -> StmResult<bool, Error<K, V>> {
        // Block (STM retry) while a quorum search is in progress.
        if *self.pause_votes.read()? {
            retry()?;
        }
        let min_height = self.last_finalized_height()?;
        if block_height < min_height {
            return Ok(false);
        }
        if !self.has_power(&validator_key)? {
            return abort(Error::UnpoweredValidator(validator_key));
        }
        let mut votes = self.votes.read_clone()?;
        let votes_at_height = votes.entry(block_height).or_default();
        // Equivocation check: this validator must not have voted for a different hash here.
        for (bh, vs) in votes_at_height.iter() {
            if *bh != block_hash && vs.contains(&validator_key) {
                return abort(Error::Equivocation(
                    validator_key,
                    block_height,
                    block_hash,
                    bh.clone(),
                ));
            }
        }
        let votes_for_block = votes_at_height.entry(block_hash).or_default();
        // `insert` returning `Some` means the exact same vote was already recorded.
        if votes_for_block.insert(validator_key).is_some() {
            return Ok(false);
        }
        self.votes.write(votes)?;
        Ok(true)
    }
    /// Pause adding more votes until we are finished calling `find_quorum` which
    /// automatically re-enables them.
    pub fn pause_votes_until_find_quorum(&self) -> Stm<()> {
        self.pause_votes.write(true)
    }
    /// Find a block on the (from our perspective) finalized chain that gathered enough votes from validators.
    pub fn find_quorum(&self) -> Stm<Option<(BlockHeight, V)>> {
        // Re-enable vote ingestion (see `pause_votes_until_find_quorum`).
        self.pause_votes.write(false)?;
        let quorum_threshold = self.quorum_threshold()?;
        let chain = self.chain.read()?;
        let Some((finalized_height, _)) = chain.get_min() else {
            tracing::debug!("finalized height not found");
            return Ok(None);
        };
        let votes = self.votes.read()?;
        let power_table = self.power_table.read()?;
        let mut weight = 0;
        let mut voters = im::HashSet::new();
        // Walk from the newest block backwards, accumulating voter weight; a
        // vote for a later block implicitly covers the earlier ones on the chain.
        for (block_height, block_hash) in chain.iter().rev() {
            if block_height == finalized_height {
                tracing::debug!(
                    block_height,
                    finalized_height,
                    "finalized height and block height equal, no new proposals"
                );
                break; // This block is already finalized in the ledger, no need to propose it again.
            }
            let Some(block_hash) = block_hash else {
                tracing::debug!(block_height, "null block found in vote proposal");
                continue; // Skip null blocks
            };
            let Some(votes_at_height) = votes.get(block_height) else {
                tracing::debug!(block_height, "no votes");
                continue;
            };
            let Some(votes_for_block) = votes_at_height.get(block_hash) else {
                tracing::debug!(block_height, "no votes for block");
                continue; // We could detect equivocating voters here.
            };
            for vk in votes_for_block {
                if voters.insert(vk.clone()).is_none() {
                    // New voter, get their current weight; it might be 0 if they have been removed.
                    weight += power_table.get(vk).cloned().unwrap_or_default();
                    tracing::debug!(weight, key = ?vk, "new voter");
                }
            }
            tracing::debug!(weight, quorum_threshold, "showdown");
            if weight >= quorum_threshold {
                return Ok(Some((*block_height, block_hash.clone())));
            }
        }
        Ok(None)
    }
    /// Call when a new finalized block is added to the ledger, to clear out all preceding blocks.
    ///
    /// After this operation the minimum item in the chain will be the new finalized block.
    pub fn set_finalized(&self, block_height: BlockHeight, block_hash: V) -> Stm<()> {
        self.chain.update(|chain| {
            // Drop everything below `block_height`, then re-insert it as the new floor.
            let (_, mut chain) = chain.split(&block_height);
            chain.insert(block_height, Some(block_hash));
            chain
        })?;
        // Discard votes at or below the newly finalized height.
        self.votes.update(|votes| votes.split(&block_height).1)?;
        Ok(())
    }
    /// Overwrite the power table after it has changed to a new snapshot.
    ///
    /// This method expects absolute values, it completely replaces the existing powers.
    pub fn set_power_table(&self, power_table: Vec<(K, Weight)>) -> Stm<()> {
        let power_table = im::HashMap::from_iter(power_table);
        // We don't actually have to remove the votes of anyone who is no longer a validator,
        // we just have to make sure to handle the case when they are not in the power table.
        self.power_table.write(power_table)
    }
    /// Update the power table after it has changed with changes.
    ///
    /// This method expects only the updated values, leaving everyone who isn't in it untouched
    pub fn update_power_table(&self, power_updates: Vec<(K, Weight)>) -> Stm<()> {
        if power_updates.is_empty() {
            return Ok(());
        }
        // We don't actually have to remove the votes of anyone who is no longer a validator,
        // we just have to make sure to handle the case when they are not in the power table.
        self.power_table.update_mut(|pt| {
            for (vk, w) in power_updates {
                if w == 0 {
                    // Zero weight means the validator left the set.
                    pt.remove(&vk);
                } else {
                    *pt.entry(vk).or_default() = w;
                }
            }
        })
    }
}
/// Poll the vote tally for new finalized blocks and publish a vote about them if the validator is part of the power table.
/// Poll the vote tally for new finalized blocks and publish a vote about them if the validator is part of the power table.
pub async fn publish_vote_loop<V, F>(
    vote_tally: VoteTally,
    // Throttle votes to maximum 1/interval
    vote_interval: Duration,
    // Publish a vote after a timeout even if it's the same as before.
    vote_timeout: Duration,
    key: libp2p::identity::Keypair,
    subnet_id: ipc_api::subnet_id::SubnetID,
    client: ipc_ipld_resolver::Client<V>,
    to_vote: F,
) where
    F: Fn(BlockHeight, BlockHash) -> V,
    V: Serialize + DeserializeOwned,
{
    let validator_key = ValidatorKey::from(key.public());
    let mut vote_interval = tokio::time::interval(vote_interval);
    vote_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
    // Last vote we observed/published: (height, hash, whether we had power then).
    let mut prev = None;
    loop {
        let prev_height = prev
            .as_ref()
            .map(|(height, _, _)| *height)
            .unwrap_or_default();
        // Wait (up to `vote_timeout`) for the tally to advance past `prev_height`;
        // the STM transaction retries until a newer non-null block hash appears.
        let result = tokio::time::timeout(
            vote_timeout,
            atomically_or_err(|| {
                let next_height = vote_tally.latest_height()?;
                if next_height == prev_height {
                    retry()?;
                }
                let next_hash = match vote_tally.block_hash(next_height)? {
                    Some(next_hash) => next_hash,
                    None => retry()?,
                };
                let has_power = vote_tally.has_power(&validator_key)?;
                if has_power {
                    // Add our own vote to the tally directly rather than expecting a message from the gossip channel.
                    // TODO (ENG-622): I'm not sure gossip messages published by this node would be delivered to it, so this might be the only way.
                    // NOTE: We should not see any other error from this as we just checked that the validator had power,
                    // but for piece of mind let's return and log any potential errors, rather than ignore them.
                    vote_tally.add_vote(validator_key.clone(), next_height, next_hash.clone())?;
                }
                Ok((next_height, next_hash, has_power))
            }),
        )
        .await;
        let (next_height, next_hash, has_power) = match result {
            Ok(Ok(vs)) => vs,
            // Timeout: repeat the previous vote (if any) so peers keep hearing it.
            Err(_) => {
                if let Some(ref vs) = prev {
                    tracing::debug!("vote timeout; re-publishing previous vote");
                    vs.clone()
                } else {
                    tracing::debug!("vote timeout, but no previous vote to re-publish");
                    continue;
                }
            }
            Ok(Err(e)) => {
                tracing::error!(
                    error = e.to_string(),
                    "failed to get next height to vote on"
                );
                continue;
            }
        };
        // NOTE(review): `prev_height > 0` skips publishing on the very first
        // observed height after start-up — presumably to avoid gossiping before
        // the tally has a baseline; confirm the intent.
        if has_power && prev_height > 0 {
            tracing::debug!(block_height = next_height, "publishing finality vote");
            let vote = to_vote(next_height, next_hash.clone());
            match VoteRecord::signed(&key, subnet_id.clone(), vote) {
                Ok(vote) => {
                    if let Err(e) = client.publish_vote(vote) {
                        tracing::error!(error = e.to_string(), "failed to publish vote");
                    }
                }
                Err(e) => {
                    tracing::error!(error = e.to_string(), "failed to sign vote");
                }
            }
            // Throttle vote gossiping at periods of fast syncing. For example if we create a subnet contract on Friday
            // and bring up a local testnet on Monday, all nodes would be ~7000 blocks behind a Lotus parent. CometBFT
            // would be in-sync, and they could rapidly try to gossip votes on previous heights. GossipSub might not like
            // that, and we can just cast our votes every now and then to finalize multiple blocks.
            vote_interval.tick().await;
        }
        prev = Some((next_height, next_hash, has_power));
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/cache.rs | fendermint/vm/topdown/src/cache.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use num_traits::PrimInt;
use std::collections::{vec_deque, VecDeque};
use std::fmt::Debug;
/// The key value cache such that:
/// 1. Key must be numeric
/// 2. Keys must be sequential
#[derive(Clone)]
pub struct SequentialKeyCache<K, V> {
    // Fixed step between consecutive keys; enables O(1) index arithmetic below.
    increment: K,
    /// The underlying data, kept sorted by key (append-only at the back).
    data: VecDeque<(K, V)>,
}
/// The result enum for sequential cache insertion
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum SequentialAppendError {
    /// The key is larger than `upper_bound + increment`, leaving a gap
    AboveBound,
    /// The key has already been inserted
    AlreadyInserted,
    /// The key is smaller than the current lower bound
    BelowBound,
}
impl<K: PrimInt + Debug, V> Default for SequentialKeyCache<K, V> {
    // Default to a key increment of 1.
    fn default() -> Self {
        Self::sequential()
    }
}
impl<K: PrimInt + Debug, V> SequentialKeyCache<K, V> {
    /// Create a cache whose consecutive keys differ by `increment`.
    pub fn new(increment: K) -> Self {
        Self {
            increment,
            data: Default::default(),
        }
    }
    /// Create a cache with key increment 1
    pub fn sequential() -> Self {
        Self {
            increment: K::one(),
            data: Default::default(),
        }
    }
    /// The fixed step between consecutive keys.
    pub fn increment(&self) -> K {
        self.increment
    }
    /// Number of entries currently stored.
    pub fn size(&self) -> usize {
        self.data.len()
    }
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }
    /// Largest stored key, or `None` when empty.
    pub fn upper_bound(&self) -> Option<K> {
        self.data.back().map(|v| v.0)
    }
    /// Smallest stored key, or `None` when empty.
    pub fn lower_bound(&self) -> Option<K> {
        self.data.front().map(|v| v.0)
    }
    fn within_bound(&self, k: K) -> bool {
        match (self.lower_bound(), self.upper_bound()) {
            (Some(lower), Some(upper)) => lower <= k && k <= upper,
            (None, None) => false,
            // other states are not reachable, even if there is one entry, both upper and
            // lower bounds should be the same, both should be Some.
            _ => unreachable!(),
        }
    }
    /// Value stored under `key`, or `None` when the key lies outside the range.
    pub fn get_value(&self, key: K) -> Option<&V> {
        if !self.within_bound(key) {
            return None;
        }
        let lower = self.lower_bound().unwrap();
        // safe to unwrap as index must be uint
        let index = ((key - lower) / self.increment).to_usize().unwrap();
        self.data.get(index).map(|entry| &entry.1)
    }
    /// Iterate values with keys `>= start`, clamped to the stored range.
    pub fn values_from(&self, start: K) -> ValueIter<K, V> {
        if self.is_empty() {
            return ValueIter {
                i: self.data.iter(),
            };
        }
        let lower = self.lower_bound().unwrap();
        // safe to unwrap as index must be uint
        let index = ((start.max(lower) - lower) / self.increment)
            .to_usize()
            .unwrap();
        ValueIter {
            i: self.data.range(index..),
        }
    }
    /// Iterate values with keys in `start..=end`, clamped to the stored range.
    pub fn values_within(&self, start: K, end: K) -> ValueIter<K, V> {
        if self.is_empty() {
            return ValueIter {
                i: self.data.iter(),
            };
        }
        let lower = self.lower_bound().unwrap();
        let upper = self.upper_bound().unwrap();
        // safe to unwrap as index must be uint
        let end_idx = ((end.min(upper) - lower) / self.increment)
            .to_usize()
            .unwrap();
        let start_idx = ((start.max(lower) - lower) / self.increment)
            .to_usize()
            .unwrap();
        ValueIter {
            i: self.data.range(start_idx..=end_idx),
        }
    }
    /// Iterate all stored values in key order.
    pub fn values(&self) -> ValueIter<K, V> {
        ValueIter {
            i: self.data.iter(),
        }
    }
    /// Removes all the keys strictly below the target value; the target itself is kept.
    pub fn remove_key_below(&mut self, key: K) {
        while let Some((k, _)) = self.data.front() {
            if *k < key {
                self.data.pop_front();
                continue;
            }
            break;
        }
    }
    /// Removes all the keys strictly above the target value; the target itself is kept.
    pub fn remove_key_above(&mut self, key: K) {
        while let Some((k, _)) = self.data.back() {
            if *k > key {
                self.data.pop_back();
                continue;
            }
            break;
        }
    }
    /// Insert the key and value pair only if the key is exactly `upper_bound + increment`
    /// (or the cache is empty); otherwise report which bound was violated.
    pub fn append(&mut self, key: K, val: V) -> Result<(), SequentialAppendError> {
        let expected_next_key = if let Some(upper) = self.upper_bound() {
            upper.add(self.increment)
        } else {
            // no upper bound means no data yet, push back directly
            self.data.push_back((key, val));
            return Ok(());
        };
        if expected_next_key == key {
            self.data.push_back((key, val));
            return Ok(());
        }
        if expected_next_key < key {
            return Err(SequentialAppendError::AboveBound);
        }
        // safe to unwrap as we must have lower bound at this stage
        let lower = self.lower_bound().unwrap();
        if key < lower {
            Err(SequentialAppendError::BelowBound)
        } else {
            Err(SequentialAppendError::AlreadyInserted)
        }
    }
}
/// Iterator over the values of a `SequentialKeyCache`, in ascending key order.
pub struct ValueIter<'a, K, V> {
    // underlying iterator over the cache's (key, value) pairs
    i: vec_deque::Iter<'a, (K, V)>,
}
impl<'a, K, V> Iterator for ValueIter<'a, K, V> {
    type Item = &'a V;

    /// Advance the underlying pair iterator and project out the value half.
    fn next(&mut self) -> Option<Self::Item> {
        self.i.next().map(|(_, value)| value)
    }
}
#[cfg(test)]
mod tests {
    use crate::cache::SequentialKeyCache;

    #[test]
    fn insert_works() {
        let mut cache = SequentialKeyCache::new(1);

        for key in 9..100 {
            cache.append(key, key).unwrap();
        }

        // every appended key is retrievable ...
        for key in 9..100 {
            assert_eq!(cache.get_value(key), Some(&key));
        }
        // ... and a key past the end is not
        assert_eq!(cache.get_value(100), None);

        assert_eq!(cache.lower_bound(), Some(9));
        assert_eq!(cache.upper_bound(), Some(99));
    }

    #[test]
    fn range_works() {
        let mut cache = SequentialKeyCache::new(1);
        for key in 1..100 {
            cache.append(key, key).unwrap();
        }

        // start inside the cached range
        let collected: Vec<_> = cache.values_from(50).cloned().collect();
        assert_eq!(collected, (50..100).collect::<Vec<_>>());

        // start below the lower bound is clamped to the first entry
        let collected: Vec<_> = cache.values_from(0).cloned().collect();
        assert_eq!(collected, (1..100).collect::<Vec<_>>());

        // inclusive sub-range
        let collected: Vec<_> = cache.values_within(50, 60).cloned().collect();
        assert_eq!(collected, (50..=60).collect::<Vec<_>>());

        // a range wider than the cache is clamped on both sides
        let collected: Vec<_> = cache.values_within(0, 1000).cloned().collect();
        assert_eq!(collected, (1..100).collect::<Vec<_>>());

        // full iteration
        let collected: Vec<_> = cache.values().cloned().collect();
        assert_eq!(collected, (1..100).collect::<Vec<_>>());
    }

    #[test]
    fn remove_works() {
        let mut cache = SequentialKeyCache::new(1);
        for key in 0..100 {
            cache.append(key, key).unwrap();
        }

        cache.remove_key_below(10);
        cache.remove_key_above(50);

        // only the middle of the sequence survives, bounds inclusive
        let remaining: Vec<_> = cache.values().cloned().collect();
        assert_eq!(remaining, (10..51).collect::<Vec<_>>());
    }

    #[test]
    fn diff_increment_works() {
        let increment = 101;
        let mut cache = SequentialKeyCache::new(increment);
        for key in 0..100 {
            cache.append(key * increment, key).unwrap();
        }

        // starting one past the first grid step lands on the second entry
        let values: Vec<_> = cache.values_from(increment + 1).cloned().collect();
        assert_eq!(values, (1..100).collect::<Vec<_>>());
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/proxy.rs | fendermint/vm/topdown/src/proxy.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::BlockHeight;
use anyhow::anyhow;
use async_trait::async_trait;
use fvm_shared::clock::ChainEpoch;
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::StakingChangeRequest;
use ipc_api::subnet_id::SubnetID;
use ipc_provider::manager::{GetBlockHashResult, TopDownQueryPayload};
use ipc_provider::IpcProvider;
use tracing::instrument;
/// The interface for querying the state of the parent subnet.
#[async_trait]
pub trait ParentQueryProxy {
    /// Get the parent chain head block number or block height
    async fn get_chain_head_height(&self) -> anyhow::Result<BlockHeight>;

    /// Get the genesis epoch of the child subnet, i.e. the epoch that the subnet was created in
    /// the parent subnet.
    async fn get_genesis_epoch(&self) -> anyhow::Result<BlockHeight>;

    /// Get the block hash at the target height.
    async fn get_block_hash(&self, height: BlockHeight) -> anyhow::Result<GetBlockHashResult>;

    /// Get the top down messages at the given epoch, together with the block hash at that height.
    async fn get_top_down_msgs(
        &self,
        height: BlockHeight,
    ) -> anyhow::Result<TopDownQueryPayload<Vec<IpcEnvelope>>>;

    /// Get the validator change set at the specified height.
    async fn get_validator_changes(
        &self,
        height: BlockHeight,
    ) -> anyhow::Result<TopDownQueryPayload<Vec<StakingChangeRequest>>>;
}
/// The proxy to the subnet's parent
pub struct IPCProviderProxy {
    ipc_provider: IpcProvider,
    /// The parent subnet of the child subnet we are targeting. We could derive it from the
    /// child subnet, but we store it separately so that we don't have to derive it every time.
    parent_subnet: SubnetID,
    /// The child subnet that this node belongs to.
    child_subnet: SubnetID,
}
impl IPCProviderProxy {
    /// Create a proxy for `target_subnet`, deriving and caching its parent subnet.
    ///
    /// Fails when `target_subnet` has no parent (i.e. it is a root subnet).
    pub fn new(ipc_provider: IpcProvider, target_subnet: SubnetID) -> anyhow::Result<Self> {
        let Some(parent_subnet) = target_subnet.parent() else {
            return Err(anyhow!("subnet does not have parent"));
        };
        Ok(Self {
            ipc_provider,
            parent_subnet,
            child_subnet: target_subnet,
        })
    }
}
#[async_trait]
impl ParentQueryProxy for IPCProviderProxy {
    /// Get the chain head height of the parent subnet.
    async fn get_chain_head_height(&self) -> anyhow::Result<BlockHeight> {
        let height = self.ipc_provider.chain_head(&self.parent_subnet).await?;
        Ok(height as BlockHeight)
    }

    /// Get the genesis epoch of the child subnet, i.e. the epoch that the subnet was created in
    /// the parent subnet.
    async fn get_genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
        let height = self.ipc_provider.genesis_epoch(&self.child_subnet).await?;
        Ok(height as BlockHeight)
    }

    /// Get the block hash at the target height.
    #[instrument(skip(self))]
    async fn get_block_hash(&self, height: BlockHeight) -> anyhow::Result<GetBlockHashResult> {
        self.ipc_provider
            .get_block_hash(&self.parent_subnet, height as ChainEpoch)
            .await
    }

    /// Get the top down messages at the given height, sorted ascending by nonce.
    #[instrument(skip(self))]
    async fn get_top_down_msgs(
        &self,
        height: BlockHeight,
    ) -> anyhow::Result<TopDownQueryPayload<Vec<IpcEnvelope>>> {
        self.ipc_provider
            .get_top_down_msgs(&self.child_subnet, height as ChainEpoch)
            .await
            .map(|mut v| {
                // sort ascending, we don't assume the messages are ordered
                v.value.sort_by_key(|msg| msg.nonce);
                v
            })
    }

    /// Get the validator change set at the given height, sorted ascending by
    /// configuration number.
    #[instrument(skip(self))]
    async fn get_validator_changes(
        &self,
        height: BlockHeight,
    ) -> anyhow::Result<TopDownQueryPayload<Vec<StakingChangeRequest>>> {
        self.ipc_provider
            .get_validator_changeset(&self.child_subnet, height as ChainEpoch)
            .await
            .map(|mut v| {
                // sort ascending, we don't assume the changes are ordered
                v.value
                    .sort_by_key(|change| change.configuration_number);
                v
            })
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/finality/fetch.rs | fendermint/vm/topdown/src/finality/fetch.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::finality::null::FinalityWithNull;
use crate::finality::ParentViewPayload;
use crate::proxy::ParentQueryProxy;
use crate::{
handle_null_round, BlockHash, BlockHeight, Config, Error, IPCParentFinality,
ParentFinalityProvider, ParentViewProvider,
};
use async_stm::{Stm, StmResult};
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::StakingChangeRequest;
use std::sync::Arc;
/// The finality provider that performs io to the parent if not found in cache
#[derive(Clone)]
pub struct CachedFinalityProvider<T> {
    // in-memory parent view / finality state (see `crate::finality::null::FinalityWithNull`)
    inner: FinalityWithNull,
    config: Config,
    /// The ipc client proxy that works as a back up if cache miss
    parent_client: Arc<T>,
}
/// Exponential backoff for futures.
///
/// Evaluates `$f`; on error it retries up to `$retries` times, sleeping `$wait`
/// before the first retry and doubling the wait after each attempt. Errors caused
/// by a null round are never retried, since retrying cannot change the outcome.
macro_rules! retry {
    ($wait:expr, $retries:expr, $f:expr) => {{
        let mut retries = $retries;
        let mut wait = $wait;

        loop {
            let res = $f;
            if let Err(e) = &res {
                // there is no point in retrying if the current block is null round
                if crate::is_null_round_str(&e.to_string()) {
                    tracing::warn!(
                        "cannot query ipc parent_client due to null round, skip retry"
                    );
                    break res;
                }

                tracing::warn!(
                    error = e.to_string(),
                    retries,
                    wait = ?wait,
                    "cannot query ipc parent_client"
                );

                if retries > 0 {
                    retries -= 1;

                    tokio::time::sleep(wait).await;

                    // exponential backoff: double the wait before the next attempt
                    wait *= 2;
                    continue;
                }
            }

            break res;
        }
    }};
}
#[async_trait::async_trait]
impl<T: ParentQueryProxy + Send + Sync + 'static> ParentViewProvider for CachedFinalityProvider<T> {
    fn genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
        self.inner.genesis_epoch()
    }

    /// Collect the validator change sets for every height in `from..=to`, both inclusive.
    async fn validator_changes_from(
        &self,
        from: BlockHeight,
        to: BlockHeight,
    ) -> anyhow::Result<Vec<StakingChangeRequest>> {
        let mut changes = vec![];
        for height in from..=to {
            let batch = self.validator_changes(height).await?;
            tracing::debug!(
                number_of_messages = batch.len(),
                height,
                "fetched validator change set",
            );
            changes.extend(batch);
        }
        Ok(changes)
    }

    /// Get top down messages in the range `from` to `to`, both inclusive. For the check to be
    /// valid, one should not pass a height `to` that is a null block, otherwise the check is
    /// useless. In debug mode, it will throw an error.
    async fn top_down_msgs_from(
        &self,
        from: BlockHeight,
        to: BlockHeight,
    ) -> anyhow::Result<Vec<IpcEnvelope>> {
        let mut messages = vec![];
        for height in from..=to {
            let batch = self.top_down_msgs(height).await?;
            tracing::debug!(
                number_of_top_down_messages = batch.len(),
                height,
                "obtained topdown messages",
            );
            messages.extend(batch);
        }
        Ok(messages)
    }
}
// Pure delegation: the finality proposal lifecycle is implemented by the inner
// `FinalityWithNull` provider.
impl<T: ParentQueryProxy + Send + Sync + 'static> ParentFinalityProvider
    for CachedFinalityProvider<T>
{
    /// Produce the next finality proposal, if one is available.
    fn next_proposal(&self) -> Stm<Option<IPCParentFinality>> {
        self.inner.next_proposal()
    }

    /// Check whether the given finality proposal is acceptable.
    fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm<bool> {
        self.inner.check_proposal(proposal)
    }

    /// Record a newly committed finality, given the previously committed one (if any).
    fn set_new_finality(
        &self,
        finality: IPCParentFinality,
        previous_finality: Option<IPCParentFinality>,
    ) -> Stm<()> {
        self.inner.set_new_finality(finality, previous_finality)
    }
}
impl<T: ParentQueryProxy + Send + Sync + 'static> CachedFinalityProvider<T> {
    /// Creates an uninitialized provider.
    ///
    /// We need this because `fendermint` has yet to be initialized and might
    /// not be able to provide an existing finality from the storage. This provider requires an
    /// existing committed finality. Providing the finality will enable other functionalities.
    pub async fn uninitialized(config: Config, parent_client: Arc<T>) -> anyhow::Result<Self> {
        let genesis_epoch = parent_client.get_genesis_epoch().await?;
        Ok(Self::new(config, genesis_epoch, None, parent_client))
    }

    /// Validator changes at `height`: served from the cache when present, otherwise
    /// fetched from the parent client with exponential retries. Null-round errors
    /// are converted by `handle_null_round` (falling back to an empty list).
    async fn validator_changes(
        &self,
        height: BlockHeight,
    ) -> anyhow::Result<Vec<StakingChangeRequest>> {
        if let Some(cached) = self.inner.validator_changes(height).await? {
            return Ok(cached);
        }

        let fetched = retry!(
            self.config.exponential_back_off,
            self.config.exponential_retry_limit,
            self.parent_client
                .get_validator_changes(height)
                .await
                .map(|r| r.value)
        );

        handle_null_round(fetched, Vec::new)
    }

    /// Top down messages at `height`: served from the cache when present, otherwise
    /// fetched from the parent client with exponential retries. Null-round errors
    /// are converted by `handle_null_round` (falling back to an empty list).
    async fn top_down_msgs(&self, height: BlockHeight) -> anyhow::Result<Vec<IpcEnvelope>> {
        if let Some(cached) = self.inner.top_down_msgs(height).await? {
            return Ok(cached);
        }

        let fetched = retry!(
            self.config.exponential_back_off,
            self.config.exponential_retry_limit,
            self.parent_client
                .get_top_down_msgs(height)
                .await
                .map(|r| r.value)
        );

        handle_null_round(fetched, Vec::new)
    }
}
impl<T> CachedFinalityProvider<T> {
    /// Construct a provider wrapping a `FinalityWithNull` built from the config,
    /// genesis epoch and (optionally) the last committed finality.
    pub(crate) fn new(
        config: Config,
        genesis_epoch: BlockHeight,
        committed_finality: Option<IPCParentFinality>,
        parent_client: Arc<T>,
    ) -> Self {
        let inner = FinalityWithNull::new(config.clone(), genesis_epoch, committed_finality);
        Self {
            inner,
            config,
            parent_client,
        }
    }

    /// The block hash at `height`, if tracked by the inner provider.
    pub fn block_hash(&self, height: BlockHeight) -> Stm<Option<BlockHash>> {
        self.inner.block_hash_at_height(height)
    }

    /// The latest height present in the cache only (contrast with `latest_height`,
    /// which also considers the last committed finality).
    pub fn latest_height_in_cache(&self) -> Stm<Option<BlockHeight>> {
        self.inner.latest_height_in_cache()
    }

    /// Get the latest height tracked in the provider, includes both cache and last committed finality
    pub fn latest_height(&self) -> Stm<Option<BlockHeight>> {
        self.inner.latest_height()
    }

    /// The last committed finality, if one has been set.
    pub fn last_committed_finality(&self) -> Stm<Option<IPCParentFinality>> {
        self.inner.last_committed_finality()
    }

    /// Clear the cache and set the committed finality to the provided value
    pub fn reset(&self, finality: IPCParentFinality) -> Stm<()> {
        self.inner.reset(finality)
    }

    /// Record a newly observed parent block at `height`; a `None` payload marks a
    /// null round (see the test fixtures in this file).
    pub fn new_parent_view(
        &self,
        height: BlockHeight,
        maybe_payload: Option<ParentViewPayload>,
    ) -> StmResult<(), Error> {
        self.inner.new_parent_view(height, maybe_payload)
    }

    /// Returns the number of blocks cached.
    pub fn cached_blocks(&self) -> Stm<BlockHeight> {
        self.inner.cached_blocks()
    }

    /// First non-null block relative to `height` (delegates to the inner provider).
    pub fn first_non_null_block(&self, height: BlockHeight) -> Stm<Option<BlockHeight>> {
        self.inner.first_non_null_block(height)
    }
}
#[cfg(test)]
mod tests {
use crate::finality::ParentViewPayload;
use crate::proxy::ParentQueryProxy;
use crate::{
BlockHeight, CachedFinalityProvider, Config, IPCParentFinality, ParentViewProvider,
SequentialKeyCache, NULL_ROUND_ERR_MSG,
};
use anyhow::anyhow;
use async_trait::async_trait;
use fvm_shared::address::Address;
use fvm_shared::econ::TokenAmount;
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::{StakingChange, StakingChangeRequest, StakingOperation};
use ipc_api::subnet_id::SubnetID;
use ipc_provider::manager::{GetBlockHashResult, TopDownQueryPayload};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
/// Creates a mock of a new parent blockchain view. The key is the height and the value is the
/// block hash. If block hash is None, it means the current height is a null block.
macro_rules! new_parent_blocks {
($($key:expr => $val:expr),* ,) => (
hash_map!($($key => $val),*)
);
($($key:expr => $val:expr),*) => ({
let mut map = SequentialKeyCache::sequential();
$( map.append($key, $val).unwrap(); )*
map
});
}
struct TestParentProxy {
blocks: SequentialKeyCache<BlockHeight, Option<ParentViewPayload>>,
}
#[async_trait]
impl ParentQueryProxy for TestParentProxy {
async fn get_chain_head_height(&self) -> anyhow::Result<BlockHeight> {
Ok(self.blocks.upper_bound().unwrap())
}
async fn get_genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
Ok(self.blocks.lower_bound().unwrap() - 1)
}
async fn get_block_hash(&self, height: BlockHeight) -> anyhow::Result<GetBlockHashResult> {
let r = self.blocks.get_value(height).unwrap();
if r.is_none() {
return Err(anyhow!(NULL_ROUND_ERR_MSG));
}
for h in (self.blocks.lower_bound().unwrap()..height).rev() {
let v = self.blocks.get_value(h).unwrap();
if v.is_none() {
continue;
}
return Ok(GetBlockHashResult {
parent_block_hash: v.clone().unwrap().0,
block_hash: r.clone().unwrap().0,
});
}
panic!("invalid testing data")
}
async fn get_top_down_msgs(
&self,
height: BlockHeight,
) -> anyhow::Result<TopDownQueryPayload<Vec<IpcEnvelope>>> {
let r = self.blocks.get_value(height).cloned().unwrap();
if r.is_none() {
return Err(anyhow!(NULL_ROUND_ERR_MSG));
}
let r = r.unwrap();
Ok(TopDownQueryPayload {
value: r.2,
block_hash: r.0,
})
}
async fn get_validator_changes(
&self,
height: BlockHeight,
) -> anyhow::Result<TopDownQueryPayload<Vec<StakingChangeRequest>>> {
let r = self.blocks.get_value(height).cloned().unwrap();
if r.is_none() {
return Err(anyhow!(NULL_ROUND_ERR_MSG));
}
let r = r.unwrap();
Ok(TopDownQueryPayload {
value: r.1,
block_hash: r.0,
})
}
}
fn new_provider(
blocks: SequentialKeyCache<BlockHeight, Option<ParentViewPayload>>,
) -> CachedFinalityProvider<TestParentProxy> {
let config = Config {
chain_head_delay: 2,
polling_interval: Default::default(),
exponential_back_off: Default::default(),
exponential_retry_limit: 0,
max_proposal_range: Some(1),
max_cache_blocks: None,
proposal_delay: None,
};
let genesis_epoch = blocks.lower_bound().unwrap();
let proxy = Arc::new(TestParentProxy { blocks });
let committed_finality = IPCParentFinality {
height: genesis_epoch,
block_hash: vec![0; 32],
};
CachedFinalityProvider::new(config, genesis_epoch, Some(committed_finality), proxy)
}
fn new_cross_msg(nonce: u64) -> IpcEnvelope {
let subnet_id = SubnetID::new(10, vec![Address::new_id(1000)]);
let mut msg = IpcEnvelope::new_fund_msg(
&subnet_id,
&Address::new_id(1),
&Address::new_id(2),
TokenAmount::from_atto(100),
)
.unwrap();
msg.nonce = nonce;
msg
}
fn new_validator_changes(configuration_number: u64) -> StakingChangeRequest {
StakingChangeRequest {
configuration_number,
change: StakingChange {
op: StakingOperation::Deposit,
payload: vec![],
validator: Address::new_id(1),
},
}
}
#[tokio::test]
async fn test_retry() {
struct Test {
nums_run: AtomicUsize,
}
impl Test {
async fn run(&self) -> Result<(), &'static str> {
self.nums_run.fetch_add(1, Ordering::SeqCst);
Err("mocked error")
}
}
let t = Test {
nums_run: AtomicUsize::new(0),
};
let res = retry!(Duration::from_secs(1), 2, t.run().await);
assert!(res.is_err());
// execute the first time, retries twice
assert_eq!(t.nums_run.load(Ordering::SeqCst), 3);
}
#[tokio::test]
async fn test_query_topdown_msgs() {
let parent_blocks = new_parent_blocks!(
100 => Some((vec![0; 32], vec![], vec![new_cross_msg(0)])), // genesis block
101 => Some((vec![1; 32], vec![], vec![new_cross_msg(1)])),
102 => Some((vec![2; 32], vec![], vec![new_cross_msg(2)])),
103 => Some((vec![3; 32], vec![], vec![new_cross_msg(3)])),
104 => None,
105 => None,
106 => Some((vec![6; 32], vec![], vec![new_cross_msg(6)]))
);
let provider = new_provider(parent_blocks);
let messages = provider.top_down_msgs_from(100, 106).await.unwrap();
assert_eq!(
messages,
vec![
new_cross_msg(0),
new_cross_msg(1),
new_cross_msg(2),
new_cross_msg(3),
new_cross_msg(6),
]
)
}
#[tokio::test]
async fn test_query_validator_changes() {
let parent_blocks = new_parent_blocks!(
100 => Some((vec![0; 32], vec![new_validator_changes(0)], vec![])), // genesis block
101 => Some((vec![1; 32], vec![new_validator_changes(1)], vec![])),
102 => Some((vec![2; 32], vec![], vec![])),
103 => Some((vec![3; 32], vec![new_validator_changes(3)], vec![])),
104 => None,
105 => None,
106 => Some((vec![6; 32], vec![new_validator_changes(6)], vec![]))
);
let provider = new_provider(parent_blocks);
let messages = provider.validator_changes_from(100, 106).await.unwrap();
assert_eq!(messages.len(), 4)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.