repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/finality/mod.rs | fendermint/vm/topdown/src/finality/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod fetch;
mod null;
use crate::error::Error;
use crate::BlockHash;
use async_stm::{abort, StmResult};
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::StakingChangeRequest;
pub use fetch::CachedFinalityProvider;
/// Payload of a non-null parent block view: the parent block hash together with
/// the validator change requests and top-down cross messages seen at that height.
pub(crate) type ParentViewPayload = (BlockHash, Vec<StakingChangeRequest>, Vec<IpcEnvelope>);
/// Ensures that the `u64` key extracted by `f` increases by exactly 1 between
/// consecutive elements of `msgs`; otherwise aborts the STM transaction with
/// [`Error::NotSequential`]. Empty and single-element slices are trivially
/// sequential.
fn ensure_sequential<T, F: Fn(&T) -> u64>(msgs: &[T], f: F) -> StmResult<(), Error> {
    // `windows(2)` yields nothing for slices shorter than two elements, which
    // covers the empty/single cases without a separate guard or `unwrap`.
    for pair in msgs.windows(2) {
        let prev = f(&pair[0]);
        let next = f(&pair[1]);
        // `checked_add` avoids the debug-mode overflow panic (and the silent
        // release-mode wrap that would accept a `u64::MAX -> 0` step) which the
        // previous `prev + 1` comparison allowed.
        if prev.checked_add(1) != Some(next) {
            return abort(Error::NotSequential);
        }
    }
    Ok(())
}
pub(crate) fn validator_changes(p: &ParentViewPayload) -> Vec<StakingChangeRequest> {
p.1.clone()
}
pub(crate) fn topdown_cross_msgs(p: &ParentViewPayload) -> Vec<IpcEnvelope> {
p.2.clone()
}
#[cfg(test)]
mod tests {
    use crate::proxy::ParentQueryProxy;
    use crate::{
        BlockHeight, CachedFinalityProvider, Config, IPCParentFinality, ParentFinalityProvider,
    };
    use async_stm::atomically_or_err;
    use async_trait::async_trait;
    use ipc_api::cross::IpcEnvelope;
    use ipc_api::staking::StakingChangeRequest;
    use ipc_provider::manager::{GetBlockHashResult, TopDownQueryPayload};
    use std::sync::Arc;
    use tokio::time::Duration;

    /// Parent query stub returning fixed, empty responses; the tests drive the
    /// provider by injecting parent views directly rather than via polling.
    struct MockedParentQuery;

    #[async_trait]
    impl ParentQueryProxy for MockedParentQuery {
        async fn get_chain_head_height(&self) -> anyhow::Result<BlockHeight> {
            Ok(1)
        }

        async fn get_genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
            Ok(10)
        }

        async fn get_block_hash(&self, _height: BlockHeight) -> anyhow::Result<GetBlockHashResult> {
            Ok(GetBlockHashResult::default())
        }

        async fn get_top_down_msgs(
            &self,
            _height: BlockHeight,
        ) -> anyhow::Result<TopDownQueryPayload<Vec<IpcEnvelope>>> {
            Ok(TopDownQueryPayload {
                value: vec![],
                block_hash: vec![],
            })
        }

        async fn get_validator_changes(
            &self,
            _height: BlockHeight,
        ) -> anyhow::Result<TopDownQueryPayload<Vec<StakingChangeRequest>>> {
            Ok(TopDownQueryPayload {
                value: vec![],
                block_hash: vec![],
            })
        }
    }

    fn mocked_agent_proxy() -> Arc<MockedParentQuery> {
        Arc::new(MockedParentQuery)
    }

    /// Finality at height 0 with an all-zero hash, used as the starting commitment.
    fn genesis_finality() -> IPCParentFinality {
        IPCParentFinality {
            height: 0,
            block_hash: vec![0; 32],
        }
    }

    /// Builds a provider at genesis epoch 10 with the genesis finality committed.
    fn new_provider() -> CachedFinalityProvider<MockedParentQuery> {
        let config = Config {
            chain_head_delay: 20,
            polling_interval: Duration::from_secs(10),
            exponential_back_off: Duration::from_secs(10),
            exponential_retry_limit: 10,
            max_proposal_range: None,
            max_cache_blocks: None,
            proposal_delay: None,
        };
        CachedFinalityProvider::new(config, 10, Some(genesis_finality()), mocked_agent_proxy())
    }

    /// Committing a finality above all cached views clears the cache and
    /// records the new last-committed finality.
    #[tokio::test]
    async fn test_finality_works() {
        let provider = new_provider();

        atomically_or_err(|| {
            // inject data
            for i in 10..=100 {
                provider.new_parent_view(i, Some((vec![1u8; 32], vec![], vec![])))?;
            }

            let target_block = 120;
            let finality = IPCParentFinality {
                height: target_block,
                block_hash: vec![1u8; 32],
            };
            provider.set_new_finality(finality.clone(), Some(genesis_finality()))?;

            // all cache should be cleared
            let r = provider.next_proposal()?;
            assert!(r.is_none());

            let f = provider.last_committed_finality()?;
            assert_eq!(f, Some(finality));

            Ok(())
        })
        .await
        .unwrap();
    }

    /// A proposal whose height/hash match a cached view just above the last
    /// committed height passes `check_proposal`.
    #[tokio::test]
    async fn test_check_proposal_works() {
        let provider = new_provider();

        atomically_or_err(|| {
            let target_block = 100;

            // inject data
            provider.new_parent_view(target_block, Some((vec![1u8; 32], vec![], vec![])))?;
            provider.set_new_finality(
                IPCParentFinality {
                    height: target_block - 1,
                    block_hash: vec![1u8; 32],
                },
                Some(genesis_finality()),
            )?;

            let finality = IPCParentFinality {
                height: target_block,
                block_hash: vec![1u8; 32],
            };

            assert!(provider.check_proposal(&finality).is_ok());

            Ok(())
        })
        .await
        .unwrap();
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/finality/null.rs | fendermint/vm/topdown/src/finality/null.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::finality::{
ensure_sequential, topdown_cross_msgs, validator_changes, ParentViewPayload,
};
use crate::{BlockHash, BlockHeight, Config, Error, IPCParentFinality, SequentialKeyCache};
use async_stm::{abort, atomically, Stm, StmResult, TVar};
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::StakingChangeRequest;
use std::cmp::min;
use fendermint_tracing::emit;
use fendermint_vm_event::ParentFinalityCommitted;
/// Finality provider that can handle null blocks
#[derive(Clone)]
pub struct FinalityWithNull {
    // Tuning knobs (proposal range, cache limits, proposal delay).
    config: Config,
    // Epoch at which the subnet was created on the parent; returned as-is by `genesis_epoch()`.
    genesis_epoch: BlockHeight,
    /// Cached data that always syncs with the latest parent chain proactively.
    /// A `None` value at a height marks a null round.
    cached_data: TVar<SequentialKeyCache<BlockHeight, Option<ParentViewPayload>>>,
    /// This is a in memory view of the committed parent finality. We need this as a starting point
    /// for populating the cache
    last_committed_finality: TVar<Option<IPCParentFinality>>,
}
impl FinalityWithNull {
    /// Creates a provider starting from `committed_finality` (if any) with an
    /// empty, sequential parent-view cache.
    pub fn new(
        config: Config,
        genesis_epoch: BlockHeight,
        committed_finality: Option<IPCParentFinality>,
    ) -> Self {
        Self {
            config,
            genesis_epoch,
            cached_data: TVar::new(SequentialKeyCache::sequential()),
            last_committed_finality: TVar::new(committed_finality),
        }
    }

    /// Returns the genesis epoch this provider was created with.
    pub fn genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
        Ok(self.genesis_epoch)
    }

    /// Validator changes observed at `height`. `Ok(None)` means the height is
    /// not in the cache; a cached null round yields an empty vector.
    pub async fn validator_changes(
        &self,
        height: BlockHeight,
    ) -> anyhow::Result<Option<Vec<StakingChangeRequest>>> {
        let r = atomically(|| self.handle_null_block(height, validator_changes, Vec::new)).await;
        Ok(r)
    }

    /// Top-down cross messages observed at `height`. `Ok(None)` means the
    /// height is not in the cache; a cached null round yields an empty vector.
    pub async fn top_down_msgs(
        &self,
        height: BlockHeight,
    ) -> anyhow::Result<Option<Vec<IpcEnvelope>>> {
        let r = atomically(|| self.handle_null_block(height, topdown_cross_msgs, Vec::new)).await;
        Ok(r)
    }

    /// Returns a clone of the last committed parent finality, if any.
    pub fn last_committed_finality(&self) -> Stm<Option<IPCParentFinality>> {
        self.last_committed_finality.read_clone()
    }

    /// Clear the cache and set the committed finality to the provided value
    pub fn reset(&self, finality: IPCParentFinality) -> Stm<()> {
        self.cached_data.write(SequentialKeyCache::sequential())?;
        self.last_committed_finality.write(Some(finality))
    }

    /// Records a new parent view at `height`; a `None` payload marks a null round.
    /// Heights must be appended sequentially or the transaction aborts.
    pub fn new_parent_view(
        &self,
        height: BlockHeight,
        maybe_payload: Option<ParentViewPayload>,
    ) -> StmResult<(), Error> {
        if let Some((block_hash, validator_changes, top_down_msgs)) = maybe_payload {
            self.parent_block_filled(height, block_hash, validator_changes, top_down_msgs)
        } else {
            self.parent_null_round(height)
        }
    }

    /// Produces the next finality proposal from the cache, or `None` when there
    /// is nothing new to propose yet.
    pub fn next_proposal(&self) -> Stm<Option<IPCParentFinality>> {
        let height = if let Some(h) = self.propose_next_height()? {
            h
        } else {
            return Ok(None);
        };

        // safe to unwrap as we make sure null height will not be proposed
        let block_hash = self.block_hash_at_height(height)?.unwrap();

        let proposal = IPCParentFinality { height, block_hash };
        tracing::debug!(proposal = proposal.to_string(), "new proposal");
        Ok(Some(proposal))
    }

    /// Validates an incoming proposal: its height must be known and newer than
    /// the last commitment, and its block hash must match the cached view.
    pub fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm<bool> {
        if !self.check_height(proposal)? {
            return Ok(false);
        }
        self.check_block_hash(proposal)
    }

    /// Commits `finality`: drops cached views strictly below its height (the
    /// view at the height itself is kept for delayed execution), records the
    /// commitment, and emits a `ParentFinalityCommitted` event.
    pub fn set_new_finality(
        &self,
        finality: IPCParentFinality,
        previous_finality: Option<IPCParentFinality>,
    ) -> Stm<()> {
        debug_assert!(previous_finality == self.last_committed_finality.read_clone()?);

        // the height to clear
        let height = finality.height;

        self.cached_data.update(|mut cache| {
            // only remove cache below height, but not at height, as we have delayed execution
            cache.remove_key_below(height);
            cache
        })?;

        let hash = hex::encode(&finality.block_hash);

        self.last_committed_finality.write(Some(finality))?;

        // emit event only after successful write
        emit!(ParentFinalityCommitted {
            block_height: height,
            block_hash: &hash
        });

        Ok(())
    }
}
impl FinalityWithNull {
    /// Returns the number of blocks cached.
    pub(crate) fn cached_blocks(&self) -> Stm<BlockHeight> {
        let cache = self.cached_data.read()?;
        Ok(cache.size() as BlockHeight)
    }

    /// Block hash at `height`, served from the last committed finality when the
    /// heights match, otherwise from a non-null cached view.
    pub(crate) fn block_hash_at_height(&self, height: BlockHeight) -> Stm<Option<BlockHash>> {
        if let Some(f) = self.last_committed_finality.read()?.as_ref() {
            if f.height == height {
                return Ok(Some(f.block_hash.clone()));
            }
        }

        self.get_at_height(height, |i| i.0.clone())
    }

    /// Highest height currently held in the cache, if any.
    pub(crate) fn latest_height_in_cache(&self) -> Stm<Option<BlockHeight>> {
        let cache = self.cached_data.read()?;
        Ok(cache.upper_bound())
    }

    /// Get the latest height tracked in the provider, includes both cache and last committed finality
    pub(crate) fn latest_height(&self) -> Stm<Option<BlockHeight>> {
        let h = if let Some(h) = self.latest_height_in_cache()? {
            h
        } else if let Some(p) = self.last_committed_finality()? {
            // cache is empty; fall back to the committed finality height
            p.height
        } else {
            return Ok(None);
        };
        Ok(Some(h))
    }

    /// Get the first non-null block in the range of earliest cache block till the height specified, inclusive.
    /// Scans downwards from `height`, so the closest non-null block at or below it is returned.
    pub(crate) fn first_non_null_block(&self, height: BlockHeight) -> Stm<Option<BlockHeight>> {
        let cache = self.cached_data.read()?;
        Ok(cache.lower_bound().and_then(|lower_bound| {
            for h in (lower_bound..=height).rev() {
                if let Some(Some(_)) = cache.get_value(h) {
                    return Some(h);
                }
            }
            None
        }))
    }
}
/// All the private functions
impl FinalityWithNull {
    /// Picks the next height to propose: take the latest cached height, cap it
    /// by `max_proposal_range` above the last commitment, walk back to the
    /// closest non-null block, then apply the proposal delay and walk back to a
    /// non-null block again. Returns `None` when nothing new can be proposed.
    fn propose_next_height(&self) -> Stm<Option<BlockHeight>> {
        let latest_height = if let Some(h) = self.latest_height_in_cache()? {
            h
        } else {
            tracing::debug!("no proposal yet as height not available");
            return Ok(None);
        };

        let last_committed_height = if let Some(h) = self.last_committed_finality.read_clone()? {
            h.height
        } else {
            unreachable!("last committed finality will be available at this point");
        };

        // cap the proposal so it does not run too far ahead of the last commitment
        let max_proposal_height = last_committed_height + self.config.max_proposal_range();
        let candidate_height = min(max_proposal_height, latest_height);
        tracing::debug!(max_proposal_height, candidate_height, "propose heights");

        let first_non_null_height = if let Some(h) = self.first_non_null_block(candidate_height)? {
            h
        } else {
            tracing::debug!(height = candidate_height, "no non-null block found before");
            return Ok(None);
        };

        tracing::debug!(first_non_null_height, candidate_height);

        // an extra layer of delay. `saturating_sub` prevents an underflow panic
        // when the first non-null height is smaller than the configured proposal
        // delay (possible at low heights early in a chain's life).
        let maybe_proposal_height = self.first_non_null_block(
            first_non_null_height.saturating_sub(self.config.proposal_delay()),
        )?;
        tracing::debug!(
            delayed_height = maybe_proposal_height,
            delay = self.config.proposal_delay()
        );
        if let Some(proposal_height) = maybe_proposal_height {
            // this is possible due to delayed execution as the proposed height's data cannot be
            // executed because they have yet to be executed.
            return if last_committed_height == proposal_height {
                tracing::debug!(
                    last_committed_height,
                    proposal_height,
                    "no new blocks from cache, not proposing"
                );
                Ok(None)
            } else {
                tracing::debug!(proposal_height, "new proposal height");
                Ok(Some(proposal_height))
            };
        }

        tracing::debug!(last_committed_height, "no non-null block after delay");
        Ok(None)
    }

    /// Looks up `height` in the cache and applies `f` to a filled payload or
    /// `d` to produce a default for a null round. `Ok(None)` means the height
    /// is not cached at all.
    fn handle_null_block<T, F: Fn(&ParentViewPayload) -> T, D: Fn() -> T>(
        &self,
        height: BlockHeight,
        f: F,
        d: D,
    ) -> Stm<Option<T>> {
        let cache = self.cached_data.read()?;
        Ok(cache.get_value(height).map(|v| {
            if let Some(i) = v.as_ref() {
                f(i)
            } else {
                tracing::debug!(height, "a null round detected, return default");
                d()
            }
        }))
    }

    /// Applies `f` to the payload cached at `height`; `None` when the height is
    /// absent or is a null round.
    fn get_at_height<T, F: Fn(&ParentViewPayload) -> T>(
        &self,
        height: BlockHeight,
        f: F,
    ) -> Stm<Option<T>> {
        let cache = self.cached_data.read()?;
        Ok(if let Some(Some(v)) = cache.get_value(height) {
            Some(f(v))
        } else {
            None
        })
    }

    /// Appends a non-null parent view, first validating that top-down messages
    /// and validator changes have strictly sequential nonces/configuration
    /// numbers. Aborts on a sequencing violation or a non-sequential height.
    fn parent_block_filled(
        &self,
        height: BlockHeight,
        block_hash: BlockHash,
        validator_changes: Vec<StakingChangeRequest>,
        top_down_msgs: Vec<IpcEnvelope>,
    ) -> StmResult<(), Error> {
        if !top_down_msgs.is_empty() {
            // make sure incoming top down messages are ordered by nonce sequentially
            tracing::debug!(?top_down_msgs);
            ensure_sequential(&top_down_msgs, |msg| msg.nonce)?;
        };

        if !validator_changes.is_empty() {
            tracing::debug!(?validator_changes, "validator changes");
            ensure_sequential(&validator_changes, |change| change.configuration_number)?;
        }

        let r = self.cached_data.modify(|mut cache| {
            let r = cache
                .append(height, Some((block_hash, validator_changes, top_down_msgs)))
                .map_err(Error::NonSequentialParentViewInsert);
            (cache, r)
        })?;

        if let Err(e) = r {
            return abort(e);
        }

        Ok(())
    }

    /// When there is a new parent view, but it is actually a null round, call this function.
    fn parent_null_round(&self, height: BlockHeight) -> StmResult<(), Error> {
        let r = self.cached_data.modify(|mut cache| {
            let r = cache
                .append(height, None)
                .map_err(Error::NonSequentialParentViewInsert);
            (cache, r)
        })?;

        if let Err(e) = r {
            return abort(e);
        }

        Ok(())
    }

    /// Height-level validation of an incoming proposal: reject when no finality
    /// has been committed yet, when the height was already committed, or when it
    /// is ahead of everything we have fetched from our trusted parent node.
    fn check_height(&self, proposal: &IPCParentFinality) -> Stm<bool> {
        let binding = self.last_committed_finality.read()?;
        // last committed finality is not ready yet, we don't vote, just reject
        let last_committed_finality = if let Some(f) = binding.as_ref() {
            f
        } else {
            return Ok(false);
        };

        // the incoming proposal has height already committed, reject
        if last_committed_finality.height >= proposal.height {
            tracing::debug!(
                last_committed = last_committed_finality.height,
                proposed = proposal.height,
                "proposed height already committed",
            );
            return Ok(false);
        }

        if let Some(latest_height) = self.latest_height_in_cache()? {
            let r = latest_height >= proposal.height;
            tracing::debug!(
                is_true = r,
                latest_height,
                proposal = proposal.height.to_string(),
                "incoming proposal height seen?"
            );
            // requires the incoming height cannot be more advanced than our trusted parent node
            Ok(r)
        } else {
            // latest height is not found, meaning we dont have any prefetched cache, we just be
            // strict and vote no simply because we don't know.
            tracing::debug!(
                proposal = proposal.height.to_string(),
                "reject proposal, no data in cache"
            );
            Ok(false)
        }
    }

    /// Hash-level validation: the proposal's hash must equal the hash we hold
    /// for that height; unknown heights are rejected.
    fn check_block_hash(&self, proposal: &IPCParentFinality) -> Stm<bool> {
        Ok(
            if let Some(block_hash) = self.block_hash_at_height(proposal.height)? {
                let r = block_hash == proposal.block_hash;
                tracing::debug!(proposal = proposal.to_string(), is_same = r, "same hash?");
                r
            } else {
                tracing::debug!(proposal = proposal.to_string(), "reject, hash not found");
                false
            },
        )
    }
}
#[cfg(test)]
mod tests {
    use super::FinalityWithNull;
    use crate::finality::ParentViewPayload;
    use crate::{BlockHeight, Config, IPCParentFinality};
    use async_stm::{atomically, atomically_or_err};

    /// Builds a provider where the first entry of `blocks` becomes the last
    /// committed finality and the remaining entries are injected as parent views.
    async fn new_provider(
        mut blocks: Vec<(BlockHeight, Option<ParentViewPayload>)>,
    ) -> FinalityWithNull {
        let config = Config {
            chain_head_delay: 2,
            polling_interval: Default::default(),
            exponential_back_off: Default::default(),
            exponential_retry_limit: 0,
            max_proposal_range: Some(6),
            max_cache_blocks: None,
            proposal_delay: Some(2),
        };
        let committed_finality = IPCParentFinality {
            height: blocks[0].0,
            block_hash: vec![0; 32],
        };
        blocks.remove(0);
        let f = FinalityWithNull::new(config, 1, Some(committed_finality));
        for (h, p) in blocks {
            atomically_or_err(|| f.new_parent_view(h, p.clone()))
                .await
                .unwrap();
        }
        f
    }

    /// All blocks non-null: the proposal is the delayed height, and committing
    /// it keeps the cache sequential for further insertions.
    #[tokio::test]
    async fn test_happy_path() {
        // max_proposal_range is 6. proposal_delay is 2
        let parent_blocks = vec![
            (100, Some((vec![0; 32], vec![], vec![]))), // last committed block
            (101, Some((vec![1; 32], vec![], vec![]))), // cache start
            (102, Some((vec![2; 32], vec![], vec![]))),
            (103, Some((vec![3; 32], vec![], vec![]))),
            (104, Some((vec![4; 32], vec![], vec![]))), // final delayed height + proposal height
            (105, Some((vec![5; 32], vec![], vec![]))),
            (106, Some((vec![6; 32], vec![], vec![]))), // max proposal height (last committed + 6), first non null block
            (107, Some((vec![7; 32], vec![], vec![]))), // cache latest height
        ];
        let provider = new_provider(parent_blocks).await;

        let f = IPCParentFinality {
            height: 104,
            block_hash: vec![4; 32],
        };
        assert_eq!(
            atomically(|| provider.next_proposal()).await,
            Some(f.clone())
        );

        // Test set new finality
        atomically(|| {
            let last = provider.last_committed_finality.read_clone()?;
            provider.set_new_finality(f.clone(), last)
        })
        .await;

        assert_eq!(
            atomically(|| provider.last_committed_finality()).await,
            Some(f.clone())
        );

        // this ensures sequential insertion is still valid
        atomically_or_err(|| provider.new_parent_view(108, None))
            .await
            .unwrap();
    }

    /// With a short cache, the proposal falls back to the delayed height
    /// behind the first non-null candidate.
    #[tokio::test]
    async fn test_not_enough_view() {
        // max_proposal_range is 6. proposal_delay is 2
        let parent_blocks = vec![
            (100, Some((vec![0; 32], vec![], vec![]))), // last committed block
            (101, Some((vec![1; 32], vec![], vec![]))),
            (102, Some((vec![2; 32], vec![], vec![]))),
            (103, Some((vec![3; 32], vec![], vec![]))), // delayed height + final height
            (104, Some((vec![4; 32], vec![], vec![]))),
            (105, Some((vec![4; 32], vec![], vec![]))), // cache latest height, first non null block
            // max proposal height is 106
        ];
        let provider = new_provider(parent_blocks).await;

        assert_eq!(
            atomically(|| provider.next_proposal()).await,
            Some(IPCParentFinality {
                height: 103,
                block_hash: vec![3; 32]
            })
        );
    }

    /// Only null rounds between the commitment and the candidate window:
    /// nothing can be proposed.
    #[tokio::test]
    async fn test_with_all_null_blocks() {
        // max_proposal_range is 10. proposal_delay is 2
        let parent_blocks = vec![
            (102, Some((vec![2; 32], vec![], vec![]))), // last committed block
            (103, None),
            (104, None),
            (105, None),
            (106, None),
            (107, None),
            (108, None),
            (109, None),
            (110, Some((vec![4; 32], vec![], vec![]))), // cache latest height
            // max proposal height is 112
        ];
        let mut provider = new_provider(parent_blocks).await;
        provider.config.max_proposal_range = Some(8);

        assert_eq!(atomically(|| provider.next_proposal()).await, None);
    }

    /// The delay lands in a run of null rounds whose only earlier non-null
    /// block is the already-committed one: no proposal.
    #[tokio::test]
    async fn test_with_partially_null_blocks_i() {
        // max_proposal_range is 10. proposal_delay is 2
        let parent_blocks = vec![
            (102, Some((vec![2; 32], vec![], vec![]))), // last committed block
            (103, None),
            (104, None), // we wont have a proposal because after delay, there is no more non-null proposal
            (105, None),
            (106, None),
            (107, None),
            (108, None), // delayed block
            (109, Some((vec![8; 32], vec![], vec![]))),
            (110, Some((vec![10; 32], vec![], vec![]))), // cache latest height, first non null block
            // max proposal height is 112
        ];
        let mut provider = new_provider(parent_blocks).await;
        provider.config.max_proposal_range = Some(10);

        assert_eq!(atomically(|| provider.next_proposal()).await, None);
    }

    /// The delayed lookup walks back through null rounds to the closest
    /// non-null block, which becomes the proposal.
    #[tokio::test]
    async fn test_with_partially_null_blocks_ii() {
        // max_proposal_range is 10. proposal_delay is 2
        let parent_blocks = vec![
            (102, Some((vec![2; 32], vec![], vec![]))), // last committed block
            (103, Some((vec![3; 32], vec![], vec![]))),
            (104, None),
            (105, None),
            (106, None),
            (107, Some((vec![7; 32], vec![], vec![]))), // first non null after delay
            (108, None), // delayed block
            (109, None),
            (110, Some((vec![10; 32], vec![], vec![]))), // cache latest height, first non null block
            // max proposal height is 112
        ];
        let mut provider = new_provider(parent_blocks).await;
        provider.config.max_proposal_range = Some(10);

        assert_eq!(
            atomically(|| provider.next_proposal()).await,
            Some(IPCParentFinality {
                height: 107,
                block_hash: vec![7; 32]
            })
        );
    }

    /// Trailing null rounds at the cache tip do not prevent proposing the
    /// delayed non-null block further back.
    #[tokio::test]
    async fn test_with_partially_null_blocks_iii() {
        let parent_blocks = vec![
            (102, Some((vec![2; 32], vec![], vec![]))), // last committed block
            (103, Some((vec![3; 32], vec![], vec![]))),
            (104, None),
            (105, None),
            (106, None),
            (107, Some((vec![7; 32], vec![], vec![]))), // first non null delayed block, final
            (108, None), // delayed block
            (109, None),
            (110, Some((vec![10; 32], vec![], vec![]))), // first non null block
            (111, None),
            (112, None),
            // max proposal height is 122
        ];
        let mut provider = new_provider(parent_blocks).await;
        provider.config.max_proposal_range = Some(20);

        assert_eq!(
            atomically(|| provider.next_proposal()).await,
            Some(IPCParentFinality {
                height: 107,
                block_hash: vec![7; 32]
            })
        );
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/sync/syncer.rs | fendermint/vm/topdown/src/sync/syncer.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! The inner type of parent syncer
use crate::finality::ParentViewPayload;
use crate::proxy::ParentQueryProxy;
use crate::sync::{query_starting_finality, ParentFinalityStateQuery};
use crate::voting::{self, VoteTally};
use crate::{
is_null_round_str, BlockHash, BlockHeight, CachedFinalityProvider, Config, Error, Toggle,
};
use anyhow::anyhow;
use async_stm::{atomically, atomically_or_err, StmError};
use ethers::utils::hex;
use libp2p::futures::TryFutureExt;
use std::sync::Arc;
use tracing::instrument;
use fendermint_tracing::emit;
use fendermint_vm_event::{BlockHashHex, NewParentView};
/// Parent syncer that constantly poll parent. This struct handles lotus null blocks and deferred
/// execution. For ETH based parent, it should work out of the box as well.
pub(crate) struct LotusParentSyncer<T, P> {
    // Tuning knobs: chain head delay and cache size limit are read during `sync`.
    config: Config,
    // Proxy used to query the parent chain (head height, block hashes, messages, changes).
    parent_proxy: Arc<P>,
    // Cache of parent views that `sync` appends to; wrapped in `Toggle`.
    provider: Arc<Toggle<CachedFinalityProvider<P>>>,
    // Tally that is fed every block (null or not) added to the cache.
    vote_tally: VoteTally,
    // Source of the latest committed finality, consulted when resetting after a reorg.
    query: Arc<T>,
    /// For testing purposes, we can sync one block at a time.
    /// Not part of `Config` as it's a very niche setting;
    /// if enabled it would slow down catching up with parent
    /// history to a crawl, or one would have to increase
    /// the polling frequence to where it's impractical after
    /// we have caught up.
    sync_many: bool,
}
impl<T, P> LotusParentSyncer<T, P>
where
    T: ParentFinalityStateQuery + Send + Sync + 'static,
    P: ParentQueryProxy + Send + Sync + 'static,
{
    /// Creates a syncer that, by default, fetches as many blocks per `sync`
    /// call as the cache size limit allows (`sync_many = true`).
    pub fn new(
        config: Config,
        parent_proxy: Arc<P>,
        provider: Arc<Toggle<CachedFinalityProvider<P>>>,
        vote_tally: VoteTally,
        query: Arc<T>,
    ) -> anyhow::Result<Self> {
        Ok(Self {
            config,
            parent_proxy,
            provider,
            vote_tally,
            query,
            sync_many: true,
        })
    }

    /// Insert the height into cache when we see a new non null block
    pub async fn sync(&mut self) -> anyhow::Result<()> {
        let chain_head = if let Some(h) = self.finalized_chain_head().await? {
            h
        } else {
            // head is too close to genesis to consider anything final yet
            return Ok(());
        };

        let (mut latest_height_fetched, mut first_non_null_parent_hash) =
            self.latest_cached_data().await;
        tracing::debug!(chain_head, latest_height_fetched, "syncing heights");

        if latest_height_fetched > chain_head {
            // our view is ahead of the (finalized) head: treat it as a reorg and start over
            tracing::warn!(
                chain_head,
                latest_height_fetched,
                "chain head went backwards, potential reorg detected from height"
            );
            return self.reset().await;
        }

        if latest_height_fetched == chain_head {
            tracing::debug!(
                chain_head,
                latest_height_fetched,
                "the parent has yet to produce a new block"
            );
            return Ok(());
        }

        // pull one height at a time until we hit the head, the cache limit,
        // or (when `sync_many` is off) after a single block
        loop {
            if self.exceed_cache_size_limit().await {
                tracing::debug!("exceeded cache size limit");
                break;
            }

            first_non_null_parent_hash = match self
                .poll_next(latest_height_fetched + 1, first_non_null_parent_hash)
                .await
            {
                Ok(h) => h,
                Err(Error::ParentChainReorgDetected) => {
                    tracing::warn!("potential reorg detected, clear cache and retry");
                    self.reset().await?;
                    break;
                }
                Err(e) => return Err(anyhow!(e)),
            };

            latest_height_fetched += 1;

            if latest_height_fetched == chain_head {
                tracing::debug!("reached the tip of the chain");
                break;
            } else if !self.sync_many {
                break;
            }
        }

        Ok(())
    }
}
impl<T, P> LotusParentSyncer<T, P>
where
    T: ParentFinalityStateQuery + Send + Sync + 'static,
    P: ParentQueryProxy + Send + Sync + 'static,
{
    /// True when the number of cached blocks is strictly above the configured limit.
    async fn exceed_cache_size_limit(&self) -> bool {
        let max_cache_blocks = self.config.max_cache_blocks();
        atomically(|| self.provider.cached_blocks()).await > max_cache_blocks
    }

    /// Get the latest data stored in the cache to pull the next block
    async fn latest_cached_data(&self) -> (BlockHeight, BlockHash) {
        // we are getting the latest height fetched in cache along with the first non null block
        // that is stored in cache.
        // we are doing two fetches in one `atomically` as if we get the data in two `atomically`,
        // the cache might be updated in between the two calls. `atomically` should guarantee atomicity.
        atomically(|| {
            let latest_height = if let Some(h) = self.provider.latest_height()? {
                h
            } else {
                unreachable!("guaranteed to have latest height, report bug please")
            };

            // first try to get the first non null block before latest_height + 1, i.e. from cache
            let prev_non_null_height =
                if let Some(height) = self.provider.first_non_null_block(latest_height)? {
                    tracing::debug!(height, "first non null block in cache");
                    height
                } else if let Some(p) = self.provider.last_committed_finality()? {
                    tracing::debug!(
                        height = p.height,
                        "first non null block not in cache, use latest finality"
                    );
                    p.height
                } else {
                    unreachable!("guaranteed to have last committed finality, report bug please")
                };

            let hash = if let Some(h) = self.provider.block_hash(prev_non_null_height)? {
                h
            } else {
                unreachable!(
                    "guaranteed to have hash as the height {} is found",
                    prev_non_null_height
                )
            };

            Ok((latest_height, hash))
        })
        .await
    }

    /// Poll the next block height. Returns finalized and executed block data.
    ///
    /// A null round is cached as `None` and the previous non-null parent hash
    /// is carried forward; a parent-hash mismatch signals a reorg.
    async fn poll_next(
        &mut self,
        height: BlockHeight,
        parent_block_hash: BlockHash,
    ) -> Result<BlockHash, Error> {
        tracing::debug!(
            height,
            parent_block_hash = hex::encode(&parent_block_hash),
            "polling height with parent hash"
        );

        let block_hash_res = match self.parent_proxy.get_block_hash(height).await {
            Ok(res) => res,
            Err(e) => {
                let err = e.to_string();
                if is_null_round_str(&err) {
                    tracing::debug!(
                        height,
                        "detected null round at height, inserted None to cache"
                    );

                    // cache and tally must observe the null round in one transaction
                    atomically_or_err::<_, Error, _>(|| {
                        self.provider.new_parent_view(height, None)?;
                        self.vote_tally
                            .add_block(height, None)
                            .map_err(map_voting_err)?;
                        Ok(())
                    })
                    .await?;

                    emit!(NewParentView {
                        is_null: true,
                        block_height: height,
                        block_hash: None::<BlockHashHex>,
                        num_msgs: 0,
                        num_validator_changes: 0
                    });

                    // Null block received, no block hash for the current height being polled.
                    // Return the previous parent hash as the non-null block hash.
                    return Ok(parent_block_hash);
                }
                return Err(Error::CannotQueryParent(
                    format!("get_block_hash: {e}"),
                    height,
                ));
            }
        };

        if block_hash_res.parent_block_hash != parent_block_hash {
            tracing::warn!(
                height,
                parent_hash = hex::encode(&block_hash_res.parent_block_hash),
                previous_hash = hex::encode(&parent_block_hash),
                "parent block hash diff than previous hash",
            );
            return Err(Error::ParentChainReorgDetected);
        }

        let data = self.fetch_data(height, block_hash_res.block_hash).await?;

        tracing::debug!(
            height,
            staking_requests = data.1.len(),
            cross_messages = data.2.len(),
            "fetched data"
        );

        atomically_or_err::<_, Error, _>(|| {
            // This is here so we see if there is abnormal amount of retries for some reason.
            tracing::debug!(height, "adding data to the cache");

            self.provider.new_parent_view(height, Some(data.clone()))?;
            self.vote_tally
                .add_block(height, Some(data.0.clone()))
                .map_err(map_voting_err)?;
            tracing::debug!(height, "non-null block pushed to cache");
            Ok(())
        })
        .await?;

        emit!(NewParentView {
            is_null: false,
            block_height: height,
            block_hash: Some(&hex::encode(&data.0)),
            num_msgs: data.2.len(),
            num_validator_changes: data.1.len(),
        });

        Ok(data.0)
    }

    /// Thin wrapper delegating to the free `fetch_data` function below.
    async fn fetch_data(
        &self,
        height: BlockHeight,
        block_hash: BlockHash,
    ) -> Result<ParentViewPayload, Error> {
        fetch_data(self.parent_proxy.as_ref(), height, block_hash).await
    }

    /// Chain head minus `chain_head_delay`, or `None` when the head is still
    /// within the delay window (nothing is considered final yet).
    async fn finalized_chain_head(&self) -> anyhow::Result<Option<BlockHeight>> {
        let parent_chain_head_height = self.parent_proxy.get_chain_head_height().await?;
        // sanity check
        if parent_chain_head_height < self.config.chain_head_delay {
            tracing::debug!("latest height not more than the chain head delay");
            return Ok(None);
        }

        // we consider the chain head finalized only after the `chain_head_delay`
        Ok(Some(
            parent_chain_head_height - self.config.chain_head_delay,
        ))
    }

    /// Reset the cache in the face of a reorg
    async fn reset(&self) -> anyhow::Result<()> {
        let finality = query_starting_finality(&self.query, &self.parent_proxy).await?;
        atomically(|| self.provider.reset(finality.clone())).await;
        Ok(())
    }
}
/// Translates a vote-tally STM error into this crate's error type.
/// Control-flow signals pass through untouched; any tally abort is logged and
/// collapsed into `Error::NotSequential`.
fn map_voting_err(e: StmError<voting::Error>) -> StmError<Error> {
    match e {
        StmError::Control(ctrl) => StmError::Control(ctrl),
        StmError::Abort(vote_err) => {
            tracing::error!(
                error = vote_err.to_string(),
                "failed to append block to voting tally"
            );
            StmError::Abort(Error::NotSequential)
        }
    }
}
/// Fetches the validator changes and top-down messages for `height`
/// concurrently, then verifies that both responses were produced for
/// `block_hash`; a mismatch on either is treated as a parent chain reorg.
#[instrument(skip(parent_proxy))]
async fn fetch_data<P>(
    parent_proxy: &P,
    height: BlockHeight,
    block_hash: BlockHash,
) -> Result<ParentViewPayload, Error>
where
    P: ParentQueryProxy + Send + Sync + 'static,
{
    let changes_res = parent_proxy
        .get_validator_changes(height)
        .map_err(|e| Error::CannotQueryParent(format!("get_validator_changes: {e}"), height));

    let topdown_msgs_res = parent_proxy
        .get_top_down_msgs(height)
        .map_err(|e| Error::CannotQueryParent(format!("get_top_down_msgs: {e}"), height));

    // run both queries concurrently, then fail on the first error
    let (changes_res, topdown_msgs_res) = tokio::join!(changes_res, topdown_msgs_res);
    let (changes_res, topdown_msgs_res) = (changes_res?, topdown_msgs_res?);

    if changes_res.block_hash != block_hash {
        tracing::warn!(
            height,
            change_set_hash = hex::encode(&changes_res.block_hash),
            block_hash = hex::encode(&block_hash),
            "change set block hash does not equal block hash",
        );
        return Err(Error::ParentChainReorgDetected);
    }

    if topdown_msgs_res.block_hash != block_hash {
        tracing::warn!(
            height,
            topdown_msgs_hash = hex::encode(&topdown_msgs_res.block_hash),
            block_hash = hex::encode(&block_hash),
            "topdown messages block hash does not equal block hash",
        );
        return Err(Error::ParentChainReorgDetected);
    }

    Ok((block_hash, changes_res.value, topdown_msgs_res.value))
}
/// Collects topdown events over `[start_height, end_height]`, skipping null
/// rounds and heights where both the validator-change set and the top-down
/// message set are empty. Any other query failure aborts the scan.
pub async fn fetch_topdown_events<P>(
    parent_proxy: &P,
    start_height: BlockHeight,
    end_height: BlockHeight,
) -> Result<Vec<(BlockHeight, ParentViewPayload)>, Error>
where
    P: ParentQueryProxy + Send + Sync + 'static,
{
    let mut events = Vec::new();
    for height in start_height..=end_height {
        // Resolve the block hash first; a null round simply skips the height.
        let hash_res = match parent_proxy.get_block_hash(height).await {
            Ok(res) => res,
            Err(e) if is_null_round_str(&e.to_string()) => continue,
            Err(e) => {
                return Err(Error::CannotQueryParent(
                    format!("get_block_hash: {e}"),
                    height,
                ))
            }
        };
        let (block_hash, changes, msgs) =
            fetch_data(parent_proxy, height, hash_res.block_hash).await?;
        // Keep the height only when it actually carries something.
        if !changes.is_empty() || !msgs.is_empty() {
            events.push((height, (block_hash, changes, msgs)));
        }
    }
    Ok(events)
}
#[cfg(test)]
mod tests {
use crate::proxy::ParentQueryProxy;
use crate::sync::syncer::LotusParentSyncer;
use crate::sync::ParentFinalityStateQuery;
use crate::voting::VoteTally;
use crate::{
BlockHash, BlockHeight, CachedFinalityProvider, Config, IPCParentFinality,
SequentialKeyCache, Toggle, NULL_ROUND_ERR_MSG,
};
use anyhow::anyhow;
use async_stm::atomically;
use async_trait::async_trait;
use fendermint_vm_genesis::{Power, Validator};
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::StakingChangeRequest;
use ipc_provider::manager::{GetBlockHashResult, TopDownQueryPayload};
use std::sync::Arc;
/// How far behind the tip of the chain do we consider blocks final in the tests.
const FINALITY_DELAY: u64 = 2;
/// State-query stub that always returns the fixed finality it was built with
/// and an empty power table.
struct TestParentFinalityStateQuery {
    latest_finality: IPCParentFinality,
    }
    impl ParentFinalityStateQuery for TestParentFinalityStateQuery {
        fn get_latest_committed_finality(&self) -> anyhow::Result<Option<IPCParentFinality>> {
            Ok(Some(self.latest_finality.clone()))
        }
        fn get_power_table(&self) -> anyhow::Result<Option<Vec<Validator<Power>>>> {
            Ok(Some(vec![]))
        }
    }
/// Parent proxy stub backed by a fixed height->hash map; `None` entries model
/// null rounds.
struct TestParentProxy {
    blocks: SequentialKeyCache<BlockHeight, Option<BlockHash>>,
    }
    #[async_trait]
    impl ParentQueryProxy for TestParentProxy {
        async fn get_chain_head_height(&self) -> anyhow::Result<BlockHeight> {
            Ok(self.blocks.upper_bound().unwrap())
        }
        async fn get_genesis_epoch(&self) -> anyhow::Result<BlockHeight> {
            Ok(self.blocks.lower_bound().unwrap() - 1)
        }
        // Errors with the null-round message for None entries; otherwise walks
        // backwards to the closest earlier non-null block to supply the parent hash.
        async fn get_block_hash(&self, height: BlockHeight) -> anyhow::Result<GetBlockHashResult> {
            let r = self.blocks.get_value(height).unwrap();
            if r.is_none() {
                return Err(anyhow!(NULL_ROUND_ERR_MSG));
            }
            for h in (self.blocks.lower_bound().unwrap()..height).rev() {
                let v = self.blocks.get_value(h).unwrap();
                if v.is_none() {
                    continue;
                }
                return Ok(GetBlockHashResult {
                    parent_block_hash: v.clone().unwrap(),
                    block_hash: r.clone().unwrap(),
                });
            }
            panic!("invalid testing data")
        }
        async fn get_top_down_msgs(
            &self,
            height: BlockHeight,
        ) -> anyhow::Result<TopDownQueryPayload<Vec<IpcEnvelope>>> {
            Ok(TopDownQueryPayload {
                value: vec![],
                block_hash: self.blocks.get_value(height).cloned().unwrap().unwrap(),
            })
        }
        async fn get_validator_changes(
            &self,
            height: BlockHeight,
        ) -> anyhow::Result<TopDownQueryPayload<Vec<StakingChangeRequest>>> {
            Ok(TopDownQueryPayload {
                value: vec![],
                block_hash: self.blocks.get_value(height).cloned().unwrap().unwrap(),
            })
        }
    }
async fn new_syncer(
blocks: SequentialKeyCache<BlockHeight, Option<BlockHash>>,
sync_many: bool,
) -> LotusParentSyncer<TestParentFinalityStateQuery, TestParentProxy> {
let config = Config {
chain_head_delay: FINALITY_DELAY,
polling_interval: Default::default(),
exponential_back_off: Default::default(),
exponential_retry_limit: 0,
max_proposal_range: Some(1),
max_cache_blocks: None,
proposal_delay: None,
};
let genesis_epoch = blocks.lower_bound().unwrap();
let proxy = Arc::new(TestParentProxy { blocks });
let committed_finality = IPCParentFinality {
height: genesis_epoch,
block_hash: vec![0; 32],
};
let vote_tally = VoteTally::new(
vec![],
(
committed_finality.height,
committed_finality.block_hash.clone(),
),
);
let provider = CachedFinalityProvider::new(
config.clone(),
genesis_epoch,
Some(committed_finality.clone()),
proxy.clone(),
);
let mut syncer = LotusParentSyncer::new(
config,
proxy,
Arc::new(Toggle::enabled(provider)),
vote_tally,
Arc::new(TestParentFinalityStateQuery {
latest_finality: committed_finality,
}),
)
.unwrap();
// Some tests expect to sync one block at a time.
syncer.sync_many = sync_many;
syncer
}
/// Creates a mock of a new parent blockchain view. The key is the height and the value is the
/// block hash. If block hash is None, it means the current height is a null block.
macro_rules! new_parent_blocks {
($($key:expr => $val:expr),* ,) => (
hash_map!($($key => $val),*)
);
($($key:expr => $val:expr),*) => ({
let mut map = SequentialKeyCache::sequential();
$( map.append($key, $val).unwrap(); )*
map
});
}
#[tokio::test]
async fn happy_path() {
let parent_blocks = new_parent_blocks!(
100 => Some(vec![0; 32]), // genesis block
101 => Some(vec![1; 32]),
102 => Some(vec![2; 32]),
103 => Some(vec![3; 32]),
104 => Some(vec![4; 32]), // after chain head delay, we fetch only to here
105 => Some(vec![5; 32]),
106 => Some(vec![6; 32]) // chain head
);
let mut syncer = new_syncer(parent_blocks, false).await;
for h in 101..=104 {
syncer.sync().await.unwrap();
let p = atomically(|| syncer.provider.latest_height()).await;
assert_eq!(p, Some(h));
}
}
#[tokio::test]
async fn with_non_null_block() {
let parent_blocks = new_parent_blocks!(
100 => Some(vec![0; 32]), // genesis block
101 => None,
102 => None,
103 => None,
104 => Some(vec![4; 32]),
105 => None,
106 => None,
107 => None,
108 => Some(vec![5; 32]),
109 => None,
110 => None,
111 => None
);
let mut syncer = new_syncer(parent_blocks, false).await;
for h in 101..=109 {
syncer.sync().await.unwrap();
assert_eq!(
atomically(|| syncer.provider.latest_height()).await,
Some(h)
);
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/sync/tendermint.rs | fendermint/vm/topdown/src/sync/tendermint.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! The tendermint aware syncer
use crate::proxy::ParentQueryProxy;
use crate::sync::syncer::LotusParentSyncer;
use crate::sync::ParentFinalityStateQuery;
use anyhow::Context;
/// Tendermint aware syncer
pub(crate) struct TendermintAwareSyncer<T, C, P> {
inner: LotusParentSyncer<T, P>,
tendermint_client: C,
}
impl<T, C, P> TendermintAwareSyncer<T, C, P>
where
T: ParentFinalityStateQuery + Send + Sync + 'static,
C: tendermint_rpc::Client + Send + Sync + 'static,
P: ParentQueryProxy + Send + Sync + 'static,
{
pub fn new(inner: LotusParentSyncer<T, P>, tendermint_client: C) -> Self {
Self {
inner,
tendermint_client,
}
}
/// Sync with the parent, unless CometBFT is still catching up with the network,
/// in which case we'll get the changes from the subnet peers in the blocks.
pub async fn sync(&mut self) -> anyhow::Result<()> {
if self.is_syncing_peer().await? {
tracing::debug!("syncing with peer, skip parent finality syncing this round");
return Ok(());
}
self.inner.sync().await
}
async fn is_syncing_peer(&self) -> anyhow::Result<bool> {
let status: tendermint_rpc::endpoint::status::Response = self
.tendermint_client
.status()
.await
.context("failed to get Tendermint status")?;
Ok(status.sync_info.catching_up)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/src/sync/mod.rs | fendermint/vm/topdown/src/sync/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! A constant running process that fetch or listener to parent state
mod syncer;
mod tendermint;
use crate::proxy::ParentQueryProxy;
use crate::sync::syncer::LotusParentSyncer;
use crate::sync::tendermint::TendermintAwareSyncer;
use crate::voting::VoteTally;
use crate::{CachedFinalityProvider, Config, IPCParentFinality, ParentFinalityProvider, Toggle};
use anyhow::anyhow;
use async_stm::atomically;
use ethers::utils::hex;
use ipc_ipld_resolver::ValidatorKey;
use std::sync::Arc;
use std::time::Duration;
use fendermint_vm_genesis::{Power, Validator};
pub use syncer::fetch_topdown_events;
/// Query the parent finality from the block chain state.
///
/// It returns `None` from queries until the ledger has been initialized.
pub trait ParentFinalityStateQuery {
/// Get the latest committed finality from the state
fn get_latest_committed_finality(&self) -> anyhow::Result<Option<IPCParentFinality>>;
/// Get the current committee voting powers.
fn get_power_table(&self) -> anyhow::Result<Option<Vec<Validator<Power>>>>;
}
/// Queries the starting finality for polling. First checks the committed finality, if none, that
/// means the chain has just started, then query from the parent to get the genesis epoch.
async fn query_starting_finality<T, P>(
query: &Arc<T>,
parent_client: &Arc<P>,
) -> anyhow::Result<IPCParentFinality>
where
T: ParentFinalityStateQuery + Send + Sync + 'static,
P: ParentQueryProxy + Send + Sync + 'static,
{
loop {
let mut finality = match query.get_latest_committed_finality() {
Ok(Some(finality)) => finality,
Ok(None) => {
tracing::debug!("app not ready for query yet");
tokio::time::sleep(Duration::from_secs(5)).await;
continue;
}
Err(e) => {
tracing::warn!(error = e.to_string(), "cannot get committed finality");
tokio::time::sleep(Duration::from_secs(5)).await;
continue;
}
};
tracing::info!(finality = finality.to_string(), "latest finality committed");
// this means there are no previous committed finality yet, we fetch from parent to get
// the genesis epoch of the current subnet and its corresponding block hash.
if finality.height == 0 {
let genesis_epoch = parent_client.get_genesis_epoch().await?;
tracing::debug!(genesis_epoch = genesis_epoch, "obtained genesis epoch");
let r = parent_client.get_block_hash(genesis_epoch).await?;
tracing::debug!(
block_hash = hex::encode(&r.block_hash),
"obtained genesis block hash",
);
finality = IPCParentFinality {
height: genesis_epoch,
block_hash: r.block_hash,
};
tracing::info!(
genesis_finality = finality.to_string(),
"no previous finality committed, fetched from genesis epoch"
);
}
return Ok(finality);
}
}
/// Queries the starting finality for polling. First checks the committed finality, if none, that
/// means the chain has just started, then query from the parent to get the genesis epoch.
async fn query_starting_comittee<T>(query: &Arc<T>) -> anyhow::Result<Vec<Validator<Power>>>
where
T: ParentFinalityStateQuery + Send + Sync + 'static,
{
loop {
match query.get_power_table() {
Ok(Some(power_table)) => return Ok(power_table),
Ok(None) => {
tracing::debug!("app not ready for query yet");
tokio::time::sleep(Duration::from_secs(5)).await;
continue;
}
Err(e) => {
tracing::warn!(error = e.to_string(), "cannot get comittee");
tokio::time::sleep(Duration::from_secs(5)).await;
continue;
}
}
}
}
/// Start the polling parent syncer in the background
pub async fn launch_polling_syncer<T, C, P>(
query: T,
config: Config,
view_provider: Arc<Toggle<CachedFinalityProvider<P>>>,
vote_tally: VoteTally,
parent_client: Arc<P>,
tendermint_client: C,
) -> anyhow::Result<()>
where
T: ParentFinalityStateQuery + Send + Sync + 'static,
C: tendermint_rpc::Client + Send + Sync + 'static,
P: ParentQueryProxy + Send + Sync + 'static,
{
if !view_provider.is_enabled() {
return Err(anyhow!("provider not enabled, enable to run syncer"));
}
let query = Arc::new(query);
let finality = query_starting_finality(&query, &parent_client).await?;
let power_table = query_starting_comittee(&query).await?;
let power_table = power_table
.into_iter()
.map(|v| {
let vk = ValidatorKey::from(v.public_key.0);
let w = v.power.0;
(vk, w)
})
.collect::<Vec<_>>();
atomically(|| {
view_provider.set_new_finality(finality.clone(), None)?;
vote_tally.set_finalized(finality.height, finality.block_hash.clone())?;
vote_tally.set_power_table(power_table.clone())?;
Ok(())
})
.await;
tracing::info!(
finality = finality.to_string(),
"launching parent syncer with last committed finality"
);
start_syncing(
config,
view_provider,
vote_tally,
parent_client,
query,
tendermint_client,
);
Ok(())
}
/// Start the parent finality listener in the background
fn start_syncing<T, C, P>(
config: Config,
view_provider: Arc<Toggle<CachedFinalityProvider<P>>>,
vote_tally: VoteTally,
parent_proxy: Arc<P>,
query: Arc<T>,
tendermint_client: C,
) where
T: ParentFinalityStateQuery + Send + Sync + 'static,
C: tendermint_rpc::Client + Send + Sync + 'static,
P: ParentQueryProxy + Send + Sync + 'static,
{
let mut interval = tokio::time::interval(config.polling_interval);
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
tokio::spawn(async move {
let lotus_syncer =
LotusParentSyncer::new(config, parent_proxy, view_provider, vote_tally, query)
.expect("");
let mut tendermint_syncer = TendermintAwareSyncer::new(lotus_syncer, tendermint_client);
loop {
interval.tick().await;
if let Err(e) = tendermint_syncer.sync().await {
tracing::error!(error = e.to_string(), "sync with parent encountered error");
}
}
});
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/topdown/tests/smt_voting.rs | fendermint/vm/topdown/tests/smt_voting.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! State Machine Test for the finality voting tally component.
//!
//! The test simulates random events that the tally can receive, such as votes received
//! over gossip, power table updates, block being executed, and tests that the tally
//! correctly identifies the blocks which are agreeable to the majority of validator.
//!
//! It can be executed the following way:
//!
//! ```text
//! cargo test --release -p fendermint_vm_topdown --test smt_voting
//! ```
use std::{
cmp::{max, min},
collections::BTreeMap,
fmt::Debug,
};
use arbitrary::Unstructured;
use async_stm::{atomically, atomically_or_err, Stm, StmResult};
use fendermint_testing::{smt, state_machine_test};
use fendermint_vm_topdown::{
voting::{self, VoteTally, Weight},
BlockHash, BlockHeight,
};
use im::HashSet;
//use rand::{rngs::StdRng, SeedableRng};
/// Size of window of voting relative to the last cast vote.
const MAX_VOTE_DELTA: BlockHeight = 5;
/// Maximum number of blocks to finalize at a time.
const MAX_FINALIZED_DELTA: BlockHeight = 5;
state_machine_test!(voting, 10000 ms, 65512 bytes, 200 steps, VotingMachine::new());
//state_machine_test!(voting, 0xf7ac11a50000ffe8, 200 steps, VotingMachine::new());
/// Test key to make debugging more readable.
#[derive(Debug, Clone, Hash, Eq, PartialEq, PartialOrd, Ord)]
pub struct VotingKey(u64);
pub type VotingError = voting::Error<VotingKey>;
pub enum VotingCommand {
/// The tally observes the next block fo the chain.
ExtendChain(BlockHeight, Option<BlockHash>),
/// One of the validators voted on a block.
AddVote(VotingKey, BlockHeight, BlockHash),
/// Update the power table.
UpdatePower(Vec<(VotingKey, Weight)>),
/// A certain height was finalized in the ledger.
BlockFinalized(BlockHeight, BlockHash),
/// Ask the tally for the highest agreeable block.
FindQuorum,
}
// Debug format without block hashes which make it unreadable.
impl Debug for VotingCommand {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::ExtendChain(arg0, arg1) => f
.debug_tuple("ExtendChain")
.field(arg0)
.field(&arg1.is_some())
.finish(),
Self::AddVote(arg0, arg1, _arg2) => {
f.debug_tuple("AddVote").field(arg0).field(arg1).finish()
}
Self::UpdatePower(arg0) => f.debug_tuple("UpdatePower").field(arg0).finish(),
Self::BlockFinalized(arg0, _arg1) => {
f.debug_tuple("BlockFinalized").field(arg0).finish()
}
Self::FindQuorum => write!(f, "FindQuorum"),
}
}
}
/// Model state of voting
#[derive(Clone)]
pub struct VotingState {
/// We have a single parent chain that everybody observes, just at different heights.
/// There is no forking in this test because we assume that the syncing component
/// only downloads blocks which are final, and that reorgs don't happen.
///
/// Null blocks are represented by `None`.
///
/// The tally is currently unable to handle reorgs and rejects equivocations anyway.
///
/// TODO (ENG-623): Decide what we want to achieve with Equivocation detection.
chain: Vec<Option<BlockHash>>,
/// All the validator keys to help pic random ones.
validator_keys: Vec<VotingKey>,
/// All the validators with varying weights (can be zero).
validator_states: BTreeMap<VotingKey, ValidatorState>,
last_finalized_block: BlockHeight,
last_chain_block: BlockHeight,
}
impl VotingState {
pub fn can_extend(&self) -> bool {
self.last_chain_block < self.max_chain_height()
}
pub fn can_finalize(&self) -> bool {
// We can finalize a block even if we haven't observed the votes,
// if the majority of validators vote for an actual block that
// proposed it for execution.
self.last_finalized_block < self.max_chain_height()
}
pub fn next_chain_block(&self) -> Option<(BlockHeight, Option<BlockHash>)> {
if self.can_extend() {
let h = self.last_chain_block + 1;
Some((h, self.block_hash(h)))
} else {
None
}
}
pub fn max_chain_height(&self) -> BlockHeight {
self.chain.len() as BlockHeight - 1
}
pub fn block_hash(&self, h: BlockHeight) -> Option<BlockHash> {
self.chain[h as usize].clone()
}
pub fn has_quorum(&self, h: BlockHeight) -> bool {
if self.block_hash(h).is_none() {
return false;
}
let mut total_weight: Weight = 0;
let mut vote_weight: Weight = 0;
for vs in self.validator_states.values() {
total_weight += vs.weight;
if vs.highest_vote >= h {
vote_weight += vs.weight;
}
}
let threshold = total_weight * 2 / 3;
vote_weight > threshold
}
}
#[derive(Clone, Debug)]
pub struct ValidatorState {
/// Current voting power (can be zero).
weight: Weight,
/// The heights this validator explicitly voted on.
votes: HashSet<BlockHeight>,
/// The highest vote *currently on the chain* the validator has voted for already.
/// Initially zero, meaning everyone voted on the initial finalized block.
highest_vote: BlockHeight,
}
pub struct VotingMachine {
/// Runtime for executing async commands.
runtime: tokio::runtime::Runtime,
}
impl VotingMachine {
pub fn new() -> Self {
Self {
runtime: tokio::runtime::Runtime::new().expect("create tokio runtime"),
}
}
fn atomically_or_err<F, T>(&self, f: F) -> Result<T, VotingError>
where
F: Fn() -> StmResult<T, VotingError>,
{
self.runtime.block_on(atomically_or_err(f))
}
fn atomically<F, T>(&self, f: F) -> T
where
F: Fn() -> Stm<T>,
{
self.runtime.block_on(atomically(f))
}
// For convenience in the command handler.
fn atomically_ok<F, T>(&self, f: F) -> Result<T, VotingError>
where
F: Fn() -> Stm<T>,
{
Ok(self.atomically(f))
}
}
impl Default for VotingMachine {
fn default() -> Self {
Self::new()
}
}
impl smt::StateMachine for VotingMachine {
/// The System Under Test is the Vote Tally.
type System = VoteTally<VotingKey>;
/// The model state is defined here in the test.
type State = VotingState;
/// Random commands we can apply in a step.
type Command = VotingCommand;
/// Result of command application on the system.
///
/// The only return value we are interested in is the finality.
type Result = Result<Option<(BlockHeight, BlockHash)>, voting::Error<VotingKey>>;
/// New random state.
fn gen_state(&self, u: &mut Unstructured) -> arbitrary::Result<Self::State> {
let chain_length = u.int_in_range(40..=60)?;
let mut chain = Vec::new();
for i in 0..chain_length {
if i == 0 || u.ratio(9, 10)? {
let block_hash = u.bytes(32)?;
chain.push(Some(Vec::from(block_hash)));
} else {
chain.push(None);
}
}
let validator_count = u.int_in_range(1..=5)?;
//let mut rng = StdRng::seed_from_u64(u.arbitrary()?);
let mut validator_states = BTreeMap::new();
for i in 0..validator_count {
let min_weight = if i == 0 { 1u64 } else { 0u64 };
let weight = u.int_in_range(min_weight..=100)?;
// A VotingKey is has a lot of wrapping...
// let secret_key = fendermint_crypto::SecretKey::random(&mut rng);
// let public_key = secret_key.public_key();
// let public_key = libp2p::identity::secp256k1::PublicKey::try_from_bytes(
// &public_key.serialize_compressed(),
// )
// .expect("secp256k1 public key");
// let public_key = libp2p::identity::PublicKey::from(public_key);
// let validator_key = VotingKey::from(public_key);
let validator_key = VotingKey(i);
validator_states.insert(
validator_key,
ValidatorState {
weight,
votes: HashSet::default(),
highest_vote: 0,
},
);
}
eprintln!("NEW STATE: {validator_states:?}");
Ok(VotingState {
chain,
validator_keys: validator_states.keys().cloned().collect(),
validator_states,
last_chain_block: 0,
last_finalized_block: 0,
})
}
/// New System Under Test.
fn new_system(&self, state: &Self::State) -> Self::System {
let power_table = state
.validator_states
.iter()
.filter(|(_, vs)| vs.weight > 0)
.map(|(vk, vs)| (vk.clone(), vs.weight))
.collect();
let last_finalized_block = (0, state.block_hash(0).expect("first block is not null"));
VoteTally::<VotingKey>::new(power_table, last_finalized_block)
}
/// New random command.
fn gen_command(
&self,
u: &mut Unstructured,
state: &Self::State,
) -> arbitrary::Result<Self::Command> {
let cmd = match u.int_in_range(0..=100)? {
// Add a block to the observed chain
i if i < 25 && state.can_extend() => {
let (height, hash) = state.next_chain_block().unwrap();
VotingCommand::ExtendChain(height, hash)
}
// Add a new (or repeated) vote by a validator, extending its chain
i if i < 70 => {
let vk = u.choose(&state.validator_keys)?;
let high_vote = state.validator_states[vk].highest_vote;
let max_vote: BlockHeight =
min(state.max_chain_height(), high_vote + MAX_VOTE_DELTA);
let min_vote: BlockHeight = high_vote.saturating_sub(MAX_VOTE_DELTA);
let mut vote_height = u.int_in_range(min_vote..=max_vote)?;
while state.block_hash(vote_height).is_none() {
vote_height -= 1;
}
let vote_hash = state
.block_hash(vote_height)
.expect("the first block not null");
VotingCommand::AddVote(vk.clone(), vote_height, vote_hash)
}
// Update the power table
i if i < 80 => {
// Move power from one validator to another (so we never have everyone be zero).
let vk1 = u.choose(&state.validator_keys)?;
let vk2 = u.choose(&state.validator_keys)?;
let w1 = state.validator_states[vk1].weight;
let w2 = state.validator_states[vk2].weight;
let delta = u.int_in_range(0..=w1)?;
let updates = vec![(vk1.clone(), w1 - delta), (vk2.clone(), w2 + delta)];
VotingCommand::UpdatePower(updates)
}
// Finalize a block
i if i < 90 && state.can_finalize() => {
let min_fin = state.last_finalized_block + 1;
let max_fin = min(
state.max_chain_height(),
state.last_finalized_block + MAX_FINALIZED_DELTA,
);
let mut fin_height = u.int_in_range(min_fin..=max_fin)?;
while state.block_hash(fin_height).is_none() {
fin_height -= 1;
}
let fin_hash = state
.block_hash(fin_height)
.expect("the first block not null");
// Might be a duplicate, which doesn't happen in the real ledger, but it's okay.
VotingCommand::BlockFinalized(fin_height, fin_hash)
}
_ => VotingCommand::FindQuorum,
};
Ok(cmd)
}
/// Apply the command on the System Under Test.
fn run_command(&self, system: &mut Self::System, cmd: &Self::Command) -> Self::Result {
eprintln!("RUN CMD {cmd:?}");
match cmd {
VotingCommand::ExtendChain(block_height, block_hash) => self.atomically_or_err(|| {
system
.add_block(*block_height, block_hash.clone())
.map(|_| None)
}),
VotingCommand::AddVote(vk, block_height, block_hash) => self.atomically_or_err(|| {
system
.add_vote(vk.clone(), *block_height, block_hash.clone())
.map(|_| None)
}),
VotingCommand::UpdatePower(power_table) => {
self.atomically_ok(|| system.update_power_table(power_table.clone()).map(|_| None))
}
VotingCommand::BlockFinalized(block_height, block_hash) => self.atomically_ok(|| {
system
.set_finalized(*block_height, block_hash.clone())
.map(|_| None)
}),
VotingCommand::FindQuorum => self.atomically_ok(|| system.find_quorum()),
}
}
/// Check that the result returned by the tally is correct.
fn check_result(&self, cmd: &Self::Command, pre_state: &Self::State, result: Self::Result) {
match cmd {
VotingCommand::ExtendChain(_, _) => {
result.expect("chain extension should succeed; not simulating unexpected heights");
}
VotingCommand::AddVote(vk, h, _) => {
if *h < pre_state.last_finalized_block {
result.expect("old votes are ignored");
} else if pre_state.validator_states[vk].weight == 0 {
result.expect_err("not accepting votes from validators with 0 power");
} else {
result.expect("vote should succeed; not simulating equivocations");
}
}
VotingCommand::FindQuorum => {
let result = result.expect("finding quorum should succeed");
let height = match result {
None => pre_state.last_finalized_block,
Some((height, hash)) => {
assert!(
pre_state.has_quorum(height),
"find: height {height} should have quorum"
);
assert!(
height > pre_state.last_finalized_block,
"find: should be above last finalized"
);
assert!(
height <= pre_state.last_chain_block,
"find: should not be beyond last chain"
);
assert_eq!(
pre_state.block_hash(height),
Some(hash),
"find: should be correct hash"
);
height
}
};
// Check that the first non-null block after the finalized one has no quorum.
let mut next = height + 1;
if next > pre_state.max_chain_height() || next > pre_state.last_chain_block {
return;
}
while next < pre_state.last_chain_block && pre_state.block_hash(next).is_none() {
next += 1;
}
assert!(
!pre_state.has_quorum(next),
"next block at {next} should not have quorum"
)
}
other => {
assert!(result.is_ok(), "{other:?} should succeed: {result:?}");
}
}
}
/// Update the model state.
fn next_state(&self, cmd: &Self::Command, mut state: Self::State) -> Self::State {
match cmd {
VotingCommand::ExtendChain(h, _) => {
state.last_chain_block = *h;
for vs in state.validator_states.values_mut() {
if vs.votes.contains(h) {
vs.highest_vote = *h;
}
}
}
VotingCommand::AddVote(vk, h, _) => {
let vs = state
.validator_states
.get_mut(vk)
.expect("validator exists");
if vs.weight > 0 {
vs.votes.insert(*h);
if *h <= state.last_chain_block {
vs.highest_vote = max(vs.highest_vote, *h);
}
}
}
VotingCommand::UpdatePower(pt) => {
for (vk, w) in pt {
state
.validator_states
.get_mut(vk)
.expect("validators exist")
.weight = *w;
}
}
VotingCommand::BlockFinalized(h, _) => {
state.last_finalized_block = *h;
state.last_chain_block = max(state.last_chain_block, state.last_finalized_block);
}
VotingCommand::FindQuorum => {}
}
state
}
/// Compare the tally agains the updated model state.
fn check_system(
&self,
_cmd: &Self::Command,
post_state: &Self::State,
post_system: &Self::System,
) -> bool {
let last_finalized_block = self.atomically(|| post_system.last_finalized_height());
assert_eq!(
last_finalized_block, post_state.last_finalized_block,
"last finalized blocks should match"
);
// Stop if we finalized everything.
last_finalized_block < post_state.max_chain_height()
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/resolver/src/ipld.rs | fendermint/vm/resolver/src/ipld.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{future::Future, time::Duration};
use async_stm::{atomically, queues::TQueueLike};
use ipc_api::subnet_id::SubnetID;
use ipc_ipld_resolver::Resolver;
use crate::pool::{ResolveQueue, ResolveTask};
/// The IPLD Resolver takes resolution tasks from the [ResolvePool] and
/// uses the [ipc_ipld_resolver] to fetch the content from subnets.
pub struct IpldResolver<C> {
client: C,
queue: ResolveQueue,
retry_delay: Duration,
own_subnet_id: SubnetID,
}
impl<C> IpldResolver<C>
where
C: Resolver + Clone + Send + 'static,
{
pub fn new(
client: C,
queue: ResolveQueue,
retry_delay: Duration,
own_subnet_id: SubnetID,
) -> Self {
Self {
client,
queue,
retry_delay,
own_subnet_id,
}
}
/// Start taking tasks from the resolver pool and resolving them using the IPLD Resolver.
pub async fn run(self) {
loop {
let (task, use_own_subnet) = atomically(|| {
let task = self.queue.read()?;
let use_own_subnet = task.use_own_subnet()?;
Ok((task, use_own_subnet))
})
.await;
start_resolve(
task,
self.client.clone(),
self.queue.clone(),
self.retry_delay,
if use_own_subnet {
Some(self.own_subnet_id.clone())
} else {
None
},
);
}
}
}
/// Run task resolution in the background, so as not to block items from other
/// subnets being tried.
fn start_resolve<C>(
task: ResolveTask,
client: C,
queue: ResolveQueue,
retry_delay: Duration,
own_subnet_id: Option<SubnetID>,
) where
C: Resolver + Send + 'static,
{
tokio::spawn(async move {
let from_theirs = client.resolve(task.cid(), task.subnet_id());
let from_own = own_subnet_id.map(|subnet_id| client.resolve(task.cid(), subnet_id));
let (theirs, own) = tokio::join!(from_theirs, future_opt(from_own));
let err = match (theirs, own) {
(Err(e), _) => {
tracing::error!(error = e.to_string(), "failed to submit resolution task");
// The service is no longer listening, we might as well stop taking new tasks from the queue.
// By not quitting we should see this error every time there is a new task, which is at least is a constant reminder.
return;
}
(Ok(Ok(())), _) | (_, Some(Ok(Ok(())))) => None,
(Ok(Err(e)), _) => Some(e),
};
match err {
None => {
tracing::debug!(cid = ?task.cid(), "content resolved");
atomically(|| task.set_resolved()).await;
}
Some(e) => {
tracing::error!(
cid = ?task.cid(),
error = e.to_string(),
"content resolution failed; retrying later"
);
schedule_retry(task, queue, retry_delay);
}
}
});
}
/// Run a future option, returning the optional result.
async fn future_opt<F, T>(f: Option<F>) -> Option<T>
where
F: Future<Output = T>,
{
match f {
None => None,
Some(f) => Some(f.await),
}
}
/// Part of error handling.
///
/// In our case we enqueued the task from transaction processing,
/// which will not happen again, so there is no point further
/// propagating this error back to the sender to deal with.
/// Rather, we should retry until we can conclude whether it will
/// ever complete. Some errors raised by the service are transitive,
/// such as having no peers currently, but that might change.
///
/// For now, let's retry the same task later.
fn schedule_retry(task: ResolveTask, queue: ResolveQueue, retry_delay: Duration) {
tokio::spawn(async move {
tokio::time::sleep(retry_delay).await;
tracing::debug!(cid = ?task.cid(), "retrying content resolution");
atomically(move || queue.write(task.clone())).await;
});
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/resolver/src/lib.rs | fendermint/vm/resolver/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod ipld;
pub mod pool;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/resolver/src/pool.rs | fendermint/vm/resolver/src/pool.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{collections::HashSet, hash::Hash};
use async_stm::{
queues::{tchan::TChan, TQueueLike},
Stm, TVar,
};
use cid::Cid;
use ipc_api::subnet_id::SubnetID;
/// CIDs we need to resolve from a specific source subnet, or our own.
pub type ResolveKey = (SubnetID, Cid);
/// Ongoing status of a resolution.
///
/// The status also keeps track of which original items mapped to the same resolution key.
/// These could be for example checkpoint of the same data with slightly different signatories.
/// Once resolved, they all become available at the same time.
#[derive(Clone)]
pub struct ResolveStatus<T> {
/// Indicate whether the content has been resolved.
///
/// If needed we can expand on this to include failure states.
is_resolved: TVar<bool>,
/// Indicate whether our peers in our own subnet should be contacted.
use_own_subnet: TVar<bool>,
/// The collection of items that all resolve to the same root CID and subnet.
items: TVar<im::HashSet<T>>,
}
impl<T> ResolveStatus<T>
where
T: Clone + Hash + Eq + PartialEq + Sync + Send + 'static,
{
pub fn new(item: T, use_own_subnet: bool) -> Self {
let mut items = im::HashSet::new();
items.insert(item);
Self {
is_resolved: TVar::new(false),
use_own_subnet: TVar::new(use_own_subnet),
items: TVar::new(items),
}
}
pub fn is_resolved(&self) -> Stm<bool> {
self.is_resolved.read_clone()
}
}
/// Tasks emitted by the pool for background resolution.
#[derive(Clone)]
pub struct ResolveTask {
/// Content to resolve.
key: ResolveKey,
/// Flag to flip when the task is done.
is_resolved: TVar<bool>,
/// Flag to flip if consensus reached a state on its own
/// where the majority of our own peers should have an item.
use_own_subnet: TVar<bool>,
}
impl ResolveTask {
pub fn cid(&self) -> Cid {
self.key.1
}
pub fn subnet_id(&self) -> SubnetID {
self.key.0.clone()
}
pub fn set_resolved(&self) -> Stm<()> {
self.is_resolved.write(true)
}
pub fn use_own_subnet(&self) -> Stm<bool> {
self.use_own_subnet.read_clone()
}
}
pub type ResolveQueue = TChan<ResolveTask>;
/// A data structure used to communicate resolution requirements and outcomes
/// between the resolver running in the background and the application waiting
/// for the results.
///
/// It is designed to resolve a single CID from a single subnet, per item,
/// with the possibility of multiple items mapping to the same CID.
///
/// If items needed to have multiple CIDs, the completion of all resolutions
/// culminating in the availability of the item, then we have to refactor this
/// component to track dependencies in a different way. For now I am assuming
/// that we can always design our messages in a way that there is a single root.
/// We can also use technical wrappers to submit the same item under different
/// guises and track the completion elsewhere.
#[derive(Clone, Default)]
pub struct ResolvePool<T>
where
T: Clone + Sync + Send + 'static,
{
/// The resolution status of each item.
items: TVar<im::HashMap<ResolveKey, ResolveStatus<T>>>,
/// Items queued for resolution.
queue: ResolveQueue,
}
impl<T> ResolvePool<T>
where
for<'a> ResolveKey: From<&'a T>,
T: Sync + Send + Clone + Hash + Eq + PartialEq + 'static,
{
pub fn new() -> Self {
Self {
items: Default::default(),
queue: Default::default(),
}
}
/// Queue to consume for task items.
///
/// Exposed as-is to allow re-queueing items.
pub fn queue(&self) -> ResolveQueue {
self.queue.clone()
}
/// Add an item to the resolution targets.
///
/// If the item is new, enqueue it from background resolution, otherwise just return its existing status.
pub fn add(&self, item: T, use_own_subnet: bool) -> Stm<ResolveStatus<T>> {
let key = ResolveKey::from(&item);
let mut items = self.items.read_clone()?;
if items.contains_key(&key) {
let status = items.get(&key).cloned().unwrap();
status.use_own_subnet.update(|u| u || use_own_subnet)?;
status.items.update_mut(|items| {
items.insert(item);
})?;
Ok(status)
} else {
let status = ResolveStatus::new(item, use_own_subnet);
items.insert(key.clone(), status.clone());
self.items.write(items)?;
self.queue.write(ResolveTask {
key,
is_resolved: status.is_resolved.clone(),
use_own_subnet: status.use_own_subnet.clone(),
})?;
Ok(status)
}
}
/// Return the status of an item. It can be queried for completion.
pub fn get_status(&self, item: &T) -> Stm<Option<ResolveStatus<T>>> {
let key = ResolveKey::from(item);
Ok(self.items.read()?.get(&key).cloned())
}
/// Collect resolved items, ready for execution.
///
/// The items collected are not removed, in case they need to be proposed again.
pub fn collect_resolved(&self) -> Stm<HashSet<T>> {
let mut resolved = HashSet::new();
let items = self.items.read()?;
for item in items.values() {
if item.is_resolved()? {
let items = item.items.read()?;
resolved.extend(items.iter().cloned());
}
}
Ok(resolved)
}
/// Await the next item to be resolved.
pub fn next(&self) -> Stm<ResolveTask> {
self.queue.read()
}
// TODO #197: Implement methods to remove executed items.
}
#[cfg(test)]
mod tests {
use async_stm::{atomically, queues::TQueueLike};
use cid::Cid;
use ipc_api::subnet_id::SubnetID;
#[derive(Clone, Hash, Eq, PartialEq, Debug)]
struct TestItem {
subnet_id: SubnetID,
cid: Cid,
}
impl TestItem {
pub fn dummy(root_id: u64) -> Self {
Self {
subnet_id: SubnetID::new_root(root_id),
cid: Cid::default(),
}
}
}
impl From<&TestItem> for ResolveKey {
fn from(value: &TestItem) -> Self {
(value.subnet_id.clone(), value.cid)
}
}
use super::{ResolveKey, ResolvePool};
#[tokio::test]
async fn add_new_item() {
let pool = ResolvePool::new();
let item = TestItem::dummy(0);
atomically(|| pool.add(item.clone(), false)).await;
atomically(|| {
assert!(pool.get_status(&item)?.is_some());
assert!(!pool.queue.is_empty()?);
assert_eq!(pool.queue.read()?.key, ResolveKey::from(&item));
Ok(())
})
.await;
}
#[tokio::test]
async fn add_existing_item() {
let pool = ResolvePool::new();
let item = TestItem::dummy(0);
// Add once.
atomically(|| pool.add(item.clone(), false)).await;
// Consume it from the queue.
atomically(|| {
assert!(!pool.queue.is_empty()?);
let _ = pool.queue.read()?;
Ok(())
})
.await;
// Add again.
atomically(|| pool.add(item.clone(), true)).await;
// Should not be queued a second time.
atomically(|| {
let status = pool.get_status(&item)?;
assert!(status.is_some());
assert!(status.unwrap().use_own_subnet.read_clone()?);
assert!(pool.queue.is_empty()?);
Ok(())
})
.await;
}
#[tokio::test]
async fn get_status() {
let pool = ResolvePool::new();
let item = TestItem::dummy(0);
let status1 = atomically(|| pool.add(item.clone(), false)).await;
let status2 = atomically(|| pool.get_status(&item))
.await
.expect("status exists");
// Complete the item.
atomically(|| {
assert!(!pool.queue.is_empty()?);
let task = pool.queue.read()?;
task.is_resolved.write(true)
})
.await;
// Check status.
atomically(|| {
assert!(status1.items.read()?.contains(&item));
assert!(status1.is_resolved()?);
assert!(status2.is_resolved()?);
Ok(())
})
.await;
}
#[tokio::test]
async fn collect_resolved() {
let pool = ResolvePool::new();
let item = TestItem::dummy(0);
atomically(|| {
let status = pool.add(item.clone(), false)?;
status.is_resolved.write(true)?;
let resolved1 = pool.collect_resolved()?;
let resolved2 = pool.collect_resolved()?;
assert_eq!(resolved1, resolved2);
assert!(resolved1.contains(&item));
Ok(())
})
.await;
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/event/src/lib.rs | fendermint/vm/event/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub type BlockHeight = u64;
/// Hex encoded block hash.
pub type BlockHashHex<'a> = &'a str;
#[derive(Debug, Default)]
pub struct NewParentView<'a> {
pub is_null: bool,
pub block_height: BlockHeight,
pub block_hash: Option<BlockHashHex<'a>>, // hex encoded, unless null block
pub num_msgs: usize,
pub num_validator_changes: usize,
}
#[derive(Debug, Default)]
pub struct ParentFinalityCommitted<'a> {
pub block_height: BlockHeight,
pub block_hash: BlockHashHex<'a>,
}
#[derive(Debug, Default)]
pub struct NewBottomUpCheckpoint<'a> {
pub block_height: BlockHeight,
pub block_hash: BlockHashHex<'a>,
pub num_msgs: usize,
pub next_configuration_number: u64,
}
/// This node sees something as final, but it's missing the quorum for it.
///
/// The opposite does not happen because we only look for quorum for things we see as final.
#[derive(Debug, Default)]
pub struct ParentFinalityMissingQuorum<'a> {
pub block_height: BlockHeight,
pub block_hash: BlockHashHex<'a>,
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/lib.rs | fendermint/vm/message/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::{multihash, multihash::MultihashDigest, Cid};
use fvm_ipld_encoding::{to_vec, Error as IpldError, DAG_CBOR};
use serde::Serialize;
pub mod cetf;
pub mod chain;
pub mod conv;
pub mod ipc;
pub mod query;
pub mod signed;
/// Calculate the CID using Blake2b256 digest and DAG_CBOR.
///
/// This used to be part of the `Cbor` trait, which is deprecated.
pub fn cid<T: Serialize>(value: &T) -> Result<Cid, IpldError> {
let bz = to_vec(value)?;
let digest = multihash::Code::Blake2b256.digest(&bz);
let cid = Cid::new_v1(DAG_CBOR, digest);
Ok(cid)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/chain.rs | fendermint/vm/message/src/chain.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use serde::{Deserialize, Serialize};
use crate::{cetf::CetfMessage, ipc::IpcMessage, signed::SignedMessage};
/// The different kinds of messages that can appear in blocks, ie. the transactions
/// we can receive from Tendermint through the ABCI.
///
/// Unlike Filecoin, we don't have `Unsigned` messages here. In Filecoin, the messages
/// signed by BLS signatures are aggregated to the block level, and their original
/// signatures are stripped from the messages, to save space. Tendermint Core will
/// not do this for us (perhaps with ABCI++ Vote Extensions we could do it), though.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum ChainMessage {
/// A message that can be passed on to the FVM as-is.
Signed(SignedMessage),
/// Messages involved in InterPlanetaryConsensus, which are basically top-down and bottom-up
/// checkpoints that piggy-back on the Tendermint voting mechanism for finality and CID resolution.
///
/// Possible mechanisms include:
/// * Proposing "for resolution" - A message with a CID proposed for async resolution. These would be bottom-up
/// messages that need to be relayed, so they also include some relayer identity and signature, for rewards.
/// * Proposing "for execution" - A message with a CID with proven availability and finality, ready to be executed.
/// Such messages are proposed by the validators themselves, and their execution might trigger rewards for others.
///
/// Because of the involvement of data availability voting and CID resolution, these messages require support
/// from the application, which is why they are handled in a special way.
Ipc(IpcMessage),
Cetf(CetfMessage),
}
#[cfg(feature = "arb")]
mod arb {
use super::ChainMessage;
use crate::{cetf::CetfMessage, ipc::IpcMessage, signed::SignedMessage};
impl quickcheck::Arbitrary for ChainMessage {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
match u8::arbitrary(g) % 3 {
0 => ChainMessage::Signed(SignedMessage::arbitrary(g)),
1 => ChainMessage::Ipc(IpcMessage::arbitrary(g)),
// _ => ChainMessage::Cetf(CetfMessage::arbitrary(g)),
_ => todo!(),
}
}
}
}
#[cfg(test)]
mod tests {
use crate::chain::ChainMessage;
use quickcheck_macros::quickcheck;
#[quickcheck]
fn chain_message_cbor(value0: ChainMessage) {
let repr = fvm_ipld_encoding::to_vec(&value0).expect("failed to encode");
let value1: ChainMessage =
fvm_ipld_encoding::from_slice(repr.as_ref()).expect("failed to decode");
assert_eq!(value1, value0)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/cetf.rs | fendermint/vm/message/src/cetf.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fendermint_actor_cetf::BlsSignature;
use serde::{Deserialize, Serialize};
/// Messages involved in Cetf.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[allow(clippy::large_enum_variant)]
pub enum CetfMessage {
CetfTag(u64, BlsSignature),
// BlockHeightTag(u64, BlsSignature),
}
#[cfg(feature = "arb")]
mod arb {
// use quickcheck::{Arbitrary, Gen};
// use super::CetfMessage;
// impl Arbitrary for CetfMessage {
// fn arbitrary(u: &mut Gen) -> Self {
// match u8::arbitrary(u) % 3 {
// 0 => CetfMessage::CetfTag(u64::arbitrary(u), Vec::arbitrary(u)),
// 1 => CetfMessage::BlockHashTag(Vec::arbitrary(u), Vec::arbitrary(u)),
// _ => CetfMessage::BlockHeightTag(u64::arbitrary(u), Vec::arbitrary(u)),
// }
// }
// }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/query.rs | fendermint/vm/message/src/query.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::{
address::Address, econ::TokenAmount, error::ExitCode, message::Message as FvmMessage,
version::NetworkVersion,
};
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use fendermint_vm_encoding::IsHumanReadable;
/// Height at which to run a query.
#[derive(Debug, Clone, PartialEq, Eq, Copy, Default)]
pub enum FvmQueryHeight {
/// Choose whatever the latest committed state is.
#[default]
Committed,
/// Take pending changes (ie. the "check state") into account,
/// or if there are not pending changes then use the latest commit.
///
/// This option is less performant because a shared state needs to be locked.
Pending,
/// Run it on some historical block height, if it's still available.
/// Otherwise use the latest commit.
Height(u64),
}
impl From<u64> for FvmQueryHeight {
fn from(value: u64) -> Self {
match value {
0 => FvmQueryHeight::Committed,
// Tendermint's `Height` type makes sure it fits in `i64`.
// 0 is used as default height in queries; we can use MAX for pending.
n if n >= i64::MAX as u64 => FvmQueryHeight::Pending,
n => FvmQueryHeight::Height(n),
}
}
}
impl From<FvmQueryHeight> for u64 {
fn from(value: FvmQueryHeight) -> Self {
match value {
FvmQueryHeight::Committed => 0,
FvmQueryHeight::Pending => i64::MAX as u64,
FvmQueryHeight::Height(n) => n,
}
}
}
/// Queries over the IPLD blockstore or the state tree.
///
/// Maybe we can have some common queries over the known state of built-in actors,
/// and actors supporting IPC, or FEVM.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum FvmQuery {
/// Query something from the IPLD store.
///
/// The response is the raw bytes from the store.
Ipld(Cid),
/// Query the state of an actor.
///
/// The response is IPLD encoded `ActorState`.
ActorState(Address),
/// Immediately execute an FVM message, without adding it to the blockchain.
///
/// The main motivation for this method is to facilitate `eth_call`.
Call(Box<FvmMessage>),
/// Estimate the gas required to execute a message.
///
/// This is effectively a [`Call`], but it's included so that in the future
/// it can do more sophisticated things with premiums, caps and over estimation.
EstimateGas(Box<FvmMessage>),
/// Retrieve the slowly changing state parameters that aren't part of the state tree.
StateParams,
/// Query the built-in actors known by the System actor.
BuiltinActors,
}
/// State of all actor implementations.
///
/// This is a copy of `fvm::state_tree::ActorState` so that this crate
/// doesn't need a dependency on `fvm` itself, only `fvm_shared`.
///
/// Note that it changes changes `Serialize_tuple` into `Serialize`
/// to preserve the field names; the intention is to display the results
/// as JSON, where tuple serialization wouldn't be as useful.
#[serde_as]
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct ActorState {
/// Link to code for the actor.
#[serde_as(as = "IsHumanReadable")]
pub code: Cid,
/// Link to the state of the actor.
#[serde_as(as = "IsHumanReadable")]
pub state: Cid,
/// Sequence of the actor.
pub sequence: u64,
/// Tokens available to the actor.
#[serde_as(as = "IsHumanReadable")]
pub balance: TokenAmount,
/// The actor's "delegated" address, if assigned.
///
/// This field is set on actor creation and never modified.
#[serde_as(as = "Option<IsHumanReadable>")]
pub delegated_address: Option<Address>,
}
/// Result of gas estimation.
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct GasEstimate {
/// Exit code, potentially signalling out-of-gas errors, or that the actor was not found.
pub exit_code: ExitCode,
/// Any information about failed estimations from `ApplyRet::failure_info`.
pub info: String,
/// Potential revert data as it appreared in `ApplyRet`.
pub return_data: RawBytes,
/// Gas used during the probing.
///
/// Potentially contains an over-estimate, but it should be within the account balance limit.
pub gas_limit: u64,
}
/// Slowly changing state parameters outside the state tree.
#[serde_as]
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct StateParams {
/// Base fee.
///
/// Its evolution can depend on the size of blocks, contention, etc.
#[serde_as(as = "IsHumanReadable")]
pub base_fee: TokenAmount,
/// Circulating supply.
///
/// Its value depends on the amount moving in/out of the subnet.
#[serde_as(as = "IsHumanReadable")]
pub circ_supply: TokenAmount,
/// Numeric chain ID for signing transactions.
///
/// Its value is most likely fixed since genesis, but it might change during a fork.
pub chain_id: u64,
/// Current network version.
pub network_version: NetworkVersion,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BuiltinActors {
/// Registry of built-in actors known by the system.
pub registry: Vec<(String, Cid)>,
}
#[cfg(feature = "arb")]
mod arb {
use fendermint_testing::arb::{ArbAddress, ArbCid, ArbTokenAmount};
use crate::signed::SignedMessage;
use super::{ActorState, FvmQuery};
impl quickcheck::Arbitrary for FvmQuery {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
match u8::arbitrary(g) % 5 {
0 => FvmQuery::Ipld(ArbCid::arbitrary(g).0),
1 => FvmQuery::ActorState(ArbAddress::arbitrary(g).0),
2 => FvmQuery::Call(Box::new(SignedMessage::arbitrary(g).into_message())),
3 => FvmQuery::EstimateGas(Box::new(SignedMessage::arbitrary(g).into_message())),
_ => FvmQuery::StateParams,
}
}
}
impl quickcheck::Arbitrary for ActorState {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
code: ArbCid::arbitrary(g).0,
state: ArbCid::arbitrary(g).0,
sequence: u64::arbitrary(g),
balance: ArbTokenAmount::arbitrary(g).0,
delegated_address: Option::<ArbAddress>::arbitrary(g).map(|a| a.0),
}
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/signed.rs | fendermint/vm/message/src/signed.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::anyhow;
use cid::multihash::MultihashDigest;
use cid::Cid;
use ethers_core::types as et;
use ethers_core::types::transaction::eip2718::TypedTransaction;
use fendermint_crypto::{PublicKey, SecretKey};
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_actor_interface::{eam, evm};
use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple};
use fvm_shared::address::{Address, Payload};
use fvm_shared::chainid::ChainID;
use fvm_shared::crypto::signature::ops::recover_secp_public_key;
use fvm_shared::crypto::signature::{Signature, SignatureType, SECP_SIG_LEN};
use fvm_shared::message::Message;
use thiserror::Error;
use crate::conv::from_fvm;
enum Signable {
/// Pair of transaction hash and from.
Ethereum((et::H256, et::H160)),
/// Bytes to be passed to the FVM Signature for hashing or verification.
Regular(Vec<u8>),
/// Same signature as `Regular` but using an Ethereum account hash as sender.
/// This is used if the recipient of the message is not an Ethereum account.
RegularFromEth((Vec<u8>, et::H160)),
}
#[derive(Error, Debug)]
pub enum SignedMessageError {
#[error("message cannot be serialized")]
Ipld(#[from] fvm_ipld_encoding::Error),
#[error("invalid signature: {0}")]
InvalidSignature(String),
#[error("message cannot be converted to ethereum: {0}")]
Ethereum(#[from] anyhow::Error),
}
/// Domain specific transaction hash.
///
/// Some tools like ethers.js refuse to accept Tendermint hashes,
/// which use a different algorithm than Ethereum.
///
/// We can potentially extend this list to include CID based indexing.
#[derive(Debug, Clone)]
pub enum DomainHash {
Eth([u8; 32]),
}
/// Represents a wrapped message with signature bytes.
///
/// This is the message that the client needs to send, but only the `message`
/// part is signed over.
///
/// Tuple serialization is used because it might result in a more compact data structure for storage,
/// and because the `Message` is already serialized as a tuple.
#[derive(PartialEq, Clone, Debug, Serialize_tuple, Deserialize_tuple, Hash, Eq)]
pub struct SignedMessage {
pub message: Message,
pub signature: Signature,
}
impl SignedMessage {
/// Generate a new signed message from fields.
///
/// The signature will not be verified.
pub fn new_unchecked(message: Message, signature: Signature) -> SignedMessage {
SignedMessage { message, signature }
}
/// Create a signed message.
pub fn new_secp256k1(
message: Message,
sk: &SecretKey,
chain_id: &ChainID,
) -> Result<Self, SignedMessageError> {
let signature = match Self::signable(&message, chain_id)? {
Signable::Ethereum((hash, _)) => sign_eth(sk, hash),
Signable::Regular(data) => sign_regular(sk, &data),
Signable::RegularFromEth((data, _)) => sign_regular(sk, &data),
};
Ok(Self { message, signature })
}
/// Calculate the CID of an FVM message.
pub fn cid(message: &Message) -> Result<Cid, fvm_ipld_encoding::Error> {
crate::cid(message)
}
/// Calculate the bytes that need to be signed.
///
/// The [`ChainID`] is used as a replay attack protection, a variation of
/// https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0039.md
fn signable(message: &Message, chain_id: &ChainID) -> Result<Signable, SignedMessageError> {
// Here we look at the sender to decide what scheme to use for hashing.
//
// This is in contrast to https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0055.md#delegated-signature-type
// which introduces a `SignatureType::Delegated`, in which case the signature check should be done by the recipient actor.
//
// However, that isn't implemented, and adding that type would mean copying the entire `Signature` type into Fendermint,
// similarly to how Forest did it https://github.com/ChainSafe/forest/blob/b3c5efe6cc81607da945227bb41c60cec47909c3/utils/forest_shim/src/crypto.rs#L166
//
// Instead of special casing on the signature type, we are special casing on the sender,
// which should be okay because the CLI only uses `f1` addresses and the Ethereum API only uses `f410` addresses,
// so at least for now they are easy to tell apart: any `f410` address is coming from Ethereum API and must have
// been signed according to the Ethereum scheme, and it could not have been signed by an `f1` address, it doesn't
// work with regular accounts.
//
// We detect the case where the recipient is not an ethereum address. If that is the case then use regular signing rules,
// which should allow messages from ethereum accounts to go to any other type of account, e.g. custom Wasm actors.
match maybe_eth_address(&message.from) {
Some(addr) if is_eth_addr_compat(&message.to) => {
let tx: TypedTransaction = from_fvm::to_eth_transaction_request(message, chain_id)
.map_err(SignedMessageError::Ethereum)?
.into();
Ok(Signable::Ethereum((tx.sighash(), addr)))
}
Some(addr) => {
let mut data = Self::cid(message)?.to_bytes();
data.extend(chain_id_bytes(chain_id).iter());
Ok(Signable::RegularFromEth((data, addr)))
}
None => {
let mut data = Self::cid(message)?.to_bytes();
data.extend(chain_id_bytes(chain_id).iter());
Ok(Signable::Regular(data))
}
}
}
/// Verify that the message CID was signed by the `from` address.
pub fn verify_signature(
message: &Message,
signature: &Signature,
chain_id: &ChainID,
) -> Result<(), SignedMessageError> {
match Self::signable(message, chain_id)? {
Signable::Ethereum((hash, from)) => {
// If the sender is ethereum, recover the public key from the signature (which verifies it),
// then turn it into an `EthAddress` and verify it matches the `from` of the message.
let sig = from_fvm::to_eth_signature(signature, true)
.map_err(SignedMessageError::Ethereum)?;
let rec = sig
.recover(hash)
.map_err(|e| SignedMessageError::Ethereum(anyhow!(e)))?;
if rec == from {
verify_eth_method(message)
} else {
Err(SignedMessageError::InvalidSignature(format!("the Ethereum delegated address did not match the one recovered from the signature (sighash = {:?})", hash)))
}
}
Signable::Regular(data) => {
// This works when `from` corresponds to the signature type.
signature
.verify(&data, &message.from)
.map_err(SignedMessageError::InvalidSignature)
}
Signable::RegularFromEth((data, from)) => {
let rec = recover_secp256k1(signature, &data)
.map_err(SignedMessageError::InvalidSignature)?;
let rec_addr = EthAddress::from(rec);
if rec_addr.0 == from.0 {
Ok(())
} else {
Err(SignedMessageError::InvalidSignature("the Ethereum delegated address did not match the one recovered from the signature".to_string()))
}
}
}
}
/// Calculate an optional hash that ecosystem tools expect.
pub fn domain_hash(
&self,
chain_id: &ChainID,
) -> Result<Option<DomainHash>, SignedMessageError> {
if is_eth_addr_deleg(&self.message.from) && is_eth_addr_compat(&self.message.to) {
let tx: TypedTransaction =
from_fvm::to_eth_transaction_request(self.message(), chain_id)
.map_err(SignedMessageError::Ethereum)?
.into();
let sig = from_fvm::to_eth_signature(self.signature(), true)
.map_err(SignedMessageError::Ethereum)?;
let rlp = tx.rlp_signed(&sig);
let hash = cid::multihash::Code::Keccak256.digest(&rlp);
let hash = hash.digest().try_into().expect("Keccak256 is 32 bytes");
Ok(Some(DomainHash::Eth(hash)))
} else {
// Use the default transaction ID.
Ok(None)
}
}
/// Verifies that the from address of the message generated the signature.
pub fn verify(&self, chain_id: &ChainID) -> Result<(), SignedMessageError> {
Self::verify_signature(&self.message, &self.signature, chain_id)
}
/// Returns reference to the unsigned message.
pub fn message(&self) -> &Message {
&self.message
}
/// Returns signature of the signed message.
pub fn signature(&self) -> &Signature {
&self.signature
}
/// Consumes self and returns it's unsigned message.
pub fn into_message(self) -> Message {
self.message
}
/// Checks if the signed message is a BLS message.
pub fn is_bls(&self) -> bool {
self.signature.signature_type() == SignatureType::BLS
}
/// Checks if the signed message is a SECP message.
pub fn is_secp256k1(&self) -> bool {
self.signature.signature_type() == SignatureType::Secp256k1
}
}
/// Sign a transaction pre-image using Blake2b256, in a way that [Signature::verify] expects it.
fn sign_regular(sk: &SecretKey, data: &[u8]) -> Signature {
let hash: [u8; 32] = blake2b_simd::Params::new()
.hash_length(32)
.to_state()
.update(data)
.finalize()
.as_bytes()
.try_into()
.unwrap();
sign_secp256k1(sk, &hash)
}
/// Sign a transaction pre-image in the same way Ethereum clients would sign it.
fn sign_eth(sk: &SecretKey, hash: et::H256) -> Signature {
sign_secp256k1(sk, &hash.0)
}
/// Turn a [`ChainID`] into bytes. Uses big-endian encoding.
pub fn chain_id_bytes(chain_id: &ChainID) -> [u8; 8] {
u64::from(*chain_id).to_be_bytes()
}
/// Return the 20 byte Ethereum address if the address is that kind of delegated one.
fn maybe_eth_address(addr: &Address) -> Option<et::H160> {
match addr.payload() {
Payload::Delegated(addr)
if addr.namespace() == eam::EAM_ACTOR_ID && addr.subaddress().len() == 20 =>
{
Some(et::H160::from_slice(addr.subaddress()))
}
_ => None,
}
}
/// Check if the address can be converted to an Ethereum one.
fn is_eth_addr_compat(addr: &Address) -> bool {
from_fvm::to_eth_address(addr).is_ok()
}
/// Check if the address is an Ethereum delegated one.
fn is_eth_addr_deleg(addr: &Address) -> bool {
maybe_eth_address(addr).is_some()
}
/// Verify that the method ID and the recipient are one of the allowed combination,
/// which for example is set by [from_eth::to_fvm_message].
///
/// The method ID is not part of the signature, so someone could modify it, which is
/// why we have to check explicitly that there is nothing untowards going on.
fn verify_eth_method(msg: &Message) -> Result<(), SignedMessageError> {
if msg.to == eam::EAM_ACTOR_ADDR {
if msg.method_num != eam::Method::CreateExternal as u64 {
return Err(SignedMessageError::Ethereum(anyhow!(
"The EAM actor can only be called with CreateExternal; got {}",
msg.method_num
)));
}
} else if msg.method_num != evm::Method::InvokeContract as u64 {
return Err(SignedMessageError::Ethereum(anyhow!(
"An EVM actor can only be called with InvokeContract; got {} - {}",
msg.to,
msg.method_num
)));
}
Ok(())
}
/// Sign a hash using the secret key.
pub fn sign_secp256k1(sk: &SecretKey, hash: &[u8; 32]) -> Signature {
let (sig, recovery_id) = sk.sign(hash);
let mut signature = [0u8; SECP_SIG_LEN];
signature[..64].copy_from_slice(&sig.serialize());
signature[64] = recovery_id.serialize();
Signature {
sig_type: SignatureType::Secp256k1,
bytes: signature.to_vec(),
}
}
/// Recover the public key from a Secp256k1
///
/// Based on how `Signature` does it, but without the final address hashing.
fn recover_secp256k1(signature: &Signature, data: &[u8]) -> Result<PublicKey, String> {
let signature = &signature.bytes;
if signature.len() != SECP_SIG_LEN {
return Err(format!(
"Invalid Secp256k1 signature length. Was {}, must be 65",
signature.len()
));
}
// blake2b 256 hash
let hash = blake2b_simd::Params::new()
.hash_length(32)
.to_state()
.update(data)
.finalize();
let mut sig = [0u8; SECP_SIG_LEN];
sig[..].copy_from_slice(signature);
let rec_key =
recover_secp_public_key(hash.as_bytes().try_into().expect("fixed array size"), &sig)
.map_err(|e| e.to_string())?;
Ok(rec_key)
}
/// Signed message with an invalid random signature.
#[cfg(feature = "arb")]
mod arb {
use fendermint_testing::arb::ArbMessage;
use fvm_shared::crypto::signature::Signature;
use super::SignedMessage;
/// An arbitrary `SignedMessage` that is at least as consistent as required for serialization.
impl quickcheck::Arbitrary for SignedMessage {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
message: ArbMessage::arbitrary(g).0,
signature: Signature::arbitrary(g),
}
}
}
}
#[cfg(test)]
mod tests {
use fendermint_vm_actor_interface::eam::EthAddress;
use fvm_shared::{
address::{Address, Payload, Protocol},
chainid::ChainID,
};
use quickcheck_macros::quickcheck;
use crate::conv::tests::{EthMessage, KeyPair};
use super::SignedMessage;
#[quickcheck]
fn chain_id_in_signature(
msg: SignedMessage,
chain_id: u64,
key: KeyPair,
) -> Result<(), String> {
let KeyPair { sk, pk } = key;
let chain_id0 = ChainID::from(chain_id);
let chain_id1 = ChainID::from(chain_id.overflowing_add(1).0);
let mut msg = msg.into_message();
msg.from = Address::new_secp256k1(&pk.serialize())
.map_err(|e| format!("failed to conver to address: {e}"))?;
let signed = SignedMessage::new_secp256k1(msg, &sk, &chain_id0)
.map_err(|e| format!("signing failed: {e}"))?;
signed
.verify(&chain_id0)
.map_err(|e| format!("verifying failed: {e}"))?;
if signed.verify(&chain_id1).is_ok() {
return Err("verifying with a different chain ID should fail".into());
}
Ok(())
}
#[quickcheck]
fn eth_sign_and_verify(msg: EthMessage, chain_id: u64, key: KeyPair) -> Result<(), String> {
let chain_id = ChainID::from(chain_id);
let KeyPair { sk, pk } = key;
// Set the sender to the address we are going to sign with.
let ea = EthAddress::from(pk);
let mut msg = msg.0;
msg.from = Address::from(ea);
let signed =
SignedMessage::new_secp256k1(msg, &sk, &chain_id).map_err(|e| e.to_string())?;
signed.verify(&chain_id).map_err(|e| e.to_string())
}
#[quickcheck]
fn eth_sign_and_tamper(msg: EthMessage, chain_id: u64, key: KeyPair) -> Result<(), String> {
let chain_id = ChainID::from(chain_id);
let KeyPair { sk, pk } = key;
// Set the sender to the address we are going to sign with.
let ea = EthAddress::from(pk);
let mut msg = msg.0;
msg.from = Address::from(ea);
let mut signed =
SignedMessage::new_secp256k1(msg, &sk, &chain_id).map_err(|e| e.to_string())?;
// Set the recipient to an address which is a different kind, but the same hash: pretend that it's an f1 address.
// If this succeeded, an attacker can change the recipient of the message and thus funds can get lost.
let Payload::Delegated(da) = signed.message.to.payload() else {
return Err("expected delegated addresss".to_string());
};
let mut bz = da.subaddress().to_vec();
bz.insert(0, Protocol::Secp256k1 as u8);
signed.message.to = Address::from_bytes(&bz).map_err(|e| e.to_string())?;
if signed.verify(&chain_id).is_ok() {
return Err("signature verification should have failed".to_string());
}
Ok(())
}
/// Check that we can send from an ethereum account to a non-ethereum one and sign it.
#[quickcheck]
fn eth_to_non_eth_sign_and_verify(msg: EthMessage, chain_id: u64, from: KeyPair, to: KeyPair) {
let chain_id = ChainID::from(chain_id);
let mut msg = msg.0;
msg.from = Address::from(EthAddress::from(from.pk));
msg.to = Address::new_secp256k1(&to.pk.serialize()).expect("f1 address");
let signed =
SignedMessage::new_secp256k1(msg, &from.sk, &chain_id).expect("message can be signed");
signed.verify(&chain_id).expect("signature should be valid")
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/ipc.rs | fendermint/vm/message/src/ipc.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use fvm_shared::{
address::Address, clock::ChainEpoch, crypto::signature::Signature, econ::TokenAmount,
};
use ipc_api::subnet_id::SubnetID;
use serde::{Deserialize, Serialize};
/// Messages involved in InterPlanetary Consensus.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[allow(clippy::large_enum_variant)]
pub enum IpcMessage {
/// A bottom-up checkpoint coming from a child subnet "for resolution", relayed by a user of the parent subnet for a reward.
///
/// The reward can be given immediately upon the validation of the quorum certificate in the checkpoint,
/// or later during execution, once data availability has been confirmed.
BottomUpResolve(SignedRelayedMessage<CertifiedMessage<BottomUpCheckpoint>>),
/// A bottom-up checkpoint proposed "for execution" by the parent subnet validators, provided that the majority of them
/// have the data available to them, already resolved.
///
/// To prove that the data is available, we can either use the ABCI++ "process proposal" mechanism,
/// or we can gossip votes using the _IPLD Resolver_ and attach them as a quorum certificate.
BottomUpExec(CertifiedMessage<BottomUpCheckpoint>),
/// A top-down checkpoint parent finality proposal. This proposal should contain the latest parent
/// state that to be checked and voted by validators.
TopDownExec(ParentFinality),
}
/// A message relayed by a user on the current subnet.
///
/// The relayer pays for the inclusion of the message in the ledger,
/// but not necessarily for the execution of its contents.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct RelayedMessage<T> {
/// The relayed message.
pub message: T,
/// The address (public key) of the relayer in the current subnet.
pub relayer: Address,
/// The nonce of the relayer in the current subnet.
pub sequence: u64,
/// The gas the relayer is willing to spend on the verification of the relayed message.
pub gas_limit: u64,
pub gas_fee_cap: TokenAmount,
pub gas_premium: TokenAmount,
}
/// Relayed messages are signed by the relayer, so we can rightfully charge them message inclusion costs.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct SignedRelayedMessage<T> {
/// The relayed message with the relayer identity.
pub message: RelayedMessage<T>,
/// The signature of the relayer, for cost and reward attribution.
pub signature: Signature,
}
/// A message with a quorum certificate from a group of validators.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct CertifiedMessage<T> {
/// The message the validators signed.
pub message: T,
/// The quorum certificate.
pub certificate: MultiSig,
}
/// A quorum certificate consisting of a simple multi-sig.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct MultiSig {
pub signatures: Vec<ValidatorSignature>,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct ValidatorSignature {
pub validator: Address,
pub signature: Signature,
}
/// A periodic bottom-up checkpoints contains the source subnet ID (to protect against replay attacks),
/// a block height (for sequencing), any potential handover to the next validator set, and a pointer
/// to the messages that need to be resolved and executed by the parent validators.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct BottomUpCheckpoint {
/// Which subnet is the checkpoint coming from.
pub subnet_id: SubnetID,
/// Block height of this checkpoint.
pub height: ChainEpoch,
/// Which validator set is going to sign the *next* checkpoint.
/// The parent subnet already expects the last validator set to sign this one.
pub next_validator_set_id: u64,
/// Pointer at all the bottom-up messages included in this checkpoint.
pub bottom_up_messages: Cid, // TODO: Use TCid
}
/// A proposal of the parent view that validators will be voting on.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct ParentFinality {
/// Block height of this proposal.
pub height: ChainEpoch,
/// The block hash of the parent, expressed as bytes
pub block_hash: Vec<u8>,
}
#[cfg(feature = "arb")]
mod arb {
use crate::ipc::ParentFinality;
use fendermint_testing::arb::{ArbAddress, ArbCid, ArbSubnetID, ArbTokenAmount};
use fvm_shared::crypto::signature::Signature;
use quickcheck::{Arbitrary, Gen};
use super::{
BottomUpCheckpoint, CertifiedMessage, IpcMessage, MultiSig, RelayedMessage,
SignedRelayedMessage, ValidatorSignature,
};
impl Arbitrary for IpcMessage {
fn arbitrary(g: &mut Gen) -> Self {
match u8::arbitrary(g) % 3 {
0 => IpcMessage::BottomUpResolve(Arbitrary::arbitrary(g)),
1 => IpcMessage::BottomUpExec(Arbitrary::arbitrary(g)),
_ => IpcMessage::TopDownExec(Arbitrary::arbitrary(g)),
}
}
}
impl<T: Arbitrary> Arbitrary for SignedRelayedMessage<T> {
fn arbitrary(g: &mut Gen) -> Self {
Self {
message: RelayedMessage::arbitrary(g),
signature: Signature::arbitrary(g),
}
}
}
impl<T: Arbitrary> Arbitrary for RelayedMessage<T> {
fn arbitrary(g: &mut Gen) -> Self {
Self {
message: T::arbitrary(g),
relayer: ArbAddress::arbitrary(g).0,
sequence: u64::arbitrary(g),
gas_limit: u64::arbitrary(g),
gas_fee_cap: ArbTokenAmount::arbitrary(g).0,
gas_premium: ArbTokenAmount::arbitrary(g).0,
}
}
}
impl<T: Arbitrary> Arbitrary for CertifiedMessage<T> {
fn arbitrary(g: &mut Gen) -> Self {
Self {
message: T::arbitrary(g),
certificate: Arbitrary::arbitrary(g),
}
}
}
impl Arbitrary for ValidatorSignature {
fn arbitrary(g: &mut Gen) -> Self {
Self {
validator: ArbAddress::arbitrary(g).0,
signature: Signature::arbitrary(g),
}
}
}
impl Arbitrary for MultiSig {
fn arbitrary(g: &mut Gen) -> Self {
let mut signatures = Vec::new();
for _ in 0..*g.choose(&[1, 3, 5]).unwrap() {
signatures.push(ValidatorSignature::arbitrary(g));
}
Self { signatures }
}
}
impl Arbitrary for BottomUpCheckpoint {
fn arbitrary(g: &mut Gen) -> Self {
Self {
subnet_id: ArbSubnetID::arbitrary(g).0,
height: u32::arbitrary(g).into(),
next_validator_set_id: Arbitrary::arbitrary(g),
bottom_up_messages: ArbCid::arbitrary(g).0,
}
}
}
impl Arbitrary for ParentFinality {
fn arbitrary(g: &mut Gen) -> Self {
Self {
height: u32::arbitrary(g).into(),
block_hash: Vec::arbitrary(g),
}
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/conv/from_fvm.rs | fendermint/vm/message/src/conv/from_fvm.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Helper methods to convert between FVM and Ethereum data formats.
use std::str::FromStr;
use anyhow::anyhow;
use anyhow::bail;
use ethers_core::types as et;
use fendermint_crypto::{RecoveryId, Signature};
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_actor_interface::eam::EAM_ACTOR_ID;
use fvm_ipld_encoding::BytesDe;
use fvm_shared::address::Address;
use fvm_shared::bigint::BigInt;
use fvm_shared::chainid::ChainID;
use fvm_shared::crypto::signature::Signature as FvmSignature;
use fvm_shared::crypto::signature::SignatureType;
use fvm_shared::crypto::signature::SECP_SIG_LEN;
use fvm_shared::message::Message;
use fvm_shared::{address::Payload, econ::TokenAmount};
use lazy_static::lazy_static;
lazy_static! {
pub static ref MAX_U256: BigInt = BigInt::from_str(&et::U256::MAX.to_string()).unwrap();
}
pub fn to_eth_tokens(amount: &TokenAmount) -> anyhow::Result<et::U256> {
if amount.atto() > &MAX_U256 {
Err(anyhow!("TokenAmount > U256.MAX"))
} else {
let (_sign, bz) = amount.atto().to_bytes_be();
Ok(et::U256::from_big_endian(&bz))
}
}
pub fn to_eth_address(addr: &Address) -> anyhow::Result<Option<et::H160>> {
match addr.payload() {
Payload::Delegated(d) if d.namespace() == EAM_ACTOR_ID && d.subaddress().len() == 20 => {
Ok(Some(et::H160::from_slice(d.subaddress())))
}
// Deployments should be sent with an empty `to`.
Payload::ID(EAM_ACTOR_ID) => Ok(None),
// It should be possible to send to an ethereum account by ID.
Payload::ID(id) => Ok(Some(et::H160::from_slice(&EthAddress::from_id(*id).0))),
// The following fit into the type but are not valid ethereum addresses.
// Return an error so we can prevent tampering with the address when we convert ethereum transactions to FVM messages.
_ => bail!("not an Ethereum address: {addr}"), // f1, f2, f3 or an invalid delegated address.
}
}
fn parse_secp256k1(sig: &[u8]) -> anyhow::Result<(RecoveryId, Signature)> {
if sig.len() != SECP_SIG_LEN {
return Err(anyhow!("unexpected Secp256k1 length: {}", sig.len()));
}
// generate types to recover key from
let rec_id = RecoveryId::parse(sig[64])?;
// Signature value without recovery byte
let mut s = [0u8; 64];
s.clone_from_slice(&sig[..64]);
// generate Signature
let sig = Signature::parse_standard(&s)?;
Ok((rec_id, sig))
}
/// Convert an FVM signature, which is a normal Secp256k1 signature, to an Ethereum one,
/// where the `v` is optionally shifted by 27 to make it compatible with Solidity.
///
/// In theory we could incorporate the chain ID into it as well, but that hasn't come up.
///
/// Ethers normalizes Ethereum signatures during conversion to RLP.
pub fn to_eth_signature(sig: &FvmSignature, normalized: bool) -> anyhow::Result<et::Signature> {
let (v, sig) = match sig.sig_type {
SignatureType::Secp256k1 => parse_secp256k1(&sig.bytes)?,
other => return Err(anyhow!("unexpected signature type: {other:?}")),
};
// By adding 27 to the recovery ID we make this compatible with Ethereum,
// so that we can verify such signatures in Solidity with e.g. openzeppelin ECDSA.sol
let shift = if normalized { 0 } else { 27 };
let sig = et::Signature {
v: et::U64::from(v.serialize() + shift).as_u64(),
r: et::U256::from_big_endian(sig.r.b32().as_ref()),
s: et::U256::from_big_endian(sig.s.b32().as_ref()),
};
Ok(sig)
}
/// Turn an FVM `Message` back into an Ethereum transaction request.
pub fn to_eth_transaction_request(
msg: &Message,
chain_id: &ChainID,
) -> anyhow::Result<et::Eip1559TransactionRequest> {
let chain_id: u64 = (*chain_id).into();
let Message {
version: _,
from,
to,
sequence,
value,
method_num: _,
params,
gas_limit,
gas_fee_cap,
gas_premium,
} = msg;
let data = fvm_ipld_encoding::from_slice::<BytesDe>(params).map(|bz| bz.0)?;
let mut tx = et::Eip1559TransactionRequest::new()
.chain_id(chain_id)
.from(to_eth_address(from)?.unwrap_or_default())
.nonce(*sequence)
.gas(*gas_limit)
.max_fee_per_gas(to_eth_tokens(gas_fee_cap)?)
.max_priority_fee_per_gas(to_eth_tokens(gas_premium)?)
.data(et::Bytes::from(data));
tx.to = to_eth_address(to)?.map(et::NameOrAddress::Address);
// NOTE: It's impossible to tell if the original Ethereum transaction sent None or Some(0).
// The ethers deployer sends None, so let's assume that's the useful behavour to match.
// Luckily the RLP encoding at some point seems to resolve them to the same thing.
if !value.is_zero() {
tx.value = Some(to_eth_tokens(value)?);
}
Ok(tx)
}
#[cfg(test)]
pub mod tests {
use std::str::FromStr;
use ethers::signers::{Signer, Wallet};
use ethers_core::utils::rlp;
use ethers_core::{k256::ecdsa::SigningKey, types::transaction::eip2718::TypedTransaction};
use fendermint_crypto::SecretKey;
use fendermint_testing::arb::ArbTokenAmount;
use fendermint_vm_message::signed::SignedMessage;
use fvm_shared::crypto::signature::Signature;
use fvm_shared::{bigint::BigInt, chainid::ChainID, econ::TokenAmount};
use quickcheck_macros::quickcheck;
use rand::{rngs::StdRng, SeedableRng};
use crate::conv::{
from_eth::to_fvm_message,
tests::{EthMessage, KeyPair},
};
use super::{to_eth_signature, to_eth_tokens, to_eth_transaction_request};
#[quickcheck]
fn prop_to_eth_tokens(tokens: ArbTokenAmount) -> bool {
let tokens = tokens.0;
if let Ok(u256_from_tokens) = to_eth_tokens(&tokens) {
let tokens_as_str = tokens.atto().to_str_radix(10);
let u256_from_str = ethers_core::types::U256::from_dec_str(&tokens_as_str).unwrap();
return u256_from_str == u256_from_tokens;
}
true
}
#[test]
fn test_to_eth_tokens() {
let atto = BigInt::from_str(
"99191064924191451313862974502415542781658129482631472725645205117646186753315",
)
.unwrap();
let tokens = TokenAmount::from_atto(atto);
to_eth_tokens(&tokens).unwrap();
}
/// Check that converting a signature from FVM to ETH and back preserves it.
#[quickcheck]
fn prop_signature(msg: SignedMessage, seed: u64, chain_id: u64) -> Result<(), String> {
let chain_id = ChainID::from(chain_id);
let mut rng = StdRng::seed_from_u64(seed);
let sk = SecretKey::random(&mut rng);
let msg = SignedMessage::new_secp256k1(msg.into_message(), &sk, &chain_id)
.map_err(|e| format!("failed to sign: {e}"))?;
let sig0 = msg.signature();
let sig1 = to_eth_signature(sig0, true)
.map_err(|e| format!("failed to convert signature: {e}"))?;
let sig2 = fvm_shared::crypto::signature::Signature::new_secp256k1(sig1.to_vec());
if *sig0 != sig2 {
return Err(format!("signatures don't match: {sig0:?} != {sig2:?}"));
}
Ok(())
}
#[quickcheck]
fn prop_to_and_from_eth_transaction(msg: EthMessage, chain_id: u64) {
let chain_id = ChainID::from(chain_id);
let msg0 = msg.0;
let tx = to_eth_transaction_request(&msg0, &chain_id)
.expect("to_eth_transaction_request failed");
let msg1 = to_fvm_message(&tx).expect("to_fvm_message failed");
assert_eq!(msg1, msg0)
}
/// Check that decoding a signed ETH transaction and converting to FVM can be verified with the signature produced by a Wallet.
#[quickcheck]
fn prop_eth_signature(msg: EthMessage, chain_id: u64, key_pair: KeyPair) {
// ethers has `to_eip155_v` which would fail with u64 overflow if the chain ID is too big.
let chain_id = chain_id / 3;
let chain_id = ChainID::from(chain_id);
let msg0 = msg.0;
let tx: TypedTransaction = to_eth_transaction_request(&msg0, &chain_id)
.expect("to_eth_transaction_request failed")
.into();
let wallet: Wallet<SigningKey> = Wallet::from_bytes(key_pair.sk.serialize().as_ref())
.expect("failed to create wallet")
.with_chain_id(chain_id);
let sig = wallet.sign_transaction_sync(&tx).expect("failed to sign");
let bz = tx.rlp_signed(&sig);
let rlp = rlp::Rlp::new(bz.as_ref());
let (tx1, sig) = TypedTransaction::decode_signed(&rlp)
.expect("failed to decode RLP as signed TypedTransaction");
let tx1 = tx1.as_eip1559_ref().expect("not an eip1559 transaction");
let msg1 = to_fvm_message(tx1).expect("to_fvm_message failed");
let signed = SignedMessage {
message: msg1,
signature: Signature::new_secp256k1(sig.to_vec()),
};
signed.verify(&chain_id).expect("signature should be valid")
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/conv/from_eth.rs | fendermint/vm/message/src/conv/from_eth.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Helper methods to convert between Ethereum and FVM data formats.
use ethers_core::types::{Eip1559TransactionRequest, NameOrAddress, H160, U256};
use fendermint_vm_actor_interface::{
eam::{self, EthAddress},
evm,
};
use fvm_ipld_encoding::{BytesSer, RawBytes};
use fvm_shared::{
address::Address,
bigint::{BigInt, Sign},
econ::TokenAmount,
message::Message,
};
// https://github.com/filecoin-project/lotus/blob/594c52b96537a8c8728389b446482a2d7ea5617c/chain/types/ethtypes/eth_transactions.go#L152
pub fn to_fvm_message(tx: &Eip1559TransactionRequest) -> anyhow::Result<Message> {
// FIP-55 says that we should use `InvokeContract` for transfers instead of `METHOD_SEND`,
// because if we are sending to some Ethereum actor by ID using `METHOD_SEND`, they will
// get the tokens but the contract might not provide any way of retrieving them.
// The `Account` actor has been modified to accept any method call, so it will not fail
// even if it receives tokens using `InvokeContract`.
let (method_num, to) = match tx.to {
None => (eam::Method::CreateExternal as u64, eam::EAM_ACTOR_ADDR),
Some(NameOrAddress::Address(to)) => {
let to = to_fvm_address(to);
(evm::Method::InvokeContract as u64, to)
}
Some(NameOrAddress::Name(_)) => {
anyhow::bail!("Turning name to address would require ENS which is not supported.")
}
};
// The `from` of the transaction is inferred from the signature.
// As long as the client and the server use the same hashing scheme, this should be usable as a delegated address.
// If none, use the 0x00..00 null ethereum address, which in the node will be replaced with the SYSTEM_ACTOR_ADDR;
// This is similar to https://github.com/filecoin-project/lotus/blob/master/node/impl/full/eth_utils.go#L124
let from = to_fvm_address(tx.from.unwrap_or_default());
// Wrap calldata in IPLD byte format.
let calldata = tx.data.clone().unwrap_or_default().to_vec();
let params = RawBytes::serialize(BytesSer(&calldata))?;
let msg = Message {
version: 0,
from,
to,
sequence: tx.nonce.unwrap_or_default().as_u64(),
value: to_fvm_tokens(&tx.value.unwrap_or_default()),
method_num,
params,
gas_limit: tx
.gas
.map(|gas| gas.min(U256::from(u64::MAX)).as_u64())
.unwrap_or_default(),
gas_fee_cap: to_fvm_tokens(&tx.max_fee_per_gas.unwrap_or_default()),
gas_premium: to_fvm_tokens(&tx.max_priority_fee_per_gas.unwrap_or_default()),
};
Ok(msg)
}
pub fn to_fvm_address(addr: H160) -> Address {
Address::from(EthAddress(addr.0))
}
pub fn to_fvm_tokens(value: &U256) -> TokenAmount {
let mut bz = [0u8; 256 / 8];
value.to_big_endian(&mut bz);
let atto = BigInt::from_bytes_be(Sign::Plus, &bz);
TokenAmount::from_atto(atto)
}
#[cfg(test)]
mod tests {
use ethers_core::{
types::{transaction::eip2718::TypedTransaction, Bytes, TxHash},
utils::rlp,
};
use fendermint_testing::arb::ArbTokenAmount;
use fvm_shared::{chainid::ChainID, crypto::signature::Signature};
use quickcheck_macros::quickcheck;
use crate::{
conv::{from_eth::to_fvm_message, from_fvm::to_eth_tokens},
signed::{DomainHash, SignedMessage},
};
use super::to_fvm_tokens;
#[quickcheck]
fn prop_to_token_amount(tokens: ArbTokenAmount) -> bool {
let tokens0 = tokens.0;
if let Ok(value) = to_eth_tokens(&tokens0) {
let tokens1 = to_fvm_tokens(&value);
return tokens0 == tokens1;
}
true
}
#[test]
fn test_domain_hash() {
let expected_hash: TxHash =
"0x8fe4fd8e1c7c40dceed249c99a553bc218774f611cfefd8a48ede67b8f6e4725"
.parse()
.unwrap();
let raw_tx: Bytes = "0x02f86e87084472af917f2a8080808502540be400948ed26a19f0e0d6708546495611e9a298d9befb598203e880c080a0a37d03d98e50622ec3744ee368565c5e9469852a1d9111197608135928cd2430a010d1575c68602c96c89e9ec30fade44f5844bf34226044d2931afc60b0a8b2de".parse().unwrap();
let rlp = rlp::Rlp::new(&raw_tx);
let tx_hash = TxHash::from(ethers_core::utils::keccak256(rlp.as_raw()));
assert_eq!(tx_hash, expected_hash);
let (tx0, sig) = TypedTransaction::decode_signed(&rlp).expect("decode signed tx");
let chain_id: ChainID = tx0.chain_id().unwrap().as_u64().into();
let msg = SignedMessage {
message: to_fvm_message(tx0.as_eip1559_ref().unwrap()).expect("to_fvm_message"),
signature: Signature::new_secp256k1(sig.to_vec()),
};
let domain_hash = msg.domain_hash(&chain_id).expect("domain_hash");
match domain_hash {
Some(DomainHash::Eth(h)) => assert_eq!(h, tx_hash.0),
other => panic!("unexpected domain hash: {other:?}"),
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/src/conv/mod.rs | fendermint/vm/message/src/conv/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod from_eth;
pub mod from_fvm;
#[cfg(test)]
pub mod tests {
use fendermint_crypto::{PublicKey, SecretKey};
use fendermint_testing::arb::{ArbMessage, ArbTokenAmount};
use fendermint_vm_actor_interface::{
eam::{self, EthAddress},
evm,
};
use fvm_ipld_encoding::{BytesSer, RawBytes};
use fvm_shared::{address::Address, bigint::Integer, econ::TokenAmount, message::Message};
use rand::{rngs::StdRng, SeedableRng};
use super::from_fvm::MAX_U256;
#[derive(Clone, Debug)]
struct EthDelegatedAddress(Address);
impl quickcheck::Arbitrary for EthDelegatedAddress {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let mut subaddr: [u8; 20] = std::array::from_fn(|_| u8::arbitrary(g));
while EthAddress(subaddr).is_masked_id() {
subaddr[0] = u8::arbitrary(g);
}
Self(Address::new_delegated(eam::EAM_ACTOR_ID, &subaddr).unwrap())
}
}
#[derive(Clone, Debug)]
struct EthTokenAmount(TokenAmount);
impl quickcheck::Arbitrary for EthTokenAmount {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let t = ArbTokenAmount::arbitrary(g).0;
let (_, t) = t.atto().div_mod_floor(&MAX_U256);
Self(TokenAmount::from_atto(t))
}
}
/// Message that only contains data which can survive a roundtrip.
#[derive(Clone, Debug)]
pub struct EthMessage(pub Message);
impl quickcheck::Arbitrary for EthMessage {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let mut m = ArbMessage::arbitrary(g).0;
m.version = 0;
m.method_num = evm::Method::InvokeContract as u64;
m.from = EthDelegatedAddress::arbitrary(g).0;
m.to = EthDelegatedAddress::arbitrary(g).0;
m.value = EthTokenAmount::arbitrary(g).0;
m.gas_fee_cap = EthTokenAmount::arbitrary(g).0;
m.gas_premium = EthTokenAmount::arbitrary(g).0;
// The random bytes will fail to deserialize.
// With the EVM we expect them to be IPLD serialized bytes.
m.params =
RawBytes::serialize(BytesSer(m.params.bytes())).expect("failedto serialize params");
Self(m)
}
}
#[derive(Debug, Clone)]
pub struct KeyPair {
pub sk: SecretKey,
pub pk: PublicKey,
}
impl quickcheck::Arbitrary for KeyPair {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let seed = u64::arbitrary(g);
let mut rng = StdRng::seed_from_u64(seed);
let sk = SecretKey::random(&mut rng);
let pk = sk.public_key();
Self { sk, pk }
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/message/tests/golden.rs | fendermint/vm/message/tests/golden.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
/// Examples of `ChainMessage`, which is what the client has to send,
/// or at least what appears in blocks.
mod chain {
use fendermint_testing::golden_cbor;
use fendermint_vm_message::{chain::ChainMessage, ipc::IpcMessage};
use quickcheck::Arbitrary;
golden_cbor! { "chain", signed, |g| {
loop {
if let msg @ ChainMessage::Signed(_) = ChainMessage::arbitrary(g) {
return msg
}
}
}
}
golden_cbor! { "chain", ipc_bottom_up_resolve, |g| {
loop {
if let msg @ ChainMessage::Ipc(IpcMessage::BottomUpResolve(_)) = ChainMessage::arbitrary(g) {
return msg
}
}
}
}
golden_cbor! { "chain", ipc_bottom_up_exec, |g| {
loop {
if let msg @ ChainMessage::Ipc(IpcMessage::BottomUpExec(_)) = ChainMessage::arbitrary(g) {
return msg
}
}
}
}
golden_cbor! { "chain", ipc_top_down, |g| {
loop {
if let msg @ ChainMessage::Ipc(IpcMessage::TopDownExec(_)) = ChainMessage::arbitrary(g) {
return msg
}
}
}
}
}
/// Examples of FVM messages, which is what the client needs to sign.
mod fvm {
use fendermint_testing::golden_cid;
use fendermint_vm_message::signed::SignedMessage;
use quickcheck::Arbitrary;
golden_cid! { "fvm", message, |g| SignedMessage::arbitrary(g).message, |m| SignedMessage::cid(m).unwrap() }
}
/// Examples of query requests the client needs to send, and client responses it will receive.
mod query {
mod request {
use fendermint_testing::golden_cbor;
use fendermint_vm_message::query::FvmQuery;
use quickcheck::Arbitrary;
golden_cbor! { "query/request", ipld, |g| {
loop {
if let msg @ FvmQuery::Ipld(_) = FvmQuery::arbitrary(g) {
return msg
}
}
}}
golden_cbor! { "query/request", actor_state, |g| {
loop {
if let msg @ FvmQuery::ActorState { .. } = FvmQuery::arbitrary(g) {
return msg
}
}
}}
}
mod response {
use fendermint_testing::golden_cbor;
use fendermint_vm_message::query::ActorState;
use quickcheck::Arbitrary;
golden_cbor! { "query/response", actor_state, |g| {
ActorState::arbitrary(g)
}}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/genesis/src/lib.rs | fendermint/vm/genesis/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! A Genesis data structure similar to [genesis.Template](https://github.com/filecoin-project/lotus/blob/v1.20.4/genesis/types.go)
//! in Lotus, which is used to [initialize](https://github.com/filecoin-project/lotus/blob/v1.20.4/chain/gen/genesis/genesis.go) the state tree.
use anyhow::anyhow;
use fvm_shared::bigint::{BigInt, Integer};
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use fendermint_actor_eam::PermissionModeParams;
use fvm_shared::version::NetworkVersion;
use fvm_shared::{address::Address, econ::TokenAmount};
use fendermint_crypto::{normalize_public_key, PublicKey};
use fendermint_vm_core::Timestamp;
use fendermint_vm_encoding::IsHumanReadable;
#[cfg(feature = "arb")]
mod arb;
/// Power conversion decimal points, e.g. 3 decimals means 1 power per milliFIL.
pub type PowerScale = i8;
/// The genesis data structure we serialize to JSON and start the chain with.
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Genesis {
/// The name of the blockchain.
///
/// It will be used to derive a chain ID as well as being
/// the network name in the `InitActor`.
pub chain_name: String,
pub timestamp: Timestamp,
pub network_version: NetworkVersion,
#[serde_as(as = "IsHumanReadable")]
pub base_fee: TokenAmount,
/// Collateral to power conversion.
pub power_scale: PowerScale,
/// Validators in genesis are given with their FIL collateral to maintain the
/// highest possible fidelity when we are deriving a genesis file in IPC,
/// where the parent subnet tracks collateral.
pub validators: Vec<Validator<Collateral>>,
pub accounts: Vec<Actor>,
/// The custom eam permission mode that controls who can deploy contracts
pub eam_permission_mode: PermissionMode,
/// IPC related configuration, if enabled.
#[serde(skip_serializing_if = "Option::is_none")]
pub ipc: Option<ipc::IpcParams>,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "mode", rename_all = "lowercase")]
pub enum PermissionMode {
/// No restriction, everyone can deploy
Unrestricted,
/// Only whitelisted addresses can deploy
AllowList { addresses: Vec<SignerAddr> },
}
/// Wrapper around [`Address`] to provide human readable serialization in JSON format.
///
/// An alternative would be the `serde_with` crate.
///
/// TODO: This is based on [Lotus](https://github.com/filecoin-project/lotus/blob/v1.20.4/genesis/types.go).
/// Not sure if anything but public key addresses make sense here. Consider using `PublicKey` instead of `Address`.
#[serde_as]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct SignerAddr(#[serde_as(as = "IsHumanReadable")] pub Address);
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Account {
pub owner: SignerAddr,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Multisig {
pub signers: Vec<SignerAddr>,
pub threshold: u64,
pub vesting_duration: u64,
pub vesting_start: u64,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum ActorMeta {
Account(Account),
Multisig(Multisig),
}
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Actor {
pub meta: ActorMeta,
#[serde_as(as = "IsHumanReadable")]
pub balance: TokenAmount,
}
/// Total amount of tokens delegated to a validator.
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Collateral(#[serde_as(as = "IsHumanReadable")] pub TokenAmount);
/// Total voting power of a validator.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Copy)]
pub struct Power(pub u64);
impl Collateral {
/// Convert from [Collateral] to [Power] by specifying the number of significant
/// decimal places per FIL that grant 1 power.
///
/// For example:
/// * with 3 decimal places, we get 1 power per milli FIL: 0.001 FIL => 1 power
/// * with 0 decimal places, we get 1 power per whole FIL: 1 FIL => 1 power
pub fn into_power(self: Collateral, scale: PowerScale) -> Power {
let atto_per_power = Self::atto_per_power(scale);
let atto = self.0.atto();
// Rounding away from zero, so with little collateral (e.g. in testing)
// we don't end up with everyone having 0 power and then being unable
// to produce a checkpoint because the threshold is 0.
let power = atto.div_ceil(&atto_per_power);
let power = power.min(BigInt::from(u64::MAX));
Power(power.try_into().expect("clipped to u64::MAX"))
}
/// Helper function to convert atto to [Power].
fn atto_per_power(scale: PowerScale) -> BigInt {
// Figure out how many decimals we need to shift to the right.
let decimals = match scale {
d if d >= 0 => TokenAmount::DECIMALS.saturating_sub(d as usize) as u32,
d => (TokenAmount::DECIMALS as i8 + d.abs()) as u32,
};
BigInt::from(10).pow(decimals)
}
}
impl Default for Collateral {
fn default() -> Self {
Self(TokenAmount::from_atto(0))
}
}
/// Secp256k1 public key of the validators.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct ValidatorKey(pub PublicKey);
impl ValidatorKey {
/// Create a new key and make sure the wrapped public key is normalized,
/// which is to ensure the results look the same after a serialization roundtrip.
pub fn new(key: PublicKey) -> Self {
Self(normalize_public_key(key))
}
pub fn public_key(&self) -> &PublicKey {
&self.0
}
}
impl TryFrom<ValidatorKey> for tendermint::PublicKey {
type Error = anyhow::Error;
fn try_from(value: ValidatorKey) -> Result<Self, Self::Error> {
let bz = value.0.serialize();
let key = tendermint::crypto::default::ecdsa_secp256k1::VerifyingKey::from_sec1_bytes(&bz)
.map_err(|e| anyhow!("failed to convert public key: {e}"))?;
Ok(tendermint::public_key::PublicKey::Secp256k1(key))
}
}
impl TryFrom<tendermint::PublicKey> for ValidatorKey {
type Error = anyhow::Error;
fn try_from(value: tendermint::PublicKey) -> Result<Self, Self::Error> {
match value {
tendermint::PublicKey::Secp256k1(key) => {
let bz = key.to_sec1_bytes();
let pk = PublicKey::parse_slice(&bz, None)?;
Ok(Self(pk))
}
other => Err(anyhow!("unexpected validator key type: {other:?}")),
}
}
}
/// A genesis validator with their initial power.
///
/// An [`Address`] would be enough to validate signatures, however
/// we will always need the public key to return updates in the
/// power distribution to Tendermint; it is easiest to ask for
/// the full public key.
///
/// Note that we could get the validators from `InitChain` through
/// the ABCI, but then we'd have to handle the case of a key we
/// don't know how to turn into an [`Address`]. This way leaves
/// less room for error, and we can pass all the data to the FVM
/// in one go.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct Validator<P> {
pub public_key: ValidatorKey,
pub power: P,
}
impl<A> Validator<A> {
/// Convert the power.
pub fn map_power<F: FnOnce(A) -> B, B>(self, f: F) -> Validator<B> {
Validator {
public_key: self.public_key,
power: f(self.power),
}
}
}
impl From<PermissionMode> for PermissionModeParams {
fn from(value: PermissionMode) -> Self {
match value {
PermissionMode::Unrestricted => PermissionModeParams::Unrestricted,
PermissionMode::AllowList { addresses } => {
let addresses = addresses.into_iter().map(|v| v.0).collect::<Vec<_>>();
PermissionModeParams::AllowList(addresses)
}
}
}
}
/// IPC related data structures.
pub mod ipc {
use fendermint_vm_encoding::IsHumanReadable;
use ipc_api::subnet_id::SubnetID;
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct IpcParams {
pub gateway: GatewayParams,
}
#[serde_as]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct GatewayParams {
#[serde_as(as = "IsHumanReadable")]
pub subnet_id: SubnetID,
pub bottom_up_check_period: u64,
pub majority_percentage: u8,
pub active_validators_limit: u16,
}
}
#[cfg(test)]
mod tests {
use fvm_shared::{bigint::BigInt, econ::TokenAmount};
use num_traits::Num;
use quickcheck_macros::quickcheck;
use crate::{Collateral, Genesis};
#[quickcheck]
fn genesis_json(value0: Genesis) {
let repr = serde_json::to_string(&value0).expect("failed to encode");
let value1: Genesis = serde_json::from_str(&repr)
.map_err(|e| format!("{e}; {repr}"))
.expect("failed to decode JSON");
assert_eq!(value1, value0)
}
#[quickcheck]
fn genesis_cbor(value0: Genesis) {
let repr = fvm_ipld_encoding::to_vec(&value0).expect("failed to encode");
let value1: Genesis = fvm_ipld_encoding::from_slice(&repr).expect("failed to decode");
assert_eq!(value1, value0)
}
#[test]
fn tokens_to_power() {
// Collateral given in atto (18 digits after the decimal)
// Instead of truncating, the remainder is rounded up, to avoid giving 0 power.
let examples: Vec<(&str, u64)> = vec![
("0.000000000000000000", 0),
("0.000000000000000001", 1),
("0.000999999999999999", 1),
("0.001000000000000000", 1),
("0.001999999999999999", 2),
("1.000000000000000000", 1000),
("0.999999999999999999", 1000),
("1.998000000000000001", 1999),
("1.999000000000000000", 1999),
("1.999000000000000001", 2000),
("1.999999999999999999", 2000),
("2.999999999999999999", 3000),
];
for (atto, expected) in examples {
let atto = BigInt::from_str_radix(atto.replace('.', "").as_str(), 10).unwrap();
let collateral = Collateral(TokenAmount::from_atto(atto.clone()));
let power = collateral.into_power(3).0;
assert_eq!(power, expected, "{atto:?} atto => {power} power");
}
}
#[test]
fn atto_per_power() {
// Collateral given in atto (18 digits after the decimal)
let examples = vec![
(0, TokenAmount::PRECISION),
(3, 1_000_000_000_000_000),
(-1, 10_000_000_000_000_000_000),
];
for (scale, atto) in examples {
assert_eq!(Collateral::atto_per_power(scale), BigInt::from(atto))
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/genesis/src/arb.rs | fendermint/vm/genesis/src/arb.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::{
ipc, Account, Actor, ActorMeta, Collateral, Genesis, Multisig, PermissionMode, Power,
SignerAddr, Validator, ValidatorKey,
};
use cid::multihash::MultihashDigest;
use fendermint_crypto::SecretKey;
use fendermint_testing::arb::{ArbSubnetID, ArbTokenAmount};
use fendermint_vm_core::Timestamp;
use fvm_shared::{address::Address, version::NetworkVersion};
use quickcheck::{Arbitrary, Gen};
use rand::{rngs::StdRng, SeedableRng};
impl Arbitrary for ActorMeta {
fn arbitrary(g: &mut Gen) -> Self {
// Generate keys which the loader knows how to initialize.
if bool::arbitrary(g) {
let pk = ValidatorKey::arbitrary(g).0;
let pk = pk.serialize();
let addr = if bool::arbitrary(g) {
Address::new_secp256k1(&pk).unwrap()
} else {
// NOTE: Not using `EthAddress` because it would be circular dependency.
let mut hash20 = [0u8; 20];
let hash32 = cid::multihash::Code::Keccak256.digest(&pk[1..]);
hash20.copy_from_slice(&hash32.digest()[12..]);
Address::new_delegated(10, &hash20).unwrap()
};
ActorMeta::Account(Account {
owner: SignerAddr(addr),
})
} else {
let n = u64::arbitrary(g) % 4 + 2;
let signers = (0..n)
.map(|_| {
let pk = ValidatorKey::arbitrary(g).0;
let addr = Address::new_secp256k1(&pk.serialize()).unwrap();
SignerAddr(addr)
})
.collect();
let threshold = u64::arbitrary(g) % n + 1;
ActorMeta::Multisig(Multisig {
signers,
threshold,
vesting_duration: u64::arbitrary(g),
vesting_start: u64::arbitrary(g),
})
}
}
}
impl Arbitrary for Actor {
fn arbitrary(g: &mut Gen) -> Self {
Self {
meta: ActorMeta::arbitrary(g),
balance: ArbTokenAmount::arbitrary(g).0,
}
}
}
impl Arbitrary for ValidatorKey {
fn arbitrary(g: &mut Gen) -> Self {
// Using a full 32 byte seed instead of `StdRng::seed_from_u64` to reduce the annoying collisions
// when trying to generate multiple validators. Probably 0 is generated more often than other u64
// for example, but there is a high probability of matching keys, which is possible but usually
// not what we are trying to test, and using a common `Rng` to generate all validators is cumbersome.
let seed: [u8; 32] = std::array::from_fn(|_| u8::arbitrary(g));
let mut rng = StdRng::from_seed(seed);
let sk = SecretKey::random(&mut rng);
let pk = sk.public_key();
Self::new(pk)
}
}
impl Arbitrary for Collateral {
fn arbitrary(g: &mut Gen) -> Self {
Self(ArbTokenAmount::arbitrary(g).0)
}
}
impl Arbitrary for Power {
fn arbitrary(g: &mut Gen) -> Self {
// Giving at least 1 power. 0 is a valid value to signal deletion,
// but not that useful in the more common power table setting.
Self(u64::arbitrary(g).saturating_add(1))
}
}
impl<P: Arbitrary> Arbitrary for Validator<P> {
fn arbitrary(g: &mut Gen) -> Self {
Self {
public_key: ValidatorKey::arbitrary(g),
power: P::arbitrary(g),
}
}
}
impl Arbitrary for Genesis {
fn arbitrary(g: &mut Gen) -> Self {
let nv = usize::arbitrary(g) % 10 + 1;
let na = usize::arbitrary(g) % 10;
Self {
timestamp: Timestamp(u64::arbitrary(g)),
chain_name: String::arbitrary(g),
network_version: NetworkVersion::new(*g.choose(&[21]).unwrap()),
base_fee: ArbTokenAmount::arbitrary(g).0,
power_scale: *g.choose(&[-1, 0, 3]).unwrap(),
validators: (0..nv).map(|_| Arbitrary::arbitrary(g)).collect(),
accounts: (0..na).map(|_| Arbitrary::arbitrary(g)).collect(),
eam_permission_mode: PermissionMode::Unrestricted,
ipc: if bool::arbitrary(g) {
Some(ipc::IpcParams::arbitrary(g))
} else {
None
},
}
}
}
impl Arbitrary for ipc::GatewayParams {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
subnet_id: ArbSubnetID::arbitrary(g).0,
// Gateway constructor would reject 0.
bottom_up_check_period: u64::arbitrary(g).max(1),
majority_percentage: u8::arbitrary(g) % 50 + 51,
active_validators_limit: u16::arbitrary(g) % 100 + 1,
}
}
}
impl Arbitrary for ipc::IpcParams {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
gateway: ipc::GatewayParams::arbitrary(g),
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/genesis/tests/golden.rs | fendermint/vm/genesis/tests/golden.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
/// JSON based test so we can parse data from the disk where it's nice to be human readable.
mod json {
use fendermint_testing::golden_json;
use fendermint_vm_genesis::Genesis;
use quickcheck::Arbitrary;
golden_json! { "genesis/json", genesis, Genesis::arbitrary }
}
/// CBOR based tests to make sure we can parse data in network format.
mod cbor {
use fendermint_testing::golden_cbor;
use fendermint_vm_genesis::Genesis;
use quickcheck::Arbitrary;
golden_cbor! { "genesis/cbor", genesis, Genesis::arbitrary }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/lib.rs | fendermint/vm/snapshot/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod car;
mod client;
mod error;
mod manager;
mod manifest;
mod state;
/// The file name to export the CAR to.
const SNAPSHOT_FILE_NAME: &str = "snapshot.car";
/// The file name in snapshot directories that contains the manifest.
const MANIFEST_FILE_NAME: &str = "manifest.json";
/// Name of the subdirectory where `{idx}.part` files are stored within a snapshot.
const PARTS_DIR_NAME: &str = "parts";
pub use client::SnapshotClient;
pub use error::SnapshotError;
pub use manager::{SnapshotManager, SnapshotParams};
pub use manifest::SnapshotManifest;
pub use state::SnapshotItem;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/manifest.rs | fendermint/vm/snapshot/src/manifest.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::{Path, PathBuf};
use anyhow::Context;
use fendermint_vm_interpreter::fvm::state::{
snapshot::{BlockHeight, SnapshotVersion},
FvmStateParams,
};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use crate::{SnapshotItem, MANIFEST_FILE_NAME};
#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
pub struct SnapshotManifest {
/// Block height where the snapshot was taken.
pub block_height: BlockHeight,
/// Snapshot size in bytes.
pub size: u64,
/// Number of chunks in the snapshot.
pub chunks: u32,
/// SHA2 hash of the snapshot contents.
///
/// Using a [tendermint::Hash] type because it has nice formatting in JSON.
pub checksum: tendermint::Hash,
/// The FVM parameters at the time of the snapshot,
/// which are also in the CAR file, but it might be
/// useful to see. It is annotated for human readability.
pub state_params: FvmStateParams,
/// Snapshot format version
pub version: SnapshotVersion,
}
/// Save a manifest along with the other snapshot files into a snapshot specific directory.
pub fn write_manifest(
snapshot_dir: impl AsRef<Path>,
manifest: &SnapshotManifest,
) -> anyhow::Result<PathBuf> {
let json =
serde_json::to_string_pretty(&manifest).context("failed to convert manifest to JSON")?;
let manifest_path = snapshot_dir.as_ref().join(MANIFEST_FILE_NAME);
std::fs::write(&manifest_path, json).context("failed to write manifest file")?;
Ok(manifest_path)
}
/// Collect all the manifests from a directory containing snapshot-directories, e.g.
/// `snapshots/snapshot-1/manifest.json` etc.
pub fn list_manifests(snapshot_dir: impl AsRef<Path>) -> anyhow::Result<Vec<SnapshotItem>> {
let contents = std::fs::read_dir(snapshot_dir).context("failed to read snapshot directory")?;
// Collect all manifest file paths.
let mut manifests = Vec::new();
for entry in contents {
match entry {
Ok(entry) => match entry.metadata() {
Ok(metadata) => {
if metadata.is_dir() {
let manifest_path = entry.path().join(MANIFEST_FILE_NAME);
if manifest_path.exists() {
manifests.push((entry.path(), manifest_path))
}
}
}
Err(e) => {
tracing::error!(error =? e, "faulty entry metadata");
}
},
Err(e) => {
tracing::error!(error =? e, "faulty snapshot entry");
}
}
}
// Parse manifests
let mut items = Vec::new();
for (snapshot_dir, manifest) in manifests {
let json = std::fs::read_to_string(&manifest).context("failed to open manifest")?;
match serde_json::from_str(&json) {
Ok(manifest) => items.push(SnapshotItem::new(snapshot_dir, manifest)),
Err(e) => {
tracing::error!(
manifest = manifest.to_string_lossy().to_string(),
error =? e,
"unable to parse snapshot manifest"
);
}
}
}
// Order by oldest to newest.
items.sort_by_key(|i| i.manifest.block_height);
Ok(items)
}
/// Calculate the Sha256 checksum of a file.
pub fn file_checksum(path: impl AsRef<Path>) -> anyhow::Result<tendermint::Hash> {
let mut file = std::fs::File::open(&path)?;
let mut hasher = Sha256::new();
let _ = std::io::copy(&mut file, &mut hasher)?;
let hash = hasher.finalize().into();
Ok(tendermint::Hash::Sha256(hash))
}
/// Calculate the Sha256 checksum of all `{idx}.part` files in a directory.
pub fn parts_checksum(path: impl AsRef<Path>) -> anyhow::Result<tendermint::Hash> {
let mut hasher = Sha256::new();
let chunks = list_parts(path)?;
for path in chunks {
let mut file = std::fs::File::open(path).context("failed to open part")?;
let _ = std::io::copy(&mut file, &mut hasher)?;
}
let hash = hasher.finalize().into();
Ok(tendermint::Hash::Sha256(hash))
}
/// List all the `{idx}.part` files in a directory.
pub fn list_parts(path: impl AsRef<Path>) -> anyhow::Result<Vec<PathBuf>> {
let mut chunks = std::fs::read_dir(path.as_ref())
.unwrap()
.collect::<Result<Vec<_>, _>>()
.with_context(|| {
format!(
"failed to collect parts in directory: {}",
path.as_ref().to_string_lossy()
)
})?;
chunks.retain(|item| {
item.path()
.extension()
.map(|x| x.to_string_lossy().to_string())
.unwrap_or_default()
== "part"
});
chunks.sort_by_cached_key(|item| {
item.path()
.file_stem()
.map(|n| n.to_string_lossy())
.unwrap_or_default()
.parse::<u32>()
.expect("file part names are prefixed by index")
});
Ok(chunks.into_iter().map(|c| c.path()).collect())
}
#[cfg(feature = "arb")]
mod arb {
use fendermint_testing::arb::{ArbCid, ArbTokenAmount};
use fendermint_vm_core::{chainid, Timestamp};
use fendermint_vm_interpreter::fvm::state::FvmStateParams;
use fvm_shared::version::NetworkVersion;
use quickcheck::Arbitrary;
use super::SnapshotManifest;
impl quickcheck::Arbitrary for SnapshotManifest {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let checksum: [u8; 32] = std::array::from_fn(|_| u8::arbitrary(g));
Self {
block_height: u32::arbitrary(g) as u64,
size: Arbitrary::arbitrary(g),
chunks: Arbitrary::arbitrary(g),
checksum: tendermint::Hash::from_bytes(
tendermint::hash::Algorithm::Sha256,
&checksum,
)
.unwrap(),
state_params: FvmStateParams {
state_root: ArbCid::arbitrary(g).0,
timestamp: Timestamp(Arbitrary::arbitrary(g)),
network_version: NetworkVersion::MAX,
base_fee: ArbTokenAmount::arbitrary(g).0,
circ_supply: ArbTokenAmount::arbitrary(g).0,
chain_id: chainid::from_str_hashed(String::arbitrary(g).as_str())
.unwrap()
.into(),
power_scale: *g.choose(&[-1, 0, 3]).unwrap(),
app_version: 0,
},
version: Arbitrary::arbitrary(g),
}
}
}
}
#[cfg(test)]
mod tests {
use std::io::Write;
use cid::multihash::MultihashDigest;
use tempfile::NamedTempFile;
use crate::manifest::file_checksum;
#[test]
fn test_file_checksum() {
let content = b"Hello Checksum!";
let mut file = NamedTempFile::new().expect("new temp file");
file.write_all(content).expect("write contents");
let file_path = file.into_temp_path();
let file_digest = file_checksum(file_path).expect("checksum");
let content_digest = cid::multihash::Code::Sha2_256.digest(content);
let content_digest = content_digest.digest();
assert_eq!(file_digest.as_bytes(), content_digest)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/manager.rs | fendermint/vm/snapshot/src/manager.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::{Path, PathBuf};
use std::time::Duration;
use crate::manifest::{file_checksum, list_manifests, write_manifest, SnapshotManifest};
use crate::state::SnapshotState;
use crate::{car, SnapshotClient, SnapshotItem, PARTS_DIR_NAME, SNAPSHOT_FILE_NAME};
use anyhow::Context;
use async_stm::{atomically, retry, TVar};
use fendermint_vm_interpreter::fvm::state::snapshot::{BlockHeight, Snapshot};
use fendermint_vm_interpreter::fvm::state::FvmStateParams;
use fvm_ipld_blockstore::Blockstore;
use tendermint_rpc::Client;
pub struct SnapshotParams {
/// Location to store completed snapshots.
pub snapshots_dir: PathBuf,
pub download_dir: PathBuf,
pub block_interval: BlockHeight,
/// Target size in bytes for snapshot chunks.
pub chunk_size: usize,
/// Number of snapshots to keep.
///
/// 0 means unlimited.
pub hist_size: usize,
/// Time to hold on from purging a snapshot after a remote client
/// asked for a chunk from it.
pub last_access_hold: Duration,
/// How often to check CometBFT whether it has finished syncing.
pub sync_poll_interval: Duration,
}
/// Create snapshots at regular block intervals.
pub struct SnapshotManager<BS> {
store: BS,
snapshots_dir: PathBuf,
chunk_size: usize,
hist_size: usize,
last_access_hold: Duration,
sync_poll_interval: Duration,
/// Shared state of snapshots.
state: SnapshotState,
/// Indicate whether CometBFT has finished syncing with the chain,
/// so that we can skip snapshotting old states while catching up.
is_syncing: TVar<bool>,
}
impl<BS> SnapshotManager<BS>
where
BS: Blockstore + Clone + Send + Sync + 'static,
{
/// Create a new manager.
pub fn new(store: BS, params: SnapshotParams) -> anyhow::Result<(Self, SnapshotClient)> {
// Make sure the target directory exists.
std::fs::create_dir_all(¶ms.snapshots_dir)
.context("failed to create snapshots directory")?;
let snapshot_items =
list_manifests(¶ms.snapshots_dir).context("failed to list manifests")?;
let state = SnapshotState::new(snapshot_items);
let manager: SnapshotManager<BS> = Self {
store,
snapshots_dir: params.snapshots_dir,
chunk_size: params.chunk_size,
hist_size: params.hist_size,
last_access_hold: params.last_access_hold,
sync_poll_interval: params.sync_poll_interval,
state: state.clone(),
// Assume we are syncing until we can determine otherwise.
is_syncing: TVar::new(true),
};
let client = SnapshotClient::new(params.download_dir, params.block_interval, state);
Ok((manager, client))
}
/// Produce snapshots.
pub async fn run<C>(self, client: C)
where
C: Client + Send + Sync + 'static,
{
// Start a background poll to CometBFT.
// We could just do this once and await here, but this way ostensibly CometBFT could be
// restarted without Fendermint and go through another catch up.
{
if self.sync_poll_interval.is_zero() {
atomically(|| self.is_syncing.write(false)).await;
} else {
let is_syncing = self.is_syncing.clone();
let poll_interval = self.sync_poll_interval;
tokio::spawn(async move {
poll_sync_status(client, is_syncing, poll_interval).await;
});
}
}
let mut last_params = None;
loop {
let (state_params, block_height) = atomically(|| {
// Check the current sync status. We could just query the API, but then we wouldn't
// be notified when we finally reach the end, and we'd only snapshot the next height,
// not the last one as soon as the chain is caught up.
if *self.is_syncing.read()? {
retry()?;
}
match self.state.latest_params.read()?.as_ref() {
None => retry()?,
unchanged if *unchanged == last_params => retry()?,
Some(new_params) => Ok(new_params.clone()),
}
})
.await;
match self
.create_snapshot(block_height, state_params.clone())
.await
{
Ok(item) => {
tracing::info!(
snapshot = item.snapshot_dir.to_string_lossy().to_string(),
block_height,
chunks_count = item.manifest.chunks,
snapshot_size = item.manifest.size,
"exported snapshot"
);
// Add the snapshot to the in-memory records.
atomically(|| {
self.state
.snapshots
.modify_mut(|items| items.push_back(item.clone()))
})
.await;
}
Err(e) => {
tracing::warn!(error =? e, block_height, "failed to create snapshot");
}
}
// Delete old snapshots.
self.prune_history().await;
last_params = Some((state_params, block_height));
}
}
/// Remove snapshot directories if we have more than the desired history size.
async fn prune_history(&self) {
if self.hist_size == 0 {
return;
}
let removables = atomically(|| {
self.state.snapshots.modify_mut(|snapshots| {
let mut removables = Vec::new();
while snapshots.len() > self.hist_size {
// Stop at the first snapshot that was accessed recently.
if let Some(last_access) =
snapshots.head().and_then(|s| s.last_access.elapsed().ok())
{
if last_access <= self.last_access_hold {
break;
}
}
if let Some(snapshot) = snapshots.pop_front() {
removables.push(snapshot);
} else {
break;
}
}
removables
})
})
.await;
for r in removables {
let snapshot_dir = r.snapshot_dir.to_string_lossy().to_string();
if let Err(e) = std::fs::remove_dir_all(&r.snapshot_dir) {
tracing::error!(error =? e, snapshot_dir, "failed to remove snapshot");
} else {
tracing::info!(snapshot_dir, "removed snapshot");
}
}
}
/// Export a snapshot to a temporary file, then copy it to the snapshot directory.
async fn create_snapshot(
&self,
block_height: BlockHeight,
state_params: FvmStateParams,
) -> anyhow::Result<SnapshotItem> {
let snapshot = Snapshot::new(self.store.clone(), state_params.clone(), block_height)
.context("failed to create snapshot")?;
let snapshot_version = snapshot.version();
let snapshot_name = format!("snapshot-{block_height}");
let temp_dir = tempfile::Builder::new()
.prefix(&snapshot_name)
.tempdir()
.context("failed to create temp dir for snapshot")?;
let snapshot_path = temp_dir.path().join(SNAPSHOT_FILE_NAME);
let checksum_path = temp_dir.path().join(format!("{PARTS_DIR_NAME}.sha256"));
let parts_path = temp_dir.path().join(PARTS_DIR_NAME);
// TODO: See if we can reuse the contents of an existing CAR file.
tracing::debug!(
block_height,
path = snapshot_path.to_string_lossy().to_string(),
"exporting snapshot..."
);
// Export the state to a CAR file.
snapshot
.write_car(&snapshot_path)
.await
.context("failed to write CAR file")?;
let snapshot_size = std::fs::metadata(&snapshot_path)
.context("failed to get snapshot metadata")?
.len() as usize;
// Create a checksum over the CAR file.
let checksum_bytes = file_checksum(&snapshot_path).context("failed to compute checksum")?;
std::fs::write(&checksum_path, checksum_bytes.to_string())
.context("failed to write checksum file")?;
// Create a directory for the parts.
std::fs::create_dir(&parts_path).context("failed to create parts dir")?;
// Split the CAR file into chunks.
// They can be listed in the right order with e.g. `ls | sort -n`
// Alternatively we could pad them with zeroes based on the original file size and the chunk size,
// but this way it will be easier to return them based on a numeric index.
let chunks_count = car::split(&snapshot_path, &parts_path, self.chunk_size, |idx| {
format!("{idx}.part")
})
.await
.context("failed to split CAR into chunks")?;
// Create and export a manifest that we can easily look up.
let manifest = SnapshotManifest {
block_height,
size: snapshot_size as u64,
chunks: chunks_count as u32,
checksum: checksum_bytes,
state_params,
version: snapshot_version,
};
let _ = write_manifest(temp_dir.path(), &manifest).context("failed to export manifest")?;
let snapshots_dir = self.snapshots_dir.join(&snapshot_name);
move_or_copy(temp_dir.path(), &snapshots_dir).context("failed to move snapshot")?;
Ok(SnapshotItem::new(snapshots_dir, manifest))
}
}
/// Periodically ask CometBFT if it has caught up with the chain.
async fn poll_sync_status<C>(client: C, is_syncing: TVar<bool>, poll_interval: Duration)
where
C: Client + Send + Sync + 'static,
{
loop {
match client.status().await {
Ok(status) => {
let catching_up = status.sync_info.catching_up;
atomically(|| {
if *is_syncing.read()? != catching_up {
is_syncing.write(catching_up)?;
}
Ok(())
})
.await;
}
Err(e) => {
tracing::warn!(error =? e, "failed to poll CometBFT sync status");
}
}
tokio::time::sleep(poll_interval).await;
}
}
/// Try to move the entire snapshot directory to its final place,
/// then remove the snapshot file, keeping only the parts.
///
/// If that fails, for example because it would be moving between a
/// Docker container's temporary directory to the host mounted volume,
/// then fall back to copying.
fn move_or_copy(from: &Path, to: &Path) -> anyhow::Result<()> {
if std::fs::rename(from, to).is_ok() {
// Delete the big CAR file - keep the only the parts.
std::fs::remove_file(to.join(SNAPSHOT_FILE_NAME)).context("failed to remove CAR file")?;
} else {
dircpy::CopyBuilder::new(from, to)
.with_exclude_filter(SNAPSHOT_FILE_NAME)
.run()?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use std::{sync::Arc, time::Duration};
use async_stm::{atomically, retry};
use fendermint_vm_genesis::Genesis;
use fendermint_vm_interpreter::{
fvm::{
bundle::{bundle_path, contracts_path, custom_actors_bundle_path},
state::{snapshot::Snapshot, FvmGenesisState, FvmStateParams},
store::memory::MemoryBlockstore,
upgrades::UpgradeScheduler,
FvmMessageInterpreter,
},
GenesisInterpreter,
};
use fvm::engine::MultiEngine;
use quickcheck::Arbitrary;
use crate::{manager::SnapshotParams, manifest, PARTS_DIR_NAME};
use super::SnapshotManager;
// Initialise genesis and export it directly to see if it works.
#[tokio::test]
async fn create_snapshots_directly() {
let (state_params, store) = init_genesis().await;
let snapshot = Snapshot::new(store, state_params, 0).expect("failed to create snapshot");
let tmp_path = tempfile::NamedTempFile::new().unwrap().into_temp_path();
snapshot
.write_car(&tmp_path)
.await
.expect("failed to write snapshot");
}
// Initialise genesis, create a snapshot manager, export a snapshot, create another manager, list snapshots.
// Don't forget to run this with `--release` beause of Wasm.
#[tokio::test]
async fn create_snapshot_with_manager() {
let (state_params, store) = init_genesis().await;
// Now we have one store initialized with genesis, let's create a manager and snapshot it.
let snapshots_dir = tempfile::tempdir().expect("failed to create tmp dir");
let download_dir = tempfile::tempdir().expect("failed to create tmp dir");
// Not polling because it's cumbersome to mock it.
let never_poll_sync = Duration::ZERO;
let never_poll_client = mock_client();
let (snapshot_manager, snapshot_client) = SnapshotManager::new(
store.clone(),
SnapshotParams {
snapshots_dir: snapshots_dir.path().into(),
download_dir: download_dir.path().into(),
block_interval: 1,
chunk_size: 10000,
hist_size: 1,
last_access_hold: Duration::ZERO,
sync_poll_interval: never_poll_sync,
},
)
.expect("failed to create snapshot manager");
// Start the manager in the background
tokio::spawn(async move { snapshot_manager.run(never_poll_client).await });
// Make sure we have no snapshots currently.
let snapshots = atomically(|| snapshot_client.list_snapshots()).await;
assert!(snapshots.is_empty());
// Notify about snapshottable height.
atomically(|| snapshot_client.notify(0, state_params.clone())).await;
// Wait for the new snapshot to appear in memory.
let snapshots = tokio::time::timeout(
Duration::from_secs(10),
atomically(|| {
let snapshots = snapshot_client.list_snapshots()?;
if snapshots.is_empty() {
retry()
} else {
Ok(snapshots)
}
}),
)
.await
.expect("failed to export snapshot");
assert_eq!(snapshots.len(), 1);
let snapshot = snapshots.into_iter().next().unwrap();
assert!(snapshot.manifest.chunks > 1);
assert_eq!(snapshot.manifest.block_height, 0);
assert_eq!(snapshot.manifest.state_params, state_params);
assert_eq!(
snapshot.snapshot_dir.as_path(),
snapshots_dir.path().join("snapshot-0")
);
let _ = std::fs::File::open(snapshot.snapshot_dir.join("manifest.json"))
.expect("manifests file exists");
let snapshots = manifest::list_manifests(snapshots_dir.path()).unwrap();
assert_eq!(snapshots.len(), 1, "can list manifests");
assert_eq!(snapshots[0], snapshot);
let checksum =
manifest::parts_checksum(snapshot.snapshot_dir.as_path().join(PARTS_DIR_NAME))
.expect("parts checksum can be calculated");
assert_eq!(
checksum, snapshot.manifest.checksum,
"checksum should match"
);
// Create a new manager instance
let (_, new_client) = SnapshotManager::new(
store,
SnapshotParams {
snapshots_dir: snapshots_dir.path().into(),
download_dir: download_dir.path().into(),
block_interval: 1,
chunk_size: 10000,
hist_size: 1,
last_access_hold: Duration::ZERO,
sync_poll_interval: never_poll_sync,
},
)
.expect("failed to create snapshot manager");
let snapshots = atomically(|| new_client.list_snapshots()).await;
assert!(!snapshots.is_empty(), "loads manifests on start");
}
async fn init_genesis() -> (FvmStateParams, MemoryBlockstore) {
let mut g = quickcheck::Gen::new(5);
let genesis = Genesis::arbitrary(&mut g);
let bundle = std::fs::read(bundle_path()).expect("failed to read bundle");
let custom_actors_bundle = std::fs::read(custom_actors_bundle_path())
.expect("failed to read custom actors bundle");
let multi_engine = Arc::new(MultiEngine::default());
let store = MemoryBlockstore::new();
let state =
FvmGenesisState::new(store.clone(), multi_engine, &bundle, &custom_actors_bundle)
.await
.expect("failed to create state");
let interpreter = FvmMessageInterpreter::new(
mock_client(),
None,
contracts_path(),
1.05,
1.05,
false,
UpgradeScheduler::new(),
);
let (state, out) = interpreter
.init(state, genesis)
.await
.expect("failed to init genesis");
let state_root = state.commit().expect("failed to commit");
let state_params = FvmStateParams {
state_root,
timestamp: out.timestamp,
network_version: out.network_version,
base_fee: out.base_fee,
circ_supply: out.circ_supply,
chain_id: out.chain_id.into(),
power_scale: out.power_scale,
app_version: 0,
};
(state_params, store)
}
fn mock_client() -> tendermint_rpc::MockClient<tendermint_rpc::MockRequestMethodMatcher> {
tendermint_rpc::MockClient::new(tendermint_rpc::MockRequestMethodMatcher::default()).0
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/state.rs | fendermint/vm/snapshot/src/state.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{fs::File, io, path::PathBuf, sync::Arc, time::SystemTime};
use anyhow::{bail, Context};
use async_stm::TVar;
use fendermint_vm_interpreter::fvm::state::snapshot::{BlockStateParams, Snapshot};
use fvm_ipld_blockstore::Blockstore;
use tempfile::TempDir;
use crate::{
manifest::{self, SnapshotManifest},
PARTS_DIR_NAME, SNAPSHOT_FILE_NAME,
};
/// State of snapshots, including the list of available completed ones
/// and the next eligible height.
#[derive(Clone)]
pub struct SnapshotState {
/// Completed snapshots.
pub snapshots: TVar<im::Vector<SnapshotItem>>,
/// The latest state parameters at a snapshottable height.
pub latest_params: TVar<Option<BlockStateParams>>,
/// The latest snapshot offered, which CometBFT is downloading and feeding to us.
pub current_download: TVar<Option<SnapshotDownload>>,
}
impl SnapshotState {
pub fn new(snapshots: Vec<SnapshotItem>) -> Self {
Self {
snapshots: TVar::new(snapshots.into()),
// Start with nothing to snapshot until we are notified about a new height.
// We could also look back to find the latest height we should have snapshotted.
latest_params: TVar::new(None),
current_download: TVar::new(None),
}
}
}
/// A snapshot directory and its manifest.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SnapshotItem {
/// Directory containing this snapshot, ie. the manifest and the parts.
pub snapshot_dir: PathBuf,
/// Parsed `manifest.json` contents.
pub manifest: SnapshotManifest,
/// Last time a peer asked for a chunk from this snapshot.
pub last_access: SystemTime,
}
impl SnapshotItem {
pub fn new(snapshot_dir: PathBuf, manifest: SnapshotManifest) -> Self {
Self {
snapshot_dir,
manifest,
last_access: SystemTime::UNIX_EPOCH,
}
}
fn parts_dir(&self) -> PathBuf {
self.snapshot_dir.join(PARTS_DIR_NAME)
}
/// Load the data from disk.
///
/// Returns an error if the chunk isn't within range or if the file doesn't exist any more.
pub fn load_chunk(&self, chunk: u32) -> anyhow::Result<Vec<u8>> {
if chunk >= self.manifest.chunks {
bail!(
"cannot load chunk {chunk}; only have {} in the snapshot",
self.manifest.chunks
);
}
let chunk_file = self.parts_dir().join(format!("{chunk}.part"));
let content = std::fs::read(&chunk_file)
.with_context(|| format!("failed to read chunk {}", chunk_file.to_string_lossy()))?;
Ok(content)
}
/// Import a snapshot into the blockstore.
pub async fn import<BS>(&self, store: BS, validate: bool) -> anyhow::Result<Snapshot<BS>>
where
BS: Blockstore + Send + Clone + 'static,
{
let parts =
manifest::list_parts(self.parts_dir()).context("failed to list snapshot parts")?;
// 1. Restore the snapshots into a complete `snapshot.car` file.
let car_path = self.snapshot_dir.join(SNAPSHOT_FILE_NAME);
let mut car_file = File::create(&car_path).context("failed to create CAR file")?;
for part in parts {
let mut part_file = File::open(&part).with_context(|| {
format!("failed to open snapshot part {}", part.to_string_lossy())
})?;
io::copy(&mut part_file, &mut car_file)?;
}
// 2. Import the contents.
let result = Snapshot::read_car(&car_path, store, validate).await;
// 3. Remove the restored file.
std::fs::remove_file(&car_path).context("failed to remove CAR file")?;
// If the import failed, or it fails to validate, it will leave unwanted data in the blockstore.
//
// We could do the import into a namespace which is separate from the state store, and move the data
// if everything we see what successful, but it would need more database API exposed that we don't
// currently have access to. At the moment our best bet to remove the data is to implement garbage
// collection - if the CIDs are unreachable through state roots, they will be removed.
//
// Another thing worth noting is that the `Snapshot` imports synthetic records into the blockstore
// that did not exist in the original: the metadata, an some technical constructs that point at
// the real data and store application state (which is verfied below). It's not easy to get rid
// of these: the `Blockstore` doesn't allow us to delete CIDs, and the `Snapshot` doesn't readily
// expose what the CIDs of the extra records were. Our other option would be to load the data
// into a staging area (see above) and then walk the DAG and only load what is reachable from
// the state root.
//
// Inserting CIDs into the state store which did not exist in the original seem like a vector
// of attack that could be used to cause consensus failure: if the attacker deployed a contract
// that looked up a CID that validators who imported a snapshot have, but others don't, that
// would cause a fork. However, his is why the FVM doesn't currently allow the deployment of
// user defined Wasm actors: the FEVM actors do not allow the lookup of arbitrary CIDs, so they
// are safe, while Wasm actors with direct access to the IPLD SDK methods would be vulnerable.
// Once the FVM implements the "reachability analysis" feature, it won't matter if we have an
// extra record or not.
//
// Actually a very similar situation arises with garbage collection: since the length of history
// is configurable, whether some CIDs are (still) present or not depends on how the validator
// configured their nodes, and cannot be allowed to cause a failure.
let snapshot = result.context("failed to import the snapshot into the blockstore")?;
// 4. See if we actually imported what we thought we would.
if validate {
match snapshot {
Snapshot::V1(ref snapshot) => {
if snapshot.block_height() != self.manifest.block_height {
bail!(
"invalid snapshot block height; expected {}, imported {}",
self.manifest.block_height,
snapshot.block_height()
);
}
if *snapshot.state_params() != self.manifest.state_params {
bail!(
"invalid state params; expected {:?}, imported {:?}",
self.manifest.state_params,
snapshot.state_params()
)
}
}
}
}
Ok(snapshot)
}
}
/// An ongoing, incomplete download of a snapshot.
#[derive(Clone)]
pub struct SnapshotDownload {
pub manifest: SnapshotManifest,
// Temporary download directory. Removed when this download is dropped.
pub download_dir: Arc<TempDir>,
// Next expected chunk index.
pub next_index: TVar<u32>,
}
impl SnapshotDownload {
pub fn parts_dir(&self) -> PathBuf {
self.download_dir.path().join(PARTS_DIR_NAME)
}
}
#[cfg(feature = "arb")]
mod arb {
use std::{path::PathBuf, time::SystemTime};
use super::{SnapshotItem, SnapshotManifest};
impl quickcheck::Arbitrary for SnapshotItem {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
manifest: SnapshotManifest::arbitrary(g),
snapshot_dir: PathBuf::arbitrary(g),
last_access: SystemTime::arbitrary(g),
}
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/client.rs | fendermint/vm/snapshot/src/client.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{path::PathBuf, sync::Arc, time::SystemTime};
use async_stm::{abort, Stm, StmResult, TVar};
use fendermint_vm_interpreter::fvm::state::{
snapshot::{BlockHeight, SnapshotVersion},
FvmStateParams,
};
use crate::{
manifest,
state::{SnapshotDownload, SnapshotState},
SnapshotError, SnapshotItem, SnapshotManifest, MANIFEST_FILE_NAME,
};
/// Interface to snapshot state for the application.
#[derive(Clone)]
pub struct SnapshotClient {
download_dir: PathBuf,
/// The client will only notify the manager of snapshottable heights.
snapshot_interval: BlockHeight,
state: SnapshotState,
}
impl SnapshotClient {
pub fn new(
download_dir: PathBuf,
snapshot_interval: BlockHeight,
state: SnapshotState,
) -> Self {
Self {
download_dir,
snapshot_interval,
state,
}
}
/// Set the latest block state parameters and notify the manager.
///
/// Call this with the block height where the `app_hash` in the block reflects the
/// state in the parameters, that is, the in the *next* block.
pub fn notify(&self, block_height: BlockHeight, state_params: FvmStateParams) -> Stm<()> {
if block_height % self.snapshot_interval == 0 {
self.state
.latest_params
.write(Some((state_params, block_height)))?;
}
Ok(())
}
/// List completed snapshots.
pub fn list_snapshots(&self) -> Stm<im::Vector<SnapshotItem>> {
self.state.snapshots.read_clone()
}
/// Try to find a snapshot, if it still exists.
///
/// If found, mark it as accessed, so that it doesn't get purged while likely to be requested or read from disk.
pub fn access_snapshot(
&self,
block_height: BlockHeight,
version: SnapshotVersion,
) -> Stm<Option<SnapshotItem>> {
let mut snapshots = self.state.snapshots.read_clone()?;
let mut snapshot = None;
for s in snapshots.iter_mut() {
if s.manifest.block_height == block_height && s.manifest.version == version {
s.last_access = SystemTime::now();
snapshot = Some(s.clone());
break;
}
}
if snapshot.is_some() {
self.state.snapshots.write(snapshots)?;
}
Ok(snapshot)
}
/// If the offered snapshot is accepted, we create a temporary directory to hold the chunks
/// and remember it as our current snapshot being downloaded.
pub fn offer_snapshot(&self, manifest: SnapshotManifest) -> StmResult<PathBuf, SnapshotError> {
if manifest.version != 1 {
abort(SnapshotError::IncompatibleVersion(manifest.version))
} else {
match tempfile::tempdir_in(&self.download_dir) {
Ok(dir) => {
// Save the manifest into the temp directory;
// that way we can always see on the file system what's happening.
let json = match serde_json::to_string_pretty(&manifest)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))
{
Ok(json) => json,
Err(e) => return abort(SnapshotError::from(e)),
};
let download_path: PathBuf = dir.path().into();
let download = SnapshotDownload {
manifest,
download_dir: Arc::new(dir),
next_index: TVar::new(0),
};
// Create a `parts` sub-directory for the chunks.
if let Err(e) = std::fs::create_dir(download.parts_dir()) {
return abort(SnapshotError::from(e));
};
if let Err(e) = std::fs::write(download_path.join(MANIFEST_FILE_NAME), json) {
return abort(SnapshotError::from(e));
}
self.state.current_download.write(Some(download))?;
Ok(download_path)
}
Err(e) => abort(SnapshotError::from(e))?,
}
}
}
/// Take a chunk sent to us by a remote peer. This is our chance to validate chunks on the fly.
///
/// Returns `None` while there are more chunks to download and `Some` when all
/// the chunks have been received and basic file integrity validated.
///
/// Then we can import the snapshot into the blockstore separately.
pub fn save_chunk(
&self,
index: u32,
contents: Vec<u8>,
) -> StmResult<Option<SnapshotItem>, SnapshotError> {
if let Some(cd) = self.state.current_download.read()?.as_ref() {
let next_index = cd.next_index.read_clone()?;
if index != next_index {
abort(SnapshotError::UnexpectedChunk(next_index, index))
} else {
let part_path = cd.parts_dir().join(format!("{}.part", index));
// We are doing IO inside the STM transaction, but that's okay because there is no contention on the download.
match std::fs::write(part_path, contents) {
Ok(()) => {
let next_index = index + 1;
cd.next_index.write(next_index)?;
if next_index == cd.manifest.chunks {
// Verify the checksum then load the snapshot and remove the current download from memory.
match manifest::parts_checksum(cd.parts_dir()) {
Ok(checksum) => {
if checksum == cd.manifest.checksum {
let item = SnapshotItem::new(
cd.download_dir.path().into(),
cd.manifest.clone(),
);
Ok(Some(item))
} else {
abort(SnapshotError::WrongChecksum(
cd.manifest.checksum,
checksum,
))
}
}
Err(e) => abort(SnapshotError::IoError(std::io::Error::new(
std::io::ErrorKind::Other,
e.to_string(),
))),
}
} else {
Ok(None)
}
}
Err(e) => {
// If we failed to save the data to disk we can return an error that will cause all snapshots to be aborted.
// There is no point trying to clear download from the state here because if we `abort` then all changes will be dropped.
abort(SnapshotError::from(e))
}
}
}
} else {
abort(SnapshotError::NoDownload)
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/error.rs | fendermint/vm/snapshot/src/error.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fendermint_vm_interpreter::fvm::state::snapshot::SnapshotVersion;
/// Possible errors with snapshots.
#[derive(Debug, thiserror::Error)]
pub enum SnapshotError {
#[error("incompatible snapshot version: {0}")]
IncompatibleVersion(SnapshotVersion),
#[error("IO error: {0}")]
IoError(#[from] std::io::Error),
#[error("there is no ongoing snapshot download")]
NoDownload,
#[error("unexpected chunk index; expected {0}, got {1}")]
UnexpectedChunk(u32, u32),
#[error("wrong checksum; expected {0}, got {1}")]
WrongChecksum(tendermint::Hash, tendermint::Hash),
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/car/streamer.rs | fendermint/vm/snapshot/src/car/streamer.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use futures::{AsyncRead, Future, Stream};
use std::pin::Pin;
use std::task::{Context, Poll};
use fvm_ipld_car::CarReader;
use fvm_ipld_car::Error as CarError;
type BlockStreamerItem = Result<(Cid, Vec<u8>), CarError>;
type BlockStreamerRead<R> = (CarReader<R>, Option<BlockStreamerItem>);
type BlockStreamerReadFuture<R> = Pin<Box<dyn Future<Output = BlockStreamerRead<R>> + Send>>;
enum BlockStreamerState<R> {
Idle(CarReader<R>),
Reading(BlockStreamerReadFuture<R>),
}
/// Stream the content blocks from a CAR reader.
pub struct BlockStreamer<R> {
state: Option<BlockStreamerState<R>>,
}
impl<R> BlockStreamer<R>
where
R: AsyncRead + Send + Unpin,
{
pub fn new(reader: CarReader<R>) -> Self {
Self {
state: Some(BlockStreamerState::Idle(reader)),
}
}
async fn next_block(mut reader: CarReader<R>) -> BlockStreamerRead<R> {
let res = reader.next_block().await;
let out = match res {
Err(e) => Some(Err(e)),
Ok(Some(b)) => Some(Ok((b.cid, b.data))),
Ok(None) => None,
};
(reader, out)
}
fn poll_next_block(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
mut next_block: BlockStreamerReadFuture<R>,
) -> Poll<Option<BlockStreamerItem>> {
use BlockStreamerState::*;
match next_block.as_mut().poll(cx) {
Poll::Pending => {
self.state = Some(Reading(next_block));
Poll::Pending
}
Poll::Ready((reader, out)) => {
self.state = Some(Idle(reader));
Poll::Ready(out)
}
}
}
}
impl<R> Stream for BlockStreamer<R>
where
R: AsyncRead + Send + Unpin + 'static,
{
type Item = BlockStreamerItem;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
use BlockStreamerState::*;
match self.state.take() {
None => Poll::Ready(None),
Some(Idle(reader)) => {
let next_block = Self::next_block(reader);
let next_block = Box::pin(next_block);
self.poll_next_block(cx, next_block)
}
Some(Reading(next_block)) => self.poll_next_block(cx, next_block),
}
}
}
#[cfg(test)]
mod tests {
use fendermint_vm_interpreter::fvm::bundle::bundle_path;
use futures::{AsyncRead, StreamExt};
use fvm_ipld_blockstore::MemoryBlockstore;
use fvm_ipld_car::{load_car, CarReader};
use tokio_util::compat::TokioAsyncReadCompatExt;
use super::BlockStreamer;
async fn bundle_file() -> tokio_util::compat::Compat<tokio::fs::File> {
let bundle_path = bundle_path();
tokio::fs::File::open(bundle_path).await.unwrap().compat()
}
/// Check that a CAR file can be loaded from a byte reader.
async fn check_load_car<R>(reader: R)
where
R: AsyncRead + Send + Unpin,
{
let store = MemoryBlockstore::new();
load_car(&store, reader).await.expect("failed to load CAR");
}
/// Check that a CAR file can be streamed without errors.
async fn check_block_streamer<R>(reader: R)
where
R: AsyncRead + Send + Unpin + 'static,
{
let reader = CarReader::new_unchecked(reader)
.await
.expect("failed to open CAR reader");
let streamer = BlockStreamer::new(reader);
streamer
.for_each(|r| async move {
r.expect("should be ok");
})
.await;
}
/// Sanity check that the test bundle can be loaded with the normal facilities from a file.
#[tokio::test]
async fn load_bundle_from_file() {
let bundle_file = bundle_file().await;
check_load_car(bundle_file).await;
}
#[tokio::test]
async fn block_streamer_from_file() {
let bundle_file = bundle_file().await;
check_block_streamer(bundle_file).await;
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/car/chunker.rs | fendermint/vm/snapshot/src/car/chunker.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use futures::{AsyncWrite, Future};
use std::io::{Error as IoError, Result as IoResult};
use std::path::PathBuf;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio_util::compat::TokioAsyncWriteCompatExt;
type BoxedFutureFile = Pin<Box<dyn Future<Output = IoResult<tokio::fs::File>> + Send + 'static>>;
type BoxedFile = Pin<Box<tokio_util::compat::Compat<tokio::fs::File>>>;
type StatePoll<T> = (ChunkWriterState, Poll<IoResult<T>>);
enum ChunkWriterState {
Idle,
Opening { out: BoxedFutureFile },
Open { out: BoxedFile, written: usize },
Closing { out: BoxedFile },
}
impl ChunkWriterState {
fn ok<T>(self, value: T) -> StatePoll<T> {
(self, Poll::Ready(Ok(value)))
}
fn err<T>(self, err: IoError) -> StatePoll<T> {
(self, Poll::Ready(Err(err)))
}
fn pending<T>(self) -> StatePoll<T> {
(self, Poll::Pending)
}
}
/// Write a CAR file to chunks under an output directory:
/// 1. the first chunk is assumed to be just the header and goes into its own file
/// 2. subsequent blocks are assumed to be the contents and go into files with limited size
pub struct ChunkWriter {
output_dir: PathBuf,
max_size: usize,
file_name: Box<dyn Fn(usize) -> String + Send + Sync>,
next_idx: usize,
state: ChunkWriterState,
}
impl ChunkWriter {
pub fn new<F>(output_dir: PathBuf, max_size: usize, file_name: F) -> Self
where
F: Fn(usize) -> String + Send + Sync + 'static,
{
Self {
output_dir,
max_size,
file_name: Box::new(file_name),
next_idx: 0,
state: ChunkWriterState::Idle,
}
}
/// Number of chunks created so far.
pub fn chunk_created(&self) -> usize {
self.next_idx
}
fn take_state(&mut self) -> ChunkWriterState {
let mut state = ChunkWriterState::Idle;
std::mem::swap(&mut self.state, &mut state);
state
}
/// Replace the state with a new one, returning the poll result.
fn poll_state<F, T>(self: &mut Pin<&mut Self>, f: F) -> Poll<IoResult<T>>
where
F: FnOnce(&mut Pin<&mut Self>, ChunkWriterState) -> StatePoll<T>,
{
let state = self.take_state();
let (state, poll) = f(self, state);
self.state = state;
poll
}
/// Open the file, then do something with it.
fn state_poll_open<F, T>(cx: &mut Context<'_>, mut out: BoxedFutureFile, f: F) -> StatePoll<T>
where
F: FnOnce(&mut Context<'_>, BoxedFile) -> StatePoll<T>,
{
use ChunkWriterState::*;
match out.as_mut().poll(cx) {
Poll::Pending => Opening { out }.pending(),
Poll::Ready(Err(e)) => Idle.err(e),
Poll::Ready(Ok(out)) => {
let out = Box::pin(out.compat_write());
f(cx, out)
}
}
}
/// Write to the open file.
fn state_poll_write(
cx: &mut Context<'_>,
buf: &[u8],
mut out: BoxedFile,
sofar: usize,
) -> StatePoll<usize> {
use ChunkWriterState::*;
match out.as_mut().poll_write(cx, buf) {
Poll::Pending => Open {
out,
written: sofar,
}
.pending(),
Poll::Ready(Ok(written)) => Open {
out,
written: sofar + written,
}
.ok(written),
Poll::Ready(Err(e)) => Open {
out,
written: sofar,
}
.err(e),
}
}
/// Close the file.
fn state_poll_close(cx: &mut Context<'_>, mut out: BoxedFile) -> StatePoll<()> {
use ChunkWriterState::*;
match out.as_mut().poll_close(cx) {
Poll::Pending => Closing { out }.pending(),
Poll::Ready(Err(e)) => Idle.err(e),
Poll::Ready(Ok(())) => Idle.ok(()),
}
}
/// Open the file then write to it.
fn state_poll_open_write(
cx: &mut Context<'_>,
buf: &[u8],
out: BoxedFutureFile,
) -> StatePoll<usize> {
Self::state_poll_open(cx, out, |cx, out| Self::state_poll_write(cx, buf, out, 0))
}
/// Open the next file, then increment the index.
fn next_file(&mut self) -> BoxedFutureFile {
let name = (self.file_name)(self.next_idx);
let out = self.output_dir.join(name);
self.next_idx += 1;
Box::pin(tokio::fs::File::create(out))
}
}
impl AsyncWrite for ChunkWriter {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<IoResult<usize>> {
use ChunkWriterState::*;
self.poll_state(|this, state| match state {
Idle => Self::state_poll_open_write(cx, buf, this.next_file()),
Opening { out } => Self::state_poll_open_write(cx, buf, out),
Open { out, written } => Self::state_poll_write(cx, buf, out, written),
Closing { out } => {
let (state, poll) = Self::state_poll_close(cx, out);
if poll.is_ready() {
Self::state_poll_open_write(cx, buf, this.next_file())
} else {
state.pending()
}
}
})
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<IoResult<()>> {
use ChunkWriterState::*;
self.poll_state(|this, state| match state {
Idle => state.ok(()),
Opening { out } => {
// When we just opened this file, there is nothing to flush.
Self::state_poll_open(cx, out, |_cx: &mut Context<'_>, out| {
Open { out, written: 0 }.ok(())
})
}
Open { mut out, written } => match out.as_mut().poll_flush(cx) {
Poll::Pending => Open { out, written }.pending(),
Poll::Ready(Err(e)) => Open { out, written }.err(e),
Poll::Ready(Ok(())) => {
// Close the file if either:
// a) we have written the header, or
// b) we exceeded the maximum file size.
// The flush is ensured by `fvm_ipld_car::util::ld_write` called by `CarHeader::write_stream_async` with the header.
// The file is closed here not in `poll_write` so we don't have torn writes where the varint showing the size is split from the data.
let close = this.next_idx == 1 || written >= this.max_size && this.max_size > 0;
if close {
Self::state_poll_close(cx, out)
} else {
Open { out, written }.ok(())
}
}
},
Closing { out } => Self::state_poll_close(cx, out),
})
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<IoResult<()>> {
use ChunkWriterState::*;
self.poll_state(|_, state| match state {
Idle => state.ok(()),
Opening { out } => Self::state_poll_open(cx, out, Self::state_poll_close),
Open { out, .. } => Self::state_poll_close(cx, out),
Closing { out } => Self::state_poll_close(cx, out),
})
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/src/car/mod.rs | fendermint/vm/snapshot/src/car/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! CAR file chunking utilities
//!
//! See https://ipld.io/specs/transport/car/carv1/
use anyhow::{self, Context as AnyhowContext};
use futures::{future, StreamExt};
use std::path::Path;
use tokio_util::compat::TokioAsyncReadCompatExt;
use fvm_ipld_car::{CarHeader, CarReader};
use self::{chunker::ChunkWriter, streamer::BlockStreamer};
mod chunker;
mod streamer;
/// Take an existing CAR file and split it up into an output directory by creating
/// files with a limited size for each file.
///
/// The first (0th) file will be just the header, with the rest containing the "content" blocks.
///
/// Returns the number of chunks created.
pub async fn split<F>(
input_file: &Path,
output_dir: &Path,
max_size: usize,
file_name: F,
) -> anyhow::Result<usize>
where
F: Fn(usize) -> String + Send + Sync + 'static,
{
let file = tokio::fs::File::open(input_file)
.await
.with_context(|| format!("failed to open CAR file: {}", input_file.to_string_lossy()))?;
let reader: CarReader<_> = CarReader::new_unchecked(file.compat())
.await
.context("failed to open CAR reader")?;
// Create a Writer that opens new files when the maximum is reached.
let mut writer = ChunkWriter::new(output_dir.into(), max_size, file_name);
let header = CarHeader::new(reader.header.roots.clone(), reader.header.version);
let block_streamer = BlockStreamer::new(reader);
// We shouldn't see errors when reading the CAR files, as we have written them ourselves,
// but for piece of mind let's log any errors and move on.
let mut block_streamer = block_streamer.filter_map(|res| match res {
Ok(b) => future::ready(Some(b)),
Err(e) => {
// TODO: It would be better to stop if there are errors.
tracing::warn!(
error = e.to_string(),
file = input_file.to_string_lossy().to_string(),
"CAR block failure"
);
future::ready(None)
}
});
// Copy the input CAR into an output CAR.
header
.write_stream_async(&mut writer, &mut block_streamer)
.await
.context("failed to write CAR file")?;
Ok(writer.chunk_created())
}
#[cfg(test)]
mod tests {
use fendermint_vm_interpreter::fvm::bundle::bundle_path;
use tempfile::tempdir;
use super::split;
/// Load the actor bundle CAR file, split it into chunks, then restore and compare to the original.
#[tokio::test]
async fn split_bundle_car() {
let bundle_path = bundle_path();
let bundle_bytes = std::fs::read(&bundle_path).unwrap();
let tmp = tempdir().unwrap();
let target_count = 10;
let max_size = bundle_bytes.len() / target_count;
let chunks_count = split(&bundle_path, tmp.path(), max_size, |idx| idx.to_string())
.await
.expect("failed to split CAR file");
let mut chunks = std::fs::read_dir(tmp.path())
.unwrap()
.collect::<Result<Vec<_>, _>>()
.unwrap();
// There are few enough that we can get away without converting to an integer.
chunks.sort_unstable_by_key(|c| c.path().to_string_lossy().to_string());
let chunks = chunks
.into_iter()
.map(|c| {
let chunk_size = std::fs::metadata(c.path()).unwrap().len() as usize;
(c, chunk_size)
})
.collect::<Vec<_>>();
let chunks_bytes = chunks.iter().fold(Vec::new(), |mut acc, (c, _)| {
let bz = std::fs::read(c.path()).unwrap();
acc.extend(bz);
acc
});
assert_eq!(chunks_count, chunks.len());
assert!(
1 < chunks.len() && chunks.len() <= 1 + target_count,
"expected 1 header and max {} chunks, got {}",
target_count,
chunks.len()
);
assert!(chunks[0].1 < 100, "header is small");
assert_eq!(chunks_bytes.len(), bundle_bytes.len());
assert_eq!(chunks_bytes[0..100], bundle_bytes[0..100]);
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/snapshot/tests/golden.rs | fendermint/vm/snapshot/tests/golden.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
/// JSON based test so we can parse data from the disk where it's nice to be human readable.
mod json {
use fendermint_testing::golden_json;
use fendermint_vm_snapshot::SnapshotManifest;
use quickcheck::Arbitrary;
golden_json! { "manifest/json", manifest, SnapshotManifest::arbitrary }
}
/// CBOR based test to make sure we can parse data in network format and we also cover the state params.
mod cbor {
use fendermint_testing::golden_cbor;
use fendermint_vm_snapshot::SnapshotManifest;
use quickcheck::Arbitrary;
golden_cbor! { "manifest/cbor", manifest, SnapshotManifest::arbitrary }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/core/src/lib.rs | fendermint/vm/core/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod chainid;
mod timestamp;
pub use timestamp::Timestamp;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/core/src/timestamp.rs | fendermint/vm/core/src/timestamp.rs | use std::time::SystemTime;
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use serde::{Deserialize, Serialize};
/// Unix timestamp (in seconds).
#[derive(Clone, Debug, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub struct Timestamp(pub u64);
impl Timestamp {
pub fn as_secs(&self) -> i64 {
self.0 as i64
}
pub fn current() -> Self {
let d = std::time::SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("duration since epoch");
Self(d.as_secs())
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/core/src/chainid.rs | fendermint/vm/core/src/chainid.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::collections::HashMap;
use std::hash::Hasher;
use fvm_shared::bigint::Integer;
use fvm_shared::chainid::ChainID;
use lazy_static::lazy_static;
use regex::Regex;
use thiserror::Error;
lazy_static! {
/// Well known Filecoin chain IDs.
///
/// See all EVM chain IDs at this repo: https://github.com/ethereum-lists/chains/pull/1567
/// For now I thought it would be enough to enumerate the Filecoin ones.
static ref KNOWN_CHAIN_IDS: HashMap<u64, &'static str> = HashMap::from([
(0, ""), // Used as a default
(314, "filecoin"),
(3141, "hyperspace"),
(31415, "wallaby"),
(3141592, "butterflynet"),
(314159, "calibnet"),
(31415926, "devnet"),
]);
/// Reverse index over the chain IDs.
static ref KNOWN_CHAIN_NAMES: HashMap<&'static str, u64> = KNOWN_CHAIN_IDS.iter().map(|(k, v)| (*v, *k)).collect();
/// Regex for capturing a single root subnet ID.
///
/// See https://github.com/consensus-shipyard/ipc-actors/pull/109
static ref ROOT_RE: Regex = Regex::new(r"^/r(0|[1-9]\d*)$").unwrap();
}
/// Maximum value that MetaMask and other Ethereum JS tools can safely handle.
///
/// See https://github.com/ethereum/EIPs/issues/2294
pub const MAX_CHAIN_ID: u64 = 4503599627370476;
#[derive(Error, Debug)]
pub enum ChainIDError {
/// The name was hashed to a numeric value of a well-known chain.
/// The chances of this are low, but if it happens, try picking a different name, if possible.
#[error("illegal name: {0} ({1})")]
IllegalName(String, u64),
}
/// Hash the name of the chain and reduce it to a number within the acceptable range.
///
/// If the name is one of the well known ones, return the ID for that name as-is.
pub fn from_str_hashed(name: &str) -> Result<ChainID, ChainIDError> {
// See if the name matches one of the well known chains.
if let Some(chain_id) = KNOWN_CHAIN_NAMES.get(name) {
return Ok(ChainID::from(*chain_id));
}
// See if the name is actually a rootnet ID like "/r123"
if let Some(chain_id) = just_root_id(name) {
return Ok(ChainID::from(chain_id));
}
let mut hasher = fnv::FnvHasher::default();
hasher.write(name.as_bytes());
let num_digest = hasher.finish();
let chain_id = num_digest.mod_floor(&MAX_CHAIN_ID);
if KNOWN_CHAIN_IDS.contains_key(&chain_id) {
Err(ChainIDError::IllegalName(name.to_owned(), chain_id))
} else {
Ok(ChainID::from(chain_id))
}
}
/// Anything that has a [`ChainID`].
pub trait HasChainID {
fn chain_id(&self) -> ChainID;
}
/// Extract the root chain ID _iff_ the name is in the format of "/r<chain-id>".
fn just_root_id(name: &str) -> Option<u64> {
ROOT_RE.captures_iter(name).next().and_then(|cap| {
let chain_id = &cap[1];
chain_id.parse::<u64>().ok()
})
}
#[cfg(test)]
mod tests {
use fvm_shared::chainid::ChainID;
use quickcheck_macros::quickcheck;
use crate::chainid::{just_root_id, KNOWN_CHAIN_NAMES};
use super::{from_str_hashed, MAX_CHAIN_ID};
#[quickcheck]
fn prop_chain_id_stable(name: String) -> bool {
if let Ok(id1) = from_str_hashed(&name) {
let id2 = from_str_hashed(&name).unwrap();
return id1 == id2;
}
true
}
#[quickcheck]
fn prop_chain_id_safe(name: String) -> bool {
if let Ok(id) = from_str_hashed(&name) {
let chain_id: u64 = id.into();
return chain_id <= MAX_CHAIN_ID;
}
true
}
#[test]
fn chain_id_ok() -> Result<(), String> {
for name in ["test", "/root/foo/bar"] {
if let Err(e) = from_str_hashed(name) {
return Err(format!("failed: {name} - {e}"));
}
}
Ok(())
}
#[test]
fn chain_id_different() {
let id1 = from_str_hashed("foo").unwrap();
let id2 = from_str_hashed("bar").unwrap();
assert_ne!(id1, id2)
}
#[test]
fn chain_id_of_empty_is_zero() {
assert_eq!(from_str_hashed("").unwrap(), ChainID::from(0))
}
#[test]
fn chain_id_of_known() {
for (name, id) in KNOWN_CHAIN_NAMES.iter() {
assert_eq!(from_str_hashed(name).unwrap(), ChainID::from(*id))
}
}
#[test]
fn chain_id_examples() {
for (name, id) in [
("/r123/f0456/f0789", 3911219601699869),
("/foo/bar", 2313053391103756),
] {
assert_eq!(u64::from(from_str_hashed(name).unwrap()), id);
}
}
#[test]
fn just_root_id_some() {
assert_eq!(just_root_id("/r0"), Some(0));
assert_eq!(just_root_id("/r123"), Some(123));
for (_, id) in KNOWN_CHAIN_NAMES.iter() {
assert_eq!(
from_str_hashed(&format!("/r{id}")).unwrap(),
ChainID::from(*id)
)
}
}
#[test]
fn just_root_id_none() {
for name in [
"",
"/",
"/r",
"/r01",
"/r1234567890123456789012345678901234567890",
"123",
"abc",
"/r123/f456",
] {
assert!(just_root_id(name).is_none());
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/bytes.rs | fendermint/vm/interpreter/src/bytes.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use cid::Cid;
use fendermint_vm_genesis::Genesis;
use fendermint_vm_message::chain::ChainMessage;
use fvm_ipld_encoding::Error as IpldError;
use crate::{
chain::{ChainMessageApplyRet, ChainMessageCheckRes},
fvm::{FvmQuery, FvmQueryRet},
CheckInterpreter, ExecInterpreter, ExtendVoteInterpreter, GenesisInterpreter,
ProposalInterpreter, QueryInterpreter,
};
pub type BytesMessageApplyRes = Result<ChainMessageApplyRet, IpldError>;
pub type BytesMessageCheckRes = Result<ChainMessageCheckRes, IpldError>;
pub type BytesMessageQueryRes = Result<FvmQueryRet, IpldError>;
/// Close to what the ABCI sends: (Path, Bytes).
pub type BytesMessageQuery = (String, Vec<u8>);
/// Behaviour of proposal preparation. It's an optimisation to cut down needless serialization
/// when we know we aren't doing anything with the messages.
#[derive(Debug, Default, Clone)]
pub enum ProposalPrepareMode {
    /// Deserialize all messages and pass them to the inner interpreter.
    #[default]
    PassThrough,
    /// Does not pass messages to the inner interpreter, only appends what is returned from it.
    AppendOnly,
    /// Does not pass messages to the inner interpreter, only prepends what is returned from it.
    PrependOnly,
}
/// Interpreter working on raw bytes.
///
/// Decodes incoming bytes into `ChainMessage`s with `fvm_ipld_encoding` before
/// handing them to the wrapped interpreter, and encodes outgoing messages back
/// into bytes.
#[derive(Clone)]
pub struct BytesMessageInterpreter<I> {
    inner: I,
    /// Should we parse and pass on all messages during prepare.
    prepare_mode: ProposalPrepareMode,
    /// Should we reject proposals with transactions we cannot parse.
    reject_malformed_proposal: bool,
    /// Maximum number of messages to allow in a block.
    max_msgs: usize,
}
impl<I> BytesMessageInterpreter<I> {
pub fn new(
inner: I,
prepare_mode: ProposalPrepareMode,
reject_malformed_proposal: bool,
max_msgs: usize,
) -> Self {
Self {
inner,
prepare_mode,
reject_malformed_proposal,
max_msgs,
}
}
}
#[async_trait]
impl<I> ProposalInterpreter for BytesMessageInterpreter<I>
where
    I: ProposalInterpreter<Message = ChainMessage>,
{
    type State = I::State;
    type Message = Vec<u8>;

    /// Parse messages in the mempool and pass them into the inner `ChainMessage` interpreter.
    async fn prepare(
        &self,
        state: Self::State,
        msgs: Vec<Self::Message>,
    ) -> anyhow::Result<Vec<Self::Message>> {
        // Collect the messages to pass to the inner interpreter.
        let chain_msgs = match self.prepare_mode {
            ProposalPrepareMode::PassThrough => {
                let mut chain_msgs = Vec::new();
                for msg in msgs.iter() {
                    match fvm_ipld_encoding::from_slice::<ChainMessage>(msg) {
                        Err(e) => {
                            // This should not happen because the `CheckInterpreter` implementation below would
                            // have rejected any such user transaction.
                            tracing::warn!(
                                error = e.to_string(),
                                "failed to decode message in mempool as ChainMessage"
                            );
                        }
                        Ok(msg) => chain_msgs.push(msg),
                    }
                }
                chain_msgs
            }
            // In append/prepend mode the inner interpreter only contributes its
            // own messages, so we skip deserializing the mempool entirely.
            ProposalPrepareMode::AppendOnly | ProposalPrepareMode::PrependOnly => Vec::new(),
        };

        let chain_msgs = self.inner.prepare(state, chain_msgs).await?;

        // Re-encode whatever the inner interpreter returned.
        let chain_msgs = chain_msgs
            .into_iter()
            .map(|msg| {
                fvm_ipld_encoding::to_vec(&msg).context("failed to encode ChainMessage as IPLD")
            })
            .collect::<anyhow::Result<Vec<Self::Message>>>()?;

        // Combine the original raw messages with the inner interpreter's output
        // according to the configured mode.
        let mut all_msgs = match self.prepare_mode {
            ProposalPrepareMode::PassThrough => chain_msgs,
            ProposalPrepareMode::AppendOnly => [msgs, chain_msgs].concat(),
            ProposalPrepareMode::PrependOnly => [chain_msgs, msgs].concat(),
        };

        // Enforce the per-block message limit; anything cut here can be
        // re-proposed in a later block.
        if all_msgs.len() > self.max_msgs {
            tracing::warn!(
                max_msgs = self.max_msgs,
                all_msgs = all_msgs.len(),
                "truncating proposal"
            );
            all_msgs.truncate(self.max_msgs);
        }

        Ok(all_msgs)
    }

    /// Parse messages in the block, reject if unknown format. Pass the rest to the inner `ChainMessage` interpreter.
    async fn process(&self, state: Self::State, msgs: Vec<Self::Message>) -> anyhow::Result<bool> {
        // A proposal over the message limit is rejected outright.
        if msgs.len() > self.max_msgs {
            tracing::warn!(
                block_msgs = msgs.len(),
                "rejecting block: too many messages"
            );
            return Ok(false);
        }

        let mut chain_msgs = Vec::new();
        for msg in msgs {
            match fvm_ipld_encoding::from_slice::<ChainMessage>(&msg) {
                Err(e) => {
                    // If we cannot parse a message, then either:
                    // * The proposer is Byzantine - as an attack this isn't very effective as they could just not send a proposal and cause a timeout.
                    // * Our or the proposer node have different versions, or contain bugs
                    // We can either vote for it or not:
                    // * If we accept, we can punish the validator during block execution, and if it turns out we had a bug, we will have a consensus failure.
                    // * If we accept, then the serialization error will become visible in the transaction results through RPC.
                    // * If we reject, the majority can still accept the block, which indicates we had the bug (that way we might even panic during delivery, since we know it got voted on),
                    //   but a buggy transaction format that fails for everyone would cause liveness issues.
                    // * If we reject, then the serialization error will only be visible in the logs (and potentially earlier check_tx results).
                    tracing::warn!(
                        error = e.to_string(),
                        "failed to decode message in proposal as ChainMessage"
                    );
                    if self.reject_malformed_proposal {
                        return Ok(false);
                    }
                }
                Ok(msg) => chain_msgs.push(msg),
            }
        }

        self.inner.process(state, chain_msgs).await
    }
}
#[async_trait]
impl<I> ExecInterpreter for BytesMessageInterpreter<I>
where
    I: ExecInterpreter<Message = ChainMessage, DeliverOutput = ChainMessageApplyRet>,
{
    type State = I::State;
    type Message = Vec<u8>;
    type BeginOutput = I::BeginOutput;
    type DeliverOutput = BytesMessageApplyRes;
    type EndOutput = I::EndOutput;

    /// Decode delivered bytes into a `ChainMessage` and execute it on the inner interpreter.
    ///
    /// A decoding failure is not fatal: it is returned as the delivery output
    /// rather than an `Err`, so block processing continues.
    async fn deliver(
        &self,
        state: Self::State,
        msg: Self::Message,
    ) -> anyhow::Result<(Self::State, Self::DeliverOutput)> {
        match fvm_ipld_encoding::from_slice::<ChainMessage>(&msg) {
            Err(e) =>
            // TODO: Punish the validator for including rubbish.
            // There is always the possibility that our codebase is incompatible,
            // but then we'll have a consensus failure later when we don't agree on the ledger.
            {
                if self.reject_malformed_proposal {
                    // We could consider panicking here, otherwise if the majority executes this transaction (they voted for it)
                    // then we will just get a consensus failure after the block.
                    tracing::warn!(
                        error = e.to_string(),
                        "failed to decode delivered message as ChainMessage; we did not vote for it, maybe our node is buggy?"
                    );
                }
                Ok((state, Err(e)))
            }
            Ok(msg) => {
                let (state, ret) = self.inner.deliver(state, msg).await?;
                Ok((state, Ok(ret)))
            }
        }
    }

    /// Begin-of-block handling involves no byte decoding; delegate directly.
    async fn begin(&self, state: Self::State) -> anyhow::Result<(Self::State, Self::BeginOutput)> {
        self.inner.begin(state).await
    }

    /// End-of-block handling involves no byte decoding; delegate directly.
    async fn end(&self, state: Self::State) -> anyhow::Result<(Self::State, Self::EndOutput)> {
        self.inner.end(state).await
    }
}
#[async_trait]
impl<I> CheckInterpreter for BytesMessageInterpreter<I>
where
    I: CheckInterpreter<Message = ChainMessage, Output = ChainMessageCheckRes>,
{
    type State = I::State;
    type Message = Vec<u8>;
    type Output = BytesMessageCheckRes;

    /// Decode the transaction bytes and let the inner interpreter check it.
    ///
    /// Undecodable bytes are reported back as an error output so the mempool
    /// can discard the transaction (and potentially block its source).
    async fn check(
        &self,
        state: Self::State,
        msg: Self::Message,
        is_recheck: bool,
    ) -> anyhow::Result<(Self::State, Self::Output)> {
        let decoded = fvm_ipld_encoding::from_slice::<ChainMessage>(&msg);
        match decoded {
            Ok(chain_msg) => {
                let (state, out) = self.inner.check(state, chain_msg, is_recheck).await?;
                Ok((state, Ok(out)))
            }
            // The user sent us an invalid message; all we can do is discard it.
            Err(e) => Ok((state, Err(e))),
        }
    }
}
#[async_trait]
impl<I> QueryInterpreter for BytesMessageInterpreter<I>
where
    I: QueryInterpreter<Query = FvmQuery, Output = FvmQueryRet>,
{
    type State = I::State;
    type Query = BytesMessageQuery;
    type Output = BytesMessageQueryRes;

    /// Decode an ABCI-style `(path, bytes)` query and run it on the inner interpreter.
    async fn query(
        &self,
        state: Self::State,
        qry: Self::Query,
    ) -> anyhow::Result<(Self::State, Self::Output)> {
        let (path, payload) = qry;

        // According to the docstrings, the application MUST interpret `/store` as a
        // query on the underlying KV store, in which case the payload is a CID.
        // Any other path carries a full `FvmQuery` in the payload, so there are two
        // equivalent ways to send IPLD queries: `/store` + CID, or a `FvmQuery::Ipld`.
        let query = if path == "/store" {
            match fvm_ipld_encoding::from_slice::<Cid>(&payload) {
                Ok(cid) => FvmQuery::Ipld(cid),
                Err(e) => return Ok((state, Err(e))),
            }
        } else {
            match fvm_ipld_encoding::from_slice::<FvmQuery>(&payload) {
                Ok(q) => q,
                Err(e) => return Ok((state, Err(e))),
            }
        };

        let (state, out) = self.inner.query(state, query).await?;
        Ok((state, Ok(out)))
    }
}
#[async_trait]
impl<I> GenesisInterpreter for BytesMessageInterpreter<I>
where
    I: GenesisInterpreter<Genesis = Genesis>,
{
    type State = I::State;
    type Genesis = Vec<u8>;
    type Output = I::Output;

    /// Parse the raw genesis bytes (JSON or CBOR) and initialize the inner interpreter.
    async fn init(
        &self,
        state: Self::State,
        genesis: Self::Genesis,
    ) -> anyhow::Result<(Self::State, Self::Output)> {
        // TODO (IPC-44): Handle the serialized application state as well as `Genesis`.
        let genesis: Genesis = parse_genesis(&genesis)?;
        self.inner.init(state, genesis).await
    }
}
#[async_trait]
impl<I> ExtendVoteInterpreter for BytesMessageInterpreter<I>
where
    I: ExtendVoteInterpreter,
{
    type State = I::State;
    type ExtendMessage = I::ExtendMessage;
    type VerifyMessage = I::VerifyMessage;
    type ExtendOutput = I::ExtendOutput;
    type VerifyOutput = I::VerifyOutput;

    /// Vote extension involves no byte decoding; delegate directly to the inner interpreter.
    async fn extend_vote(
        &self,
        state: Self::State,
        msg: Self::ExtendMessage,
    ) -> anyhow::Result<Self::ExtendOutput> {
        self.inner.extend_vote(state, msg).await
    }

    /// Vote extension verification involves no byte decoding; delegate directly.
    async fn verify_vote_extension(
        &self,
        state: Self::State,
        msg: Self::VerifyMessage,
    ) -> anyhow::Result<(Self::State, Self::VerifyOutput)> {
        self.inner.verify_vote_extension(state, msg).await
    }
}
/// Parse the initial genesis either as JSON or CBOR.
///
/// JSON is attempted first; if both fail, the combined error mentions each cause.
fn parse_genesis(bytes: &[u8]) -> anyhow::Result<Genesis> {
    match try_parse_genesis_json(bytes) {
        Ok(genesis) => Ok(genesis),
        Err(e1) => try_parse_genesis_cbor(bytes)
            .map_err(|e2| anyhow!("failed to deserialize genesis as JSON or CBOR: {e1}; {e2}")),
    }
}
/// Try to parse the genesis as UTF-8 encoded JSON.
///
/// Borrows the input as `&str` via `std::str::from_utf8` instead of copying it
/// into an owned `String` (`bytes.to_vec()`), avoiding a needless allocation
/// for what can be a sizeable genesis file. Invalid UTF-8 and invalid JSON
/// both surface as errors for `parse_genesis` to fall back on CBOR.
fn try_parse_genesis_json(bytes: &[u8]) -> anyhow::Result<Genesis> {
    let json = std::str::from_utf8(bytes)?;
    let genesis = serde_json::from_str(json)?;
    Ok(genesis)
}
/// Try to parse the genesis as IPLD-encoded CBOR.
fn try_parse_genesis_cbor(bytes: &[u8]) -> anyhow::Result<Genesis> {
    Ok(fvm_ipld_encoding::from_slice(bytes)?)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/lib.rs | fendermint/vm/interpreter/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use async_trait::async_trait;
pub mod bytes;
pub mod chain;
pub mod fvm;
pub mod signed;
#[cfg(feature = "arb")]
mod arb;
/// Initialize the chain state.
///
/// This could be from the original genesis file, or perhaps a checkpointed snapshot.
#[async_trait]
pub trait GenesisInterpreter: Sync + Send {
    type State: Send;
    type Genesis: Send;
    type Output;

    /// Initialize the chain, consuming the genesis and returning the initial state.
    async fn init(
        &self,
        state: Self::State,
        genesis: Self::Genesis,
    ) -> anyhow::Result<(Self::State, Self::Output)>;
}
/// Sign tags (ABCI vote extensions) and verify extensions received from other validators.
#[async_trait]
pub trait ExtendVoteInterpreter: Sync + Send {
    type State: Send;
    type ExtendMessage: Send;
    type VerifyMessage: Send;
    type ExtendOutput;
    type VerifyOutput;

    /// Sign the vote.
    async fn extend_vote(
        &self,
        state: Self::State,
        msg: Self::ExtendMessage,
    ) -> anyhow::Result<Self::ExtendOutput>;

    /// Verify a vote extension produced by another validator.
    async fn verify_vote_extension(
        &self,
        state: Self::State,
        msg: Self::VerifyMessage,
    ) -> anyhow::Result<(Self::State, Self::VerifyOutput)>;
}
/// Prepare and process transaction proposals.
#[async_trait]
pub trait ProposalInterpreter: Sync + Send {
    /// State reflects the circumstances under which transactions were proposed, e.g. block height,
    /// but also any application specific mempool, for example one we can use to resolve CIDs
    /// in the background.
    ///
    /// State is considered read-only, since the proposal might not go through. It should only be
    /// modified by the delivery of transactions in a finalized block; for example that is where
    /// we would clear out data from our mempool.
    type State: Send;
    type Message: Send;

    /// Called when the current validator is about to propose a block.
    ///
    /// This is our chance to inject other transactions from our own mempool which we are now able to execute.
    async fn prepare(
        &self,
        state: Self::State,
        msgs: Vec<Self::Message>,
    ) -> anyhow::Result<Vec<Self::Message>>;

    /// Called when the current validator needs to decide whether to vote for a block.
    ///
    /// This is our chance to check whether CIDs proposed for execution are available.
    ///
    /// Return `true` if we can accept this block, `false` to reject it.
    async fn process(&self, state: Self::State, msgs: Vec<Self::Message>) -> anyhow::Result<bool>;
}
/// The `ExecInterpreter` applies messages on some state, which is
/// tied to the lifecycle of a block in the ABCI.
///
/// By making it generic, the intention is that interpreters can
/// be stacked, changing the type of message along the way. For
/// example on the outermost layer the input message can be a mix
/// of self-contained messages and CIDs proposed for resolution
/// or execution, while in the innermost layer it's all self-contained.
/// Some interpreters would act like middlewares to resolve CIDs into
/// a concrete message.
///
/// The execution is asynchronous, so that the middleware is allowed
/// to potentially interact with the outside world. If this was restricted
/// to things like scheduling a CID resolution, we could use effects
/// returned from message processing. However, when a node is catching
/// up with the chain others have already committed, they have to do the
/// message resolution synchronously, so it has to be done during
/// message processing. Alternatively we'd have to split the processing
/// into async steps to pre-process the message, then synchronous steps
/// to update the state. But this approach is more flexible, because
/// the middlewares can decide on a message-by-message basis whether
/// to forward the message to the inner layer. Unfortunately block-level
/// pre-processing is not possible, because we are fed the messages
/// one by one through the ABCI.
///
/// There is no separate type for `Error`, only `Output`. The reason
/// is that we'll be calling high level executors internally that
/// already have their internal error handling, returning all domain
/// errors such as `OutOfGas` in their output, and only using the
/// error case for things that are independent of the message itself,
/// signalling unexpected problems there's no recovering from and
/// that should stop the block processing altogether.
#[async_trait]
pub trait ExecInterpreter: Sync + Send {
    type State: Send;
    type Message: Send;
    type BeginOutput;
    type DeliverOutput;
    type EndOutput;

    /// Called once at the beginning of a block.
    ///
    /// This is our chance to run `cron` jobs for example.
    async fn begin(&self, state: Self::State) -> anyhow::Result<(Self::State, Self::BeginOutput)>;

    /// Apply a message onto the state.
    ///
    /// The state is taken by value, so there's no issue with sharing
    /// mutable references in futures. The modified value should be
    /// returned along with the return value.
    ///
    /// Only return an error case if something truly unexpected happens
    /// that should stop message processing altogether; otherwise use
    /// the output for signalling all execution results.
    async fn deliver(
        &self,
        state: Self::State,
        msg: Self::Message,
    ) -> anyhow::Result<(Self::State, Self::DeliverOutput)>;

    /// Called once at the end of a block.
    ///
    /// This is where we can apply end-of-epoch processing, for example to process staking
    /// requests once every 1000 blocks.
    async fn end(&self, state: Self::State) -> anyhow::Result<(Self::State, Self::EndOutput)>;
}
/// Check if messages can be added to the mempool by performing certain validation
/// over a projected version of the state. Does not execute transactions fully,
/// just does basic validation. The state is updated so that things like nonces
/// and balances are adjusted as if the transaction was executed. This way an
/// account can send multiple messages in a row, not just the next that follows
/// its current nonce.
#[async_trait]
pub trait CheckInterpreter: Sync + Send {
    type State: Send;
    type Message: Send;
    type Output;

    /// Called when a new user transaction is being added to the mempool.
    ///
    /// Returns the updated state, and the check output, which should be
    /// able to describe both the success and failure cases.
    ///
    /// The recheck flag indicates that we are checking the transaction
    /// again because we have seen a new block and the state changed.
    /// As an optimisation, checks that do not depend on state can be skipped.
    async fn check(
        &self,
        state: Self::State,
        msg: Self::Message,
        is_recheck: bool,
    ) -> anyhow::Result<(Self::State, Self::Output)>;
}
/// Run a query over the ledger.
#[async_trait]
pub trait QueryInterpreter: Sync + Send {
    type State: Send;
    type Query: Send;
    type Output;

    /// Run a single query against the state.
    ///
    /// It takes and returns the state in case we wanted to do some caching of
    /// things which otherwise aren't safe to send over async boundaries.
    async fn query(
        &self,
        state: Self::State,
        qry: Self::Query,
    ) -> anyhow::Result<(Self::State, Self::Output)>;
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/chain.rs | fendermint/vm/interpreter/src/chain.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::fvm::state::ipc::GatewayCaller;
use crate::fvm::{topdown, FvmApplyRet, PowerUpdates};
use crate::ExtendVoteInterpreter;
use crate::{
fvm::state::FvmExecState,
fvm::FvmMessage,
signed::{SignedMessageApplyRes, SignedMessageCheckRes, SyntheticMessage, VerifiableMessage},
CheckInterpreter, ExecInterpreter, GenesisInterpreter, ProposalInterpreter, QueryInterpreter,
};
use anyhow::{bail, Context};
use async_stm::atomically;
use async_trait::async_trait;
use bls_signatures::Serialize;
use fendermint_actor_cetf::BlsSignature;
use fendermint_tracing::emit;
use fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ADDR;
use fendermint_vm_actor_interface::ipc;
use fendermint_vm_actor_interface::system::SYSTEM_ACTOR_ADDR;
use fendermint_vm_event::ParentFinalityMissingQuorum;
use fendermint_vm_message::cetf::CetfMessage;
use fendermint_vm_message::ipc::ParentFinality;
use fendermint_vm_message::{
chain::ChainMessage,
ipc::{BottomUpCheckpoint, CertifiedMessage, IpcMessage, SignedRelayedMessage},
};
use fendermint_vm_resolver::pool::{ResolveKey, ResolvePool};
use fendermint_vm_topdown::proxy::IPCProviderProxy;
use fendermint_vm_topdown::voting::{ValidatorKey, VoteTally};
use fendermint_vm_topdown::{
CachedFinalityProvider, IPCParentFinality, ParentFinalityProvider, ParentViewProvider, Toggle,
};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::clock::ChainEpoch;
use fvm_shared::econ::TokenAmount;
use fvm_shared::BLOCK_GAS_LIMIT;
use num_traits::Zero;
use std::sync::Arc;
/// A resolution pool for bottom-up and top-down checkpoints.
pub type CheckpointPool = ResolvePool<CheckpointPoolItem>;
/// Togglable provider of parent finality used for top-down checkpoints.
pub type TopDownFinalityProvider = Arc<Toggle<CachedFinalityProvider<IPCProviderProxy>>>;

/// These are the extra state items that the chain interpreter needs,
/// a sort of "environment" supporting IPC.
#[derive(Clone)]
pub struct ChainEnv {
    /// CID resolution pool.
    pub checkpoint_pool: CheckpointPool,
    /// The parent finality provider for top down checkpoint
    pub parent_finality_provider: TopDownFinalityProvider,
    /// Tally of parent-finality votes gossiped between validators.
    pub parent_finality_votes: VoteTally,
}
/// An item tracked by the checkpoint resolution pool.
#[derive(Clone, Hash, PartialEq, Eq)]
pub enum CheckpointPoolItem {
    /// BottomUp checkpoints to be resolved from the originating subnet or the current one.
    BottomUp(CertifiedMessage<BottomUpCheckpoint>),
    // We can extend this to include top-down checkpoints as well, with slightly
    // different resolution semantics (resolving it from a trusted parent, and
    // awaiting finality before declaring it available).
}
impl From<&CheckpointPoolItem> for ResolveKey {
    /// Derive the `(subnet, CID)` resolution key from a pool item.
    fn from(value: &CheckpointPoolItem) -> Self {
        // `CheckpointPoolItem` has a single variant, so the pattern is irrefutable.
        let CheckpointPoolItem::BottomUp(cp) = value;
        (cp.message.subnet_id.clone(), cp.message.bottom_up_messages)
    }
}
/// A user sent a transaction which they are not allowed to do.
pub struct IllegalMessage;

// For now this is the only option, later we can expand.
pub enum ChainMessageApplyRet {
    /// Result of applying a signed (or synthetic/unverified) message.
    Signed(SignedMessageApplyRes),
    /// The IPC chain message execution result
    Ipc(FvmApplyRet),
}

/// We only allow signed messages into the mempool.
pub type ChainMessageCheckRes = Result<SignedMessageCheckRes, IllegalMessage>;
/// Interpreter working on chain messages; in the future it will schedule
/// CID lookups to turn references into self-contained user or cross messages.
#[derive(Clone)]
pub struct ChainMessageInterpreter<I, DB> {
    inner: I,
    // Used to call the IPC gateway actor when committing parent finality and executing top-down messages.
    gateway_caller: GatewayCaller<DB>,
}
impl<I, DB> ChainMessageInterpreter<I, DB> {
pub fn new(inner: I) -> Self {
Self {
inner,
gateway_caller: GatewayCaller::default(),
}
}
}
#[async_trait]
impl<I, DB> ProposalInterpreter for ChainMessageInterpreter<I, DB>
where
    DB: Blockstore + Clone + 'static + Send + Sync,
    I: Sync + Send,
{
    type State = ChainEnv;
    type Message = ChainMessage;

    /// Check whether there are any "ready" messages in the IPLD resolution mempool which can be appended to the proposal.
    ///
    /// We could also use this to select the most profitable user transactions, within the gas limit. We can also take into
    /// account the transactions which are part of top-down or bottom-up checkpoints, to stay within gas limits.
    async fn prepare(
        &self,
        state: Self::State,
        mut msgs: Vec<Self::Message>,
    ) -> anyhow::Result<Vec<Self::Message>> {
        // Collect resolved CIDs ready to be proposed from the pool.
        let ckpts = atomically(|| state.checkpoint_pool.collect_resolved()).await;

        // Create transactions ready to be included on the chain.
        let ckpts = ckpts.into_iter().map(|ckpt| match ckpt {
            CheckpointPoolItem::BottomUp(ckpt) => ChainMessage::Ipc(IpcMessage::BottomUpExec(ckpt)),
        });

        // Prepare top down proposals.
        // Before we try to find a quorum, pause incoming votes. This is optional but if there are lots of votes coming in it might hold up proposals.
        atomically(|| state.parent_finality_votes.pause_votes_until_find_quorum()).await;

        // The pre-requisite for proposal is that there is a quorum of gossiped votes at that height.
        // The final proposal can be at most as high as the quorum, but can be less if we have already,
        // hit some limits such as how many blocks we can propose in a single step.
        let finalities = atomically(|| {
            let parent = state.parent_finality_provider.next_proposal()?;
            let quorum = state
                .parent_finality_votes
                .find_quorum()?
                .map(|(height, block_hash)| IPCParentFinality { height, block_hash });

            Ok((parent, quorum))
        })
        .await;

        let maybe_finality = match finalities {
            // Never propose beyond the vote quorum: take the lower of the two heights.
            (Some(parent), Some(quorum)) => Some(if parent.height <= quorum.height {
                parent
            } else {
                quorum
            }),
            (Some(parent), None) => {
                // There is a proposal but no quorum of votes yet; emit an event and skip.
                emit!(
                    DEBUG,
                    ParentFinalityMissingQuorum {
                        block_height: parent.height,
                        block_hash: &hex::encode(&parent.block_hash),
                    }
                );
                None
            }
            (None, _) => {
                // This is normal, the parent probably hasn't produced a block yet.
                None
            }
        };

        if let Some(finality) = maybe_finality {
            msgs.push(ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality {
                height: finality.height as ChainEpoch,
                block_hash: finality.block_hash,
            })))
        }

        // Append at the end - if we run out of block space, these are going to be reproposed in the next block.
        msgs.extend(ckpts);

        Ok(msgs)
    }

    /// Perform finality checks on top-down transactions and availability checks on bottom-up transactions.
    async fn process(&self, env: Self::State, msgs: Vec<Self::Message>) -> anyhow::Result<bool> {
        for msg in msgs {
            match msg {
                ChainMessage::Ipc(IpcMessage::BottomUpExec(msg)) => {
                    let item = CheckpointPoolItem::BottomUp(msg);

                    // We can just look in memory because when we start the application, we should retrieve any
                    // pending checkpoints (relayed but not executed) from the ledger, so they should be there.
                    // We don't have to validate the checkpoint here, because
                    // 1) we validated it when it was relayed, and
                    // 2) if a validator proposes something invalid, we can make them pay during execution.
                    let is_resolved =
                        atomically(|| match env.checkpoint_pool.get_status(&item)? {
                            None => Ok(false),
                            Some(status) => status.is_resolved(),
                        })
                        .await;

                    if !is_resolved {
                        return Ok(false);
                    }
                }
                ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality {
                    height,
                    block_hash,
                })) => {
                    let prop = IPCParentFinality {
                        height: height as u64,
                        block_hash,
                    };
                    let is_final =
                        atomically(|| env.parent_finality_provider.check_proposal(&prop)).await;
                    if !is_final {
                        return Ok(false);
                    }
                }
                _ => {}
            };
        }
        Ok(true)
    }
}
#[async_trait]
impl<I, DB> ExecInterpreter for ChainMessageInterpreter<I, DB>
where
    DB: Blockstore + Clone + 'static + Send + Sync + Clone,
    I: ExecInterpreter<
        Message = VerifiableMessage,
        DeliverOutput = SignedMessageApplyRes,
        State = FvmExecState<DB>,
        EndOutput = PowerUpdates,
    >,
{
    // The state consists of the resolver pool, which this interpreter needs, and the rest of the
    // state which the inner interpreter uses. This is a technical solution because the pool doesn't
    // fit with the state we use for execution messages further down the stack, which depend on block
    // height and are used in queries as well.
    type State = (ChainEnv, I::State);
    type Message = ChainMessage;
    type BeginOutput = I::BeginOutput;
    type DeliverOutput = ChainMessageApplyRet;
    type EndOutput = I::EndOutput;

    /// Execute a single chain message: CETF tags and user-signed messages are
    /// forwarded to the inner interpreter; IPC messages get chain-level handling
    /// (checkpoint pool updates, parent finality commitment) around the inner call.
    async fn deliver(
        &self,
        (env, mut state): Self::State,
        msg: Self::Message,
    ) -> anyhow::Result<(Self::State, Self::DeliverOutput)> {
        match msg {
            ChainMessage::Cetf(msg) => match msg {
                CetfMessage::CetfTag(height, sig) => {
                    // Turn the tag into an FVM message addressed at the CETF actor; it is not signature-verified.
                    let msg = cetf_tag_msg_to_fvm(&(height, sig))
                        .context("failed to syntesize FVM message")?;
                    let (state, ret) = self
                        .inner
                        .deliver(state, VerifiableMessage::NotVerify(msg))
                        .await
                        .context("failed to check cetf tag")?;
                    Ok(((env, state), ChainMessageApplyRet::Signed(ret)))
                }
            },
            ChainMessage::Signed(msg) => {
                let (state, ret) = self
                    .inner
                    .deliver(state, VerifiableMessage::Signed(msg))
                    .await?;

                Ok(((env, state), ChainMessageApplyRet::Signed(ret)))
            }
            ChainMessage::Ipc(msg) => match msg {
                IpcMessage::BottomUpResolve(msg) => {
                    let smsg = relayed_bottom_up_ckpt_to_fvm(&msg)
                        .context("failed to syntesize FVM message")?;

                    // Let the FVM validate the checkpoint quorum certificate and take note of the relayer for rewards.
                    let (state, ret) = self
                        .inner
                        .deliver(state, VerifiableMessage::Synthetic(smsg))
                        .await
                        .context("failed to deliver bottom up checkpoint")?;

                    // If successful, add the CID to the background resolution pool.
                    let is_success = match ret {
                        Ok(ref ret) => ret.fvm.apply_ret.msg_receipt.exit_code.is_success(),
                        Err(_) => false,
                    };

                    if is_success {
                        // For now try to get it from the child subnet. If the same comes up for execution, include own.
                        atomically(|| {
                            env.checkpoint_pool.add(
                                CheckpointPoolItem::BottomUp(msg.message.message.clone()),
                                false,
                            )
                        })
                        .await;
                    }

                    // We can use the same result type for now, it's isomorphic.
                    Ok(((env, state), ChainMessageApplyRet::Signed(ret)))
                }
                IpcMessage::BottomUpExec(_) => {
                    todo!("#197: implement BottomUp checkpoint execution")
                }
                IpcMessage::TopDownExec(p) => {
                    if !env.parent_finality_provider.is_enabled() {
                        bail!("cannot execute IPC top-down message: parent provider disabled");
                    }

                    // commit parent finality first
                    let finality = IPCParentFinality::new(p.height, p.block_hash);
                    tracing::debug!(
                        finality = finality.to_string(),
                        "chain interpreter received topdown exec proposal",
                    );

                    let (prev_height, prev_finality) = topdown::commit_finality(
                        &self.gateway_caller,
                        &mut state,
                        finality.clone(),
                        &env.parent_finality_provider,
                    )
                    .await
                    .context("failed to commit finality")?;
                    tracing::debug!(
                        previous_committed_height = prev_height,
                        previous_committed_finality = prev_finality
                            .as_ref()
                            .map(|f| format!("{f}"))
                            .unwrap_or_else(|| String::from("None")),
                        "chain interpreter committed topdown finality",
                    );

                    // The commitment of the finality for block `N` triggers
                    // the execution of all side-effects up till `N-1`, as for
                    // deferred execution chains, this is the latest state that
                    // we know for sure that we have available.
                    let execution_fr = prev_height;
                    let execution_to = finality.height - 1;

                    // error happens if we cannot get the validator set from ipc agent after retries
                    let validator_changes = env
                        .parent_finality_provider
                        .validator_changes_from(execution_fr, execution_to)
                        .await
                        .context("failed to fetch validator changes")?;
                    tracing::debug!(
                        from = execution_fr,
                        to = execution_to,
                        msgs = validator_changes.len(),
                        "chain interpreter received total validator changes"
                    );

                    self.gateway_caller
                        .store_validator_changes(&mut state, validator_changes)
                        .context("failed to store validator changes")?;

                    // error happens if we cannot get the cross messages from ipc agent after retries
                    let msgs = env
                        .parent_finality_provider
                        .top_down_msgs_from(execution_fr, execution_to)
                        .await
                        .context("failed to fetch top down messages")?;
                    tracing::debug!(
                        number_of_messages = msgs.len(),
                        start = execution_fr,
                        end = execution_to,
                        "chain interpreter received topdown msgs",
                    );

                    let ret = topdown::execute_topdown_msgs(&self.gateway_caller, &mut state, msgs)
                        .await
                        .context("failed to execute top down messages")?;

                    tracing::debug!("chain interpreter applied topdown msgs");

                    // Record the new finality in the provider and the vote tally.
                    atomically(|| {
                        env.parent_finality_provider
                            .set_new_finality(finality.clone(), prev_finality.clone())?;

                        env.parent_finality_votes
                            .set_finalized(finality.height, finality.block_hash.clone())?;

                        Ok(())
                    })
                    .await;

                    tracing::debug!(
                        finality = finality.to_string(),
                        "chain interpreter has set new"
                    );

                    Ok(((env, state), ChainMessageApplyRet::Ipc(ret)))
                }
            },
        }
    }

    /// Begin-of-block needs no chain-level handling; thread the env through the inner call.
    async fn begin(
        &self,
        (env, state): Self::State,
    ) -> anyhow::Result<(Self::State, Self::BeginOutput)> {
        let (state, out) = self.inner.begin(state).await?;
        Ok(((env, state), out))
    }

    /// End-of-block: run the inner interpreter, then propagate any power table changes.
    async fn end(
        &self,
        (env, state): Self::State,
    ) -> anyhow::Result<(Self::State, Self::EndOutput)> {
        let (state, out) = self.inner.end(state).await?;

        // Update any component that needs to know about changes in the power table.
        if !out.0.is_empty() {
            let power_updates = out
                .0
                .iter()
                .map(|v| {
                    let vk = ValidatorKey::from(v.public_key.0);
                    let w = v.power.0;
                    (vk, w)
                })
                .collect::<Vec<_>>();

            atomically(|| {
                env.parent_finality_votes
                    .update_power_table(power_updates.clone())
            })
            .await;
        }
        Ok(((env, state), out))
    }
}
#[async_trait]
impl<I, DB> CheckInterpreter for ChainMessageInterpreter<I, DB>
where
    DB: Blockstore + Clone + 'static + Send + Sync,
    I: CheckInterpreter<Message = VerifiableMessage, Output = SignedMessageCheckRes>,
{
    type State = I::State;
    type Message = ChainMessage;
    type Output = ChainMessageCheckRes;

    /// Decide whether a chain message may enter the mempool.
    ///
    /// Signed, relayed and CETF messages are forwarded to the inner interpreter;
    /// execution messages that only validators may propose are rejected as illegal.
    async fn check(
        &self,
        state: Self::State,
        msg: Self::Message,
        is_recheck: bool,
    ) -> anyhow::Result<(Self::State, Self::Output)> {
        match msg {
            ChainMessage::Signed(msg) => {
                let (state, ret) = self
                    .inner
                    .check(state, VerifiableMessage::Signed(msg), is_recheck)
                    .await?;

                Ok((state, Ok(ret)))
            }
            ChainMessage::Ipc(msg) => {
                match msg {
                    IpcMessage::BottomUpResolve(msg) => {
                        // Validate the relayed checkpoint as a synthetic FVM message.
                        let msg = relayed_bottom_up_ckpt_to_fvm(&msg)
                            .context("failed to syntesize FVM message")?;

                        let (state, ret) = self
                            .inner
                            .check(state, VerifiableMessage::Synthetic(msg), is_recheck)
                            .await
                            .context("failed to check bottom up resolve")?;

                        Ok((state, Ok(ret)))
                    }
                    IpcMessage::TopDownExec(_) | IpcMessage::BottomUpExec(_) => {
                        // Users cannot send these messages, only validators can propose them in blocks.
                        Ok((state, Err(IllegalMessage)))
                    }
                }
            }
            ChainMessage::Cetf(msg) => match msg {
                CetfMessage::CetfTag(height, sig) => {
                    // Tags are checked without signature verification by the inner interpreter.
                    let msg = cetf_tag_msg_to_fvm(&(height, sig))
                        .context("failed to syntesize FVM message")?;
                    let (state, ret) = self
                        .inner
                        .check(state, VerifiableMessage::NotVerify(msg), is_recheck)
                        .await
                        .context("failed to check cetf tag")?;
                    Ok((state, Ok(ret)))
                }
            },
        }
    }
}
#[async_trait]
impl<I, DB> QueryInterpreter for ChainMessageInterpreter<I, DB>
where
    DB: Blockstore + Clone + 'static + Send + Sync,
    I: QueryInterpreter,
{
    type State = I::State;
    type Query = I::Query;
    type Output = I::Output;

    /// Queries need no chain-level handling; delegate directly to the inner interpreter.
    async fn query(
        &self,
        state: Self::State,
        qry: Self::Query,
    ) -> anyhow::Result<(Self::State, Self::Output)> {
        self.inner.query(state, qry).await
    }
}
#[async_trait]
impl<I, DB> GenesisInterpreter for ChainMessageInterpreter<I, DB>
where
    DB: Blockstore + Clone + 'static + Send + Sync,
    I: GenesisInterpreter,
{
    type State = I::State;
    type Genesis = I::Genesis;
    type Output = I::Output;

    /// Genesis needs no chain-level handling; delegate directly to the inner interpreter.
    async fn init(
        &self,
        state: Self::State,
        genesis: Self::Genesis,
    ) -> anyhow::Result<(Self::State, Self::Output)> {
        self.inner.init(state, genesis).await
    }
}
#[async_trait]
impl<I, DB> ExtendVoteInterpreter for ChainMessageInterpreter<I, DB>
where
    DB: Blockstore + Clone + 'static + Send + Sync,
    I: ExtendVoteInterpreter,
{
    type State = I::State;
    type ExtendMessage = I::ExtendMessage;
    type VerifyMessage = I::VerifyMessage;
    type ExtendOutput = I::ExtendOutput;
    type VerifyOutput = I::VerifyOutput;

    /// Vote extension needs no chain-level handling; delegate to the inner interpreter.
    async fn extend_vote(
        &self,
        state: Self::State,
        msg: Self::ExtendMessage,
    ) -> anyhow::Result<Self::ExtendOutput> {
        self.inner.extend_vote(state, msg).await
    }

    /// Vote extension verification needs no chain-level handling; delegate directly.
    async fn verify_vote_extension(
        &self,
        state: Self::State,
        msg: Self::VerifyMessage,
    ) -> anyhow::Result<(Self::State, Self::VerifyOutput)> {
        self.inner.verify_vote_extension(state, msg).await
    }
}
/// Convert a signed relayed bottom-up checkpoint to a syntetic message we can send to the FVM.
///
/// By mapping to an FVM message we invoke the right contract to validate the checkpoint,
/// and automatically charge the relayer gas for the execution of the check, but not the
/// execution of the cross-messages, which aren't part of the payload.
fn relayed_bottom_up_ckpt_to_fvm(
relayed: &SignedRelayedMessage<CertifiedMessage<BottomUpCheckpoint>>,
) -> anyhow::Result<SyntheticMessage> {
// TODO #192: Convert the checkpoint to what the actor expects.
let params = RawBytes::default();
let msg = FvmMessage {
version: 0,
from: relayed.message.relayer,
to: ipc::GATEWAY_ACTOR_ADDR,
sequence: relayed.message.sequence,
value: TokenAmount::zero(),
method_num: ipc::gateway::METHOD_INVOKE_CONTRACT,
params,
gas_limit: relayed.message.gas_limit,
gas_fee_cap: relayed.message.gas_fee_cap.clone(),
gas_premium: relayed.message.gas_premium.clone(),
};
let msg = SyntheticMessage::new(msg, &relayed.message, relayed.signature.clone())
.context("failed to create syntetic message")?;
Ok(msg)
}
pub fn cetf_tag_msg_to_chainmessage(
tag_msg: &(u64, bls_signatures::Signature),
) -> anyhow::Result<ChainMessage> {
let tag = tag_msg.0;
let sig = tag_msg.1.as_bytes();
let sig = BlsSignature(sig.try_into().unwrap());
Ok(ChainMessage::Cetf(CetfMessage::CetfTag(tag, sig)))
}
fn cetf_tag_msg_to_fvm(tag_msg: &(u64, BlsSignature)) -> anyhow::Result<FvmMessage> {
let params = RawBytes::serialize(&fendermint_actor_cetf::AddSignedTagParams {
height: tag_msg.0,
signature: tag_msg.1.clone(),
})?;
let msg = FvmMessage {
from: SYSTEM_ACTOR_ADDR,
to: CETFSYSCALL_ACTOR_ADDR,
sequence: tag_msg.0,
gas_limit: BLOCK_GAS_LIMIT * 10000,
method_num: fendermint_actor_cetf::Method::AddSignedTag as u64,
params,
value: Default::default(),
version: Default::default(),
gas_fee_cap: Default::default(),
gas_premium: Default::default(),
};
Ok(msg)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/signed.rs | fendermint/vm/interpreter/src/signed.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use fendermint_vm_core::chainid::HasChainID;
use fendermint_vm_message::{
query::FvmQuery,
signed::{chain_id_bytes, DomainHash, SignedMessage, SignedMessageError},
};
use fvm_ipld_encoding::Error as IpldError;
use fvm_shared::{chainid::ChainID, crypto::signature::Signature};
use serde::Serialize;
use crate::{
fvm::{FvmApplyRet, FvmCheckRet, FvmMessage},
CheckInterpreter, ExecInterpreter, ExtendVoteInterpreter, GenesisInterpreter, QueryInterpreter,
};
/// Message validation failed due to an invalid signature.
pub struct InvalidSignature(pub String);
pub struct SignedMessageApplyRet {
pub fvm: FvmApplyRet,
pub domain_hash: Option<DomainHash>,
}
pub type SignedMessageApplyRes = Result<SignedMessageApplyRet, InvalidSignature>;
pub type SignedMessageCheckRes = Result<FvmCheckRet, InvalidSignature>;
/// Different kinds of signed messages.
///
/// This technical construct was introduced so we can have a simple linear interpreter stack
/// where everything flows through all layers, which means to pass something to the FVM we
/// have to go through the signature check.
pub enum VerifiableMessage {
/// A normal message sent by a user.
Signed(SignedMessage),
/// Something we constructed to pass on to the FVM.
Synthetic(SyntheticMessage),
/// Does not require verification
NotVerify(FvmMessage),
}
impl VerifiableMessage {
pub fn verify(&self, chain_id: &ChainID) -> Result<(), SignedMessageError> {
match self {
Self::Signed(m) => m.verify(chain_id),
Self::Synthetic(m) => m.verify(chain_id),
Self::NotVerify(_) => Ok(()),
}
}
pub fn into_message(self) -> FvmMessage {
match self {
Self::Signed(m) => m.into_message(),
Self::Synthetic(m) => m.message,
Self::NotVerify(m) => m,
}
}
pub fn domain_hash(
&self,
chain_id: &ChainID,
) -> Result<Option<DomainHash>, SignedMessageError> {
match self {
Self::Signed(m) => m.domain_hash(chain_id),
Self::Synthetic(_) => Ok(None),
Self::NotVerify(_) => Ok(None),
}
}
}
pub struct SyntheticMessage {
/// The artifical message.
message: FvmMessage,
/// The CID of the original message (assuming here that that's what was signed).
orig_cid: cid::Cid,
/// The signature over the original CID.
signature: Signature,
}
impl SyntheticMessage {
pub fn new<T: Serialize>(
message: FvmMessage,
orig: &T,
signature: Signature,
) -> Result<Self, IpldError> {
let orig_cid = fendermint_vm_message::cid(orig)?;
Ok(Self {
message,
orig_cid,
signature,
})
}
pub fn verify(&self, chain_id: &ChainID) -> Result<(), SignedMessageError> {
let mut data = self.orig_cid.to_bytes();
data.extend(chain_id_bytes(chain_id).iter());
self.signature
.verify(&data, &self.message.from)
.map_err(SignedMessageError::InvalidSignature)
}
}
/// Interpreter working on signed messages, validating their signature before sending
/// the unsigned parts on for execution.
#[derive(Clone)]
pub struct SignedMessageInterpreter<I> {
inner: I,
}
impl<I> SignedMessageInterpreter<I> {
pub fn new(inner: I) -> Self {
Self { inner }
}
}
#[async_trait]
impl<I> ExecInterpreter for SignedMessageInterpreter<I>
where
I: ExecInterpreter<Message = FvmMessage, DeliverOutput = FvmApplyRet>,
I::State: HasChainID,
{
type State = I::State;
type Message = VerifiableMessage;
type BeginOutput = I::BeginOutput;
type DeliverOutput = SignedMessageApplyRes;
type EndOutput = I::EndOutput;
async fn deliver(
&self,
state: Self::State,
msg: Self::Message,
) -> anyhow::Result<(Self::State, Self::DeliverOutput)> {
// Doing these first, so the compiler doesn't need `Send` bound, which it would if the
// async call to `inner.deliver` would be inside a match holding a reference to `state`.
let chain_id = state.chain_id();
match msg.verify(&chain_id) {
Err(SignedMessageError::Ipld(e)) => Err(anyhow!(e)),
Err(SignedMessageError::Ethereum(e)) => {
Ok((state, Err(InvalidSignature(e.to_string()))))
}
Err(SignedMessageError::InvalidSignature(s)) => {
// TODO: We can penalize the validator for including an invalid signature.
Ok((state, Err(InvalidSignature(s))))
}
Ok(()) => {
let domain_hash = msg
.domain_hash(&chain_id)
.context("failed to compute domain hash")?;
let (state, ret) = self.inner.deliver(state, msg.into_message()).await?;
let ret = SignedMessageApplyRet {
fvm: ret,
domain_hash,
};
Ok((state, Ok(ret)))
}
}
}
async fn begin(&self, state: Self::State) -> anyhow::Result<(Self::State, Self::BeginOutput)> {
self.inner.begin(state).await
}
async fn end(&self, state: Self::State) -> anyhow::Result<(Self::State, Self::EndOutput)> {
self.inner.end(state).await
}
}
#[async_trait]
impl<I> CheckInterpreter for SignedMessageInterpreter<I>
where
I: CheckInterpreter<Message = FvmMessage, Output = FvmCheckRet>,
I::State: HasChainID + Send + 'static,
{
type State = I::State;
type Message = VerifiableMessage;
type Output = SignedMessageCheckRes;
async fn check(
&self,
state: Self::State,
msg: Self::Message,
is_recheck: bool,
) -> anyhow::Result<(Self::State, Self::Output)> {
let verify_result = if is_recheck {
Ok(())
} else {
msg.verify(&state.chain_id())
};
match verify_result {
Err(SignedMessageError::Ipld(e)) => Err(anyhow!(e)),
Err(SignedMessageError::Ethereum(e)) => {
Ok((state, Err(InvalidSignature(e.to_string()))))
}
Err(SignedMessageError::InvalidSignature(s)) => {
// There is nobody we can punish for this, we can just tell Tendermint to discard this message,
// and potentially block the source IP address.
Ok((state, Err(InvalidSignature(s))))
}
Ok(()) => {
let (state, ret) = self
.inner
.check(state, msg.into_message(), is_recheck)
.await?;
Ok((state, Ok(ret)))
}
}
}
}
#[async_trait]
impl<I> QueryInterpreter for SignedMessageInterpreter<I>
where
I: QueryInterpreter<Query = FvmQuery>,
{
type State = I::State;
type Query = I::Query;
type Output = I::Output;
async fn query(
&self,
state: Self::State,
qry: Self::Query,
) -> anyhow::Result<(Self::State, Self::Output)> {
self.inner.query(state, qry).await
}
}
#[async_trait]
impl<I> GenesisInterpreter for SignedMessageInterpreter<I>
where
I: GenesisInterpreter,
{
type State = I::State;
type Genesis = I::Genesis;
type Output = I::Output;
async fn init(
&self,
state: Self::State,
genesis: Self::Genesis,
) -> anyhow::Result<(Self::State, Self::Output)> {
self.inner.init(state, genesis).await
}
}
#[async_trait]
impl<I> ExtendVoteInterpreter for SignedMessageInterpreter<I>
where
I: ExtendVoteInterpreter,
{
type State = I::State;
type ExtendMessage = I::ExtendMessage;
type VerifyMessage = I::VerifyMessage;
type ExtendOutput = I::ExtendOutput;
type VerifyOutput = I::VerifyOutput;
async fn extend_vote(
&self,
state: Self::State,
msg: Self::ExtendMessage,
) -> anyhow::Result<Self::ExtendOutput> {
self.inner.extend_vote(state, msg).await
}
async fn verify_vote_extension(
&self,
state: Self::State,
msg: Self::VerifyMessage,
) -> anyhow::Result<(Self::State, Self::VerifyOutput)> {
self.inner.verify_vote_extension(state, msg).await
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/arb.rs | fendermint/vm/interpreter/src/arb.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use fendermint_testing::arb::{ArbCid, ArbTokenAmount};
use fendermint_vm_core::{chainid, Timestamp};
use fvm_shared::version::NetworkVersion;
use quickcheck::{Arbitrary, Gen};
use crate::fvm::state::FvmStateParams;
impl Arbitrary for FvmStateParams {
fn arbitrary(g: &mut Gen) -> Self {
Self {
state_root: ArbCid::arbitrary(g).0,
timestamp: Timestamp(u64::arbitrary(g)),
network_version: NetworkVersion::new(*g.choose(&[21]).unwrap()),
base_fee: ArbTokenAmount::arbitrary(g).0,
circ_supply: ArbTokenAmount::arbitrary(g).0,
chain_id: chainid::from_str_hashed(String::arbitrary(g).as_str())
.unwrap()
.into(),
power_scale: *g.choose(&[-1, 0, 3]).unwrap(),
app_version: *g.choose(&[0, 1, 2]).unwrap(),
}
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/genesis.rs | fendermint/vm/interpreter/src/fvm/genesis.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::collections::{BTreeSet, HashMap};
use std::marker::PhantomData;
use std::path::{Path, PathBuf};
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use ethers::abi::Tokenize;
use ethers::core::types as et;
use fendermint_actor_eam::PermissionModeParams;
use fendermint_eth_hardhat::{Hardhat, FQN};
use fendermint_vm_actor_interface::diamond::{EthContract, EthContractMap};
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_actor_interface::ipc::IPC_CONTRACTS;
use fendermint_vm_actor_interface::{
account, burntfunds, cetf, chainmetadata, cron, eam, init, ipc, reward, system, EMPTY_ARR,
};
use fendermint_vm_core::{chainid, Timestamp};
use fendermint_vm_genesis::{ActorMeta, Genesis, Power, PowerScale, Validator};
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::chainid::ChainID;
use fvm_shared::econ::TokenAmount;
use fvm_shared::version::NetworkVersion;
use ipc_actors_abis::i_diamond::FacetCut;
use num_traits::Zero;
use crate::GenesisInterpreter;
use super::state::FvmGenesisState;
use super::FvmMessageInterpreter;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FvmGenesisOutput {
pub chain_id: ChainID,
pub timestamp: Timestamp,
pub network_version: NetworkVersion,
pub base_fee: TokenAmount,
pub power_scale: PowerScale,
pub circ_supply: TokenAmount,
pub validators: Vec<Validator<Power>>,
}
#[async_trait]
impl<DB, TC> GenesisInterpreter for FvmMessageInterpreter<DB, TC>
where
DB: Blockstore + 'static + Send + Sync + Clone,
TC: Send + Sync + 'static,
{
type State = FvmGenesisState<DB>;
type Genesis = Genesis;
type Output = FvmGenesisOutput;
/// Initialize actor states from the Genesis spec.
///
/// This method doesn't create all builtin Filecoin actors,
/// it leaves out the ones specific to file storage.
///
/// The ones included are:
/// * system
/// * init
/// * cron
/// * EAM
/// * burnt funds
/// * rewards (placeholder)
/// * accounts
/// * IPC
///
/// TODO:
/// * faucet?
///
/// See genesis initialization in:
/// * [Lotus](https://github.com/filecoin-project/lotus/blob/v1.20.4/chain/gen/genesis/genesis.go)
/// * [ref-fvm tester](https://github.com/filecoin-project/ref-fvm/blob/fvm%40v3.1.0/testing/integration/src/tester.rs#L99-L103)
/// * [fvm-workbench](https://github.com/anorth/fvm-workbench/blob/67219b3fd0b5654d54f722ab5acea6ec0abb2edc/builtin/src/genesis.rs)
async fn init(
&self,
mut state: Self::State,
genesis: Self::Genesis,
) -> anyhow::Result<(Self::State, Self::Output)> {
// Log the genesis in JSON format, hopefully it's not enormous.
tracing::debug!(genesis = serde_json::to_string(&genesis)?, "init");
// NOTE: We could consider adding the chain ID to the interpreter
// and rejecting genesis if it doesn't match the expectation,
// but the Tendermint genesis file also has this field, and
// presumably Tendermint checks that its peers have the same.
let chain_id = chainid::from_str_hashed(&genesis.chain_name)?;
// Convert validators to CometBFT power scale.
let validators = genesis
.validators
.iter()
.cloned()
.map(|vc| vc.map_power(|c| c.into_power(genesis.power_scale)))
.collect();
// Currently we just pass them back as they are, but later we should
// store them in the IPC actors; or in case of a snapshot restore them
// from the state.
let out = FvmGenesisOutput {
chain_id,
timestamp: genesis.timestamp,
network_version: genesis.network_version,
circ_supply: circ_supply(&genesis),
base_fee: genesis.base_fee,
power_scale: genesis.power_scale,
validators,
};
// STAGE 0: Declare the built-in EVM contracts we'll have to deploy.
// Pre-defined IDs for top-level Ethereum contracts.
let mut eth_builtin_ids = BTreeSet::new();
let mut eth_root_contracts = Vec::new();
let mut eth_contracts = EthContractMap::default();
// Only allocate IDs if the contracts are deployed.
if genesis.ipc.is_some() {
eth_contracts.extend(IPC_CONTRACTS.clone());
}
eth_builtin_ids.extend(eth_contracts.values().map(|c| c.actor_id));
eth_root_contracts.extend(eth_contracts.keys());
eth_root_contracts.extend(
eth_contracts
.values()
.flat_map(|c| c.facets.iter().map(|f| f.name)),
);
// Collect dependencies of the main IPC actors.
let mut eth_libs = self
.contracts
.dependencies(
ð_root_contracts
.iter()
.map(|n| (contract_src(n), *n))
.collect::<Vec<_>>(),
)
.context("failed to collect EVM contract dependencies")?;
// Only keep library dependencies, not contracts with constructors.
eth_libs.retain(|(_, d)| !eth_contracts.contains_key(d.as_str()));
// STAGE 1: First we initialize native built-in actors.
// System actor
state
.create_builtin_actor(
system::SYSTEM_ACTOR_CODE_ID,
system::SYSTEM_ACTOR_ID,
&system::State {
builtin_actors: state.manifest_data_cid,
},
TokenAmount::zero(),
None,
)
.context("failed to create system actor")?;
// Init actor
let (init_state, addr_to_id) = init::State::new(
state.store(),
genesis.chain_name.clone(),
&genesis.accounts,
ð_builtin_ids,
eth_libs.len() as u64,
)
.context("failed to create init state")?;
state
.create_builtin_actor(
init::INIT_ACTOR_CODE_ID,
init::INIT_ACTOR_ID,
&init_state,
TokenAmount::zero(),
None,
)
.context("failed to create init actor")?;
// Cron actor
state
.create_builtin_actor(
cron::CRON_ACTOR_CODE_ID,
cron::CRON_ACTOR_ID,
&cron::State {
entries: vec![], // TODO: Maybe with the IPC.
},
TokenAmount::zero(),
None,
)
.context("failed to create cron actor")?;
// Ethereum Account Manager (EAM) actor
state
.create_builtin_actor(
eam::EAM_ACTOR_CODE_ID,
eam::EAM_ACTOR_ID,
&EMPTY_ARR,
TokenAmount::zero(),
None,
)
.context("failed to create EAM actor")?;
// Burnt funds actor (it's just an account).
state
.create_builtin_actor(
account::ACCOUNT_ACTOR_CODE_ID,
burntfunds::BURNT_FUNDS_ACTOR_ID,
&account::State {
address: burntfunds::BURNT_FUNDS_ACTOR_ADDR,
},
TokenAmount::zero(),
None,
)
.context("failed to create burnt funds actor")?;
// A placeholder for the reward actor, beause I don't think
// using the one in the builtin actors library would be appropriate.
// This effectively burns the miner rewards. Better than panicking.
state
.create_builtin_actor(
account::ACCOUNT_ACTOR_CODE_ID,
reward::REWARD_ACTOR_ID,
&account::State {
address: reward::REWARD_ACTOR_ADDR,
},
TokenAmount::zero(),
None,
)
.context("failed to create reward actor")?;
// STAGE 1b: Then we initialize the in-repo custom actors.
// Initialize the chain metadata actor which handles saving metadata about the chain
// (e.g. block hashes) which we can query.
let chainmetadata_state = fendermint_actor_chainmetadata::State::new(
&state.store(),
fendermint_actor_chainmetadata::DEFAULT_LOOKBACK_LEN,
)?;
state
.create_custom_actor(
fendermint_actor_chainmetadata::CHAINMETADATA_ACTOR_NAME,
chainmetadata::CHAINMETADATA_ACTOR_ID,
&chainmetadata_state,
TokenAmount::zero(),
None,
)
.context("failed to create chainmetadata actor")?;
let cetf_state = fendermint_actor_cetf::State::new(&state.store())?;
state
.create_custom_actor(
fendermint_actor_cetf::CETF_ACTOR_NAME,
cetf::CETFSYSCALL_ACTOR_ID,
&cetf_state,
TokenAmount::zero(),
None,
)
.context("failed to create cetf actor")?;
let eam_state = fendermint_actor_eam::State::new(
state.store(),
PermissionModeParams::from(genesis.eam_permission_mode),
)?;
state
.replace_builtin_actor(
eam::EAM_ACTOR_NAME,
eam::EAM_ACTOR_ID,
fendermint_actor_eam::IPC_EAM_ACTOR_NAME,
&eam_state,
TokenAmount::zero(),
None,
)
.context("failed to replace built in eam actor")?;
// STAGE 2: Create non-builtin accounts which do not have a fixed ID.
// The next ID is going to be _after_ the accounts, which have already been assigned an ID by the `Init` actor.
// The reason we aren't using the `init_state.next_id` is because that already accounted for the multisig accounts.
let mut next_id = init::FIRST_NON_SINGLETON_ADDR + addr_to_id.len() as u64;
for a in genesis.accounts {
let balance = a.balance;
match a.meta {
ActorMeta::Account(acct) => {
state
.create_account_actor(acct, balance, &addr_to_id)
.context("failed to create account actor")?;
}
ActorMeta::Multisig(ms) => {
state
.create_multisig_actor(ms, balance, &addr_to_id, next_id)
.context("failed to create multisig actor")?;
next_id += 1;
}
}
}
// STAGE 3: Initialize the FVM and create built-in FEVM actors.
state
.init_exec_state(
out.timestamp,
out.network_version,
out.base_fee.clone(),
out.circ_supply.clone(),
out.chain_id.into(),
out.power_scale,
)
.context("failed to init exec state")?;
let mut deployer = ContractDeployer::<DB>::new(&self.contracts, ð_contracts);
// Deploy Ethereum libraries.
for (lib_src, lib_name) in eth_libs {
deployer.deploy_library(&mut state, &mut next_id, lib_src, &lib_name)?;
}
if let Some(ipc_params) = genesis.ipc {
// IPC Gateway actor.
let gateway_addr = {
use ipc::gateway::ConstructorParameters;
let params = ConstructorParameters::new(ipc_params.gateway, genesis.validators)
.context("failed to create gateway constructor")?;
let facets = deployer
.facets(ipc::gateway::CONTRACT_NAME)
.context("failed to collect gateway facets")?;
deployer.deploy_contract(
&mut state,
ipc::gateway::CONTRACT_NAME,
(facets, params),
)?
};
// IPC SubnetRegistry actor.
{
use ipc::registry::ConstructorParameters;
let mut facets = deployer
.facets(ipc::registry::CONTRACT_NAME)
.context("failed to collect registry facets")?;
let getter_facet = facets.remove(0);
let manager_facet = facets.remove(0);
let rewarder_facet = facets.remove(0);
let checkpointer_facet = facets.remove(0);
let pauser_facet = facets.remove(0);
let diamond_loupe_facet = facets.remove(0);
let diamond_cut_facet = facets.remove(0);
let ownership_facet = facets.remove(0);
debug_assert_eq!(facets.len(), 2, "SubnetRegistry has 2 facets of its own");
let params = ConstructorParameters {
gateway: gateway_addr,
getter_facet: getter_facet.facet_address,
manager_facet: manager_facet.facet_address,
rewarder_facet: rewarder_facet.facet_address,
pauser_facet: pauser_facet.facet_address,
checkpointer_facet: checkpointer_facet.facet_address,
diamond_cut_facet: diamond_cut_facet.facet_address,
diamond_loupe_facet: diamond_loupe_facet.facet_address,
ownership_facet: ownership_facet.facet_address,
subnet_getter_selectors: getter_facet.function_selectors,
subnet_manager_selectors: manager_facet.function_selectors,
subnet_rewarder_selectors: rewarder_facet.function_selectors,
subnet_checkpointer_selectors: checkpointer_facet.function_selectors,
subnet_pauser_selectors: pauser_facet.function_selectors,
subnet_actor_diamond_cut_selectors: diamond_cut_facet.function_selectors,
subnet_actor_diamond_loupe_selectors: diamond_loupe_facet.function_selectors,
subnet_actor_ownership_selectors: ownership_facet.function_selectors,
creation_privileges: 0,
};
deployer.deploy_contract(
&mut state,
ipc::registry::CONTRACT_NAME,
(facets, params),
)?;
};
}
Ok((state, out))
}
}
fn contract_src(name: &str) -> PathBuf {
PathBuf::from(format!("{name}.sol"))
}
struct ContractDeployer<'a, DB> {
hardhat: &'a Hardhat,
top_contracts: &'a EthContractMap,
// Assign dynamic ID addresses to libraries, but use fixed addresses for the top level contracts.
lib_addrs: HashMap<FQN, et::Address>,
phantom_db: PhantomData<DB>,
}
impl<'a, DB> ContractDeployer<'a, DB>
where
DB: Blockstore + 'static + Send + Sync + Clone,
{
pub fn new(hardhat: &'a Hardhat, top_contracts: &'a EthContractMap) -> Self {
Self {
hardhat,
top_contracts,
lib_addrs: Default::default(),
phantom_db: PhantomData,
}
}
/// Deploy a library contract with a dynamic ID and no constructor.
pub fn deploy_library(
&mut self,
state: &mut FvmGenesisState<DB>,
next_id: &mut u64,
lib_src: impl AsRef<Path>,
lib_name: &str,
) -> anyhow::Result<()> {
let fqn = self.hardhat.fqn(lib_src.as_ref(), lib_name);
let bytecode = self
.hardhat
.bytecode(&lib_src, lib_name, &self.lib_addrs)
.with_context(|| format!("failed to load library bytecode {fqn}"))?;
let eth_addr = state
.create_evm_actor(*next_id, bytecode)
.with_context(|| format!("failed to create library actor {fqn}"))?;
let id_addr = et::Address::from(EthAddress::from_id(*next_id).0);
let eth_addr = et::Address::from(eth_addr.0);
tracing::info!(
actor_id = next_id,
?eth_addr,
?id_addr,
fqn,
"deployed Ethereum library"
);
// We can use the masked ID here or the delegated address.
// Maybe the masked ID is quicker because it doesn't need to be resolved.
self.lib_addrs.insert(fqn, id_addr);
*next_id += 1;
Ok(())
}
/// Construct the bytecode of a top-level contract and deploy it with some constructor parameters.
pub fn deploy_contract<T>(
&self,
state: &mut FvmGenesisState<DB>,
contract_name: &str,
constructor_params: T,
) -> anyhow::Result<et::Address>
where
T: Tokenize,
{
let contract = self.top_contract(contract_name)?;
let contract_id = contract.actor_id;
let contract_src = contract_src(contract_name);
let bytecode = self
.hardhat
.bytecode(contract_src, contract_name, &self.lib_addrs)
.with_context(|| format!("failed to load {contract_name} bytecode"))?;
let eth_addr = state
.create_evm_actor_with_cons(contract_id, &contract.abi, bytecode, constructor_params)
.with_context(|| format!("failed to create {contract_name} actor"))?;
let id_addr = et::Address::from(EthAddress::from_id(contract_id).0);
let eth_addr = et::Address::from(eth_addr.0);
tracing::info!(
actor_id = contract_id,
?eth_addr,
?id_addr,
contract_name,
"deployed Ethereum contract"
);
// The Ethereum address is more usable inside the EVM than the ID address.
Ok(eth_addr)
}
/// Collect Facet Cuts for the diamond pattern, where the facet address comes from already deployed library facets.
pub fn facets(&self, contract_name: &str) -> anyhow::Result<Vec<FacetCut>> {
let contract = self.top_contract(contract_name)?;
let mut facet_cuts = Vec::new();
for facet in contract.facets.iter() {
let facet_name = facet.name;
let facet_src = contract_src(facet_name);
let facet_fqn = self.hardhat.fqn(&facet_src, facet_name);
let facet_addr = self
.lib_addrs
.get(&facet_fqn)
.ok_or_else(|| anyhow!("facet {facet_name} has not been deployed"))?;
let method_sigs = facet
.abi
.functions()
.filter(|f| f.signature() != "init(bytes)")
.map(|f| f.short_signature())
.collect();
let facet_cut = FacetCut {
facet_address: *facet_addr,
action: 0, // Add
function_selectors: method_sigs,
};
facet_cuts.push(facet_cut);
}
Ok(facet_cuts)
}
fn top_contract(&self, contract_name: &str) -> anyhow::Result<&EthContract> {
self.top_contracts
.get(contract_name)
.ok_or_else(|| anyhow!("unknown top contract name: {contract_name}"))
}
}
/// Sum of balances in the genesis accounts.
fn circ_supply(g: &Genesis) -> TokenAmount {
g.accounts
.iter()
.fold(TokenAmount::zero(), |s, a| s + a.balance.clone())
}
#[cfg(test)]
mod tests {
use std::{str::FromStr, sync::Arc};
use cid::Cid;
use fendermint_vm_genesis::{ipc::IpcParams, Genesis};
use fvm::engine::MultiEngine;
use quickcheck::Arbitrary;
use tendermint_rpc::{MockClient, MockRequestMethodMatcher};
use crate::{
fvm::{
bundle::{bundle_path, contracts_path, custom_actors_bundle_path},
state::ipc::GatewayCaller,
store::memory::MemoryBlockstore,
upgrades::UpgradeScheduler,
FvmMessageInterpreter,
},
GenesisInterpreter,
};
use super::FvmGenesisState;
#[tokio::test]
async fn load_genesis() {
let genesis = make_genesis();
let bundle = read_bundle();
let custom_actors_bundle = read_custom_actors_bundle();
let interpreter = make_interpreter();
let multi_engine = Arc::new(MultiEngine::default());
let store = MemoryBlockstore::new();
let state = FvmGenesisState::new(store, multi_engine, &bundle, &custom_actors_bundle)
.await
.expect("failed to create state");
let (mut state, out) = interpreter
.init(state, genesis.clone())
.await
.expect("failed to create actors");
assert_eq!(out.validators.len(), genesis.validators.len());
// Try calling a method on the IPC Gateway.
let exec_state = state.exec_state().expect("should be in exec stage");
let caller = GatewayCaller::default();
let period = caller
.bottom_up_check_period(exec_state)
.expect("error calling the gateway");
assert_eq!(period, genesis.ipc.unwrap().gateway.bottom_up_check_period);
let _state_root = state.commit().expect("failed to commit");
}
#[tokio::test]
async fn load_genesis_deterministic() {
let genesis = make_genesis();
let bundle = read_bundle();
let custom_actors_bundle = read_custom_actors_bundle();
let interpreter = make_interpreter();
let multi_engine = Arc::new(MultiEngine::default());
// Create a couple of states and load the same thing.
let mut outputs = Vec::new();
for _ in 0..3 {
let store = MemoryBlockstore::new();
let state =
FvmGenesisState::new(store, multi_engine.clone(), &bundle, &custom_actors_bundle)
.await
.expect("failed to create state");
let (state, out) = interpreter
.init(state, genesis.clone())
.await
.expect("failed to create actors");
let state_root_hash = state.commit().expect("failed to commit");
outputs.push((state_root_hash, out));
}
for out in &outputs[1..] {
assert_eq!(out.0, outputs[0].0, "state root hash is different");
}
}
// This is a sort of canary test, if it fails means something changed in the way we do genesis,
// which is probably fine, but it's better to know about it, and if anybody doesn't get the same
// then we might have some non-determinism.
#[ignore] // I see a different value on CI than locally.
#[tokio::test]
async fn load_genesis_known() {
let genesis_json = "{\"chain_name\":\"/r314159/f410fnfmitm2ww7oehhtbokf6wulhrr62sgq3sgqmenq\",\"timestamp\":1073250,\"network_version\":18,\"base_fee\":\"1000\",\"power_scale\":3,\"validators\":[{\"public_key\":\"BLX9ojqB+8Z26aMmKoCRb3Te6AnSU6zY8hPcf1X5Q69XCNaHVcRxzYO2xx7o/2vgdS7nkDTMRRbkDGzy+FYdAFc=\",\"power\":\"1000000000000000000\"},{\"public_key\":\"BFcOveVieknZiscWsfXa06aGbBkKeucBycd/w0N1QHlaZfa/5dJcH7D0hvcdfv3B2Rv1OPuxo1PkgsEbWegWKcA=\",\"power\":\"1000000000000000000\"},{\"public_key\":\"BEP30ykovfrQp3zo+JVRvDVL2emC+Ju1Kpox3zMVYZyFKvYt64qyN/HOVjridDrkEsnQU8BVen4Aegja4fBZ+LU=\",\"power\":\"1000000000000000000\"}],\"accounts\":[{\"meta\":{\"Account\":{\"owner\":\"f410fggjevhgketpz6gw6ordusynlgcd5piyug4aomuq\"}},\"balance\":\"1000000000000000000\"},{\"meta\":{\"Account\":{\"owner\":\"f410frbdnwklaitcjsqe7swjwp5naple6vthq4woyfry\"}},\"balance\":\"2000000000000000000\"},{\"meta\":{\"Account\":{\"owner\":\"f410fxo4lih4n2acr3oadalidwqjgoqkzhp5dw3zwkvy\"}},\"balance\":\"1000000000000000000\"}],\"ipc\":{\"gateway\":{\"subnet_id\":\"/r314159/f410fnfmitm2ww7oehhtbokf6wulhrr62sgq3sgqmenq\",\"bottom_up_check_period\":30,\"msg_fee\":\"1000000000000\",\"majority_percentage\":60,\"active_validators_limit\":100}}}";
let genesis: Genesis = serde_json::from_str(genesis_json).expect("failed to parse genesis");
let bundle = read_bundle();
let custom_actors_bundle = read_custom_actors_bundle();
let interpreter = make_interpreter();
let multi_engine = Arc::new(MultiEngine::default());
let store = MemoryBlockstore::new();
let state =
FvmGenesisState::new(store, multi_engine.clone(), &bundle, &custom_actors_bundle)
.await
.expect("failed to create state");
let (state, _) = interpreter
.init(state, genesis.clone())
.await
.expect("failed to create actors");
let state_root_hash = state.commit().expect("failed to commit");
let expected_root_hash =
Cid::from_str("bafy2bzacedebgy4j7qnh2v2x4kkr2jqfkryql5ookbjrwge6dbrr24ytlqnj4")
.unwrap();
assert_eq!(state_root_hash, expected_root_hash);
}
fn make_genesis() -> Genesis {
let mut g = quickcheck::Gen::new(5);
let mut genesis = Genesis::arbitrary(&mut g);
// Make sure we have IPC enabled.
genesis.ipc = Some(IpcParams::arbitrary(&mut g));
genesis
}
fn make_interpreter(
) -> FvmMessageInterpreter<MemoryBlockstore, MockClient<MockRequestMethodMatcher>> {
let (client, _) = MockClient::new(MockRequestMethodMatcher::default());
FvmMessageInterpreter::new(
client,
None,
contracts_path(),
1.05,
1.05,
false,
UpgradeScheduler::new(),
)
}
fn read_bundle() -> Vec<u8> {
std::fs::read(bundle_path()).expect("failed to read bundle")
}
fn read_custom_actors_bundle() -> Vec<u8> {
std::fs::read(custom_actors_bundle_path()).expect("failed to read custom actor bundle")
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/externs.rs | fendermint/vm/interpreter/src/fvm/externs.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::anyhow;
use cid::{
multihash::{Code, MultihashDigest},
Cid,
};
use fendermint_vm_actor_interface::chainmetadata::CHAINMETADATA_ACTOR_ID;
use fvm::{
externs::{Chain, Consensus, Externs, Rand},
state_tree::StateTree,
};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::{CborStore, DAG_CBOR};
use fvm_shared::clock::ChainEpoch;
use super::store::ReadOnlyBlockstore;
/// Implementation of the FVM externs, backed by a blockstore and a fixed state root.
pub struct FendermintExterns<DB>
where
    DB: Blockstore + 'static,
{
    // Store used to resolve state while serving extern calls.
    blockstore: DB,
    // Root of the state tree the externs read from (e.g. for block hash lookups).
    state_root: Cid,
}
impl<DB> FendermintExterns<DB>
where
DB: Blockstore + 'static,
{
pub fn new(blockstore: DB, state_root: Cid) -> Self {
Self {
blockstore,
state_root,
}
}
}
impl<DB> Rand for FendermintExterns<DB>
where
    DB: Blockstore + 'static,
{
    // Chain randomness is not provided by Fendermint; callers always get an error.
    fn get_chain_randomness(&self, _round: ChainEpoch) -> anyhow::Result<[u8; 32]> {
        Err(anyhow!("randomness not implemented"))
    }

    // Beacon randomness is not provided either; callers always get an error.
    fn get_beacon_randomness(&self, _round: ChainEpoch) -> anyhow::Result<[u8; 32]> {
        Err(anyhow!("beacon not implemented"))
    }
}
impl<DB> Consensus for FendermintExterns<DB>
where
    DB: Blockstore + 'static,
{
    // Consensus fault verification is not expected to be exercised here
    // (the panic message below states the assumption explicitly).
    fn verify_consensus_fault(
        &self,
        _h1: &[u8],
        _h2: &[u8],
        _extra: &[u8],
    ) -> anyhow::Result<(Option<fvm_shared::consensus::ConsensusFault>, i64)> {
        unimplemented!("not expecting to use consensus faults")
    }
}
impl<DB> Chain for FendermintExterns<DB>
where
    DB: Blockstore + Clone + 'static,
{
    // for retrieving the tipset_cid, we load the chain metadata actor state
    // at the given state_root and retrieve the blockhash for the given epoch
    fn get_tipset_cid(&self, epoch: ChainEpoch) -> anyhow::Result<Cid> {
        // create a read only state tree from the state root
        let bstore = ReadOnlyBlockstore::new(&self.blockstore);
        let state_tree = StateTree::new_from_root(&bstore, &self.state_root)?;

        // get the chain metadata actor state cid
        let actor_state_cid = match state_tree.get_actor(CHAINMETADATA_ACTOR_ID) {
            Ok(Some(actor_state)) => actor_state.state,
            Ok(None) => {
                return Err(anyhow!(
                    "chain metadata actor id ({}) not found in state",
                    CHAINMETADATA_ACTOR_ID
                ));
            }
            Err(err) => {
                return Err(anyhow!(
                    "failed to get chain metadata actor ({}) state, error: {}",
                    CHAINMETADATA_ACTOR_ID,
                    err
                ));
            }
        };

        // get the chain metadata actor state from the blockstore
        let actor_state: fendermint_actor_chainmetadata::State =
            match state_tree.store().get_cbor(&actor_state_cid) {
                Ok(Some(v)) => v,
                Ok(None) => {
                    return Err(anyhow!(
                        "chain metadata actor ({}) state not found",
                        CHAINMETADATA_ACTOR_ID
                    ));
                }
                Err(err) => {
                    return Err(anyhow!(
                        "failed to get chain metadata actor ({}) state, error: {}",
                        CHAINMETADATA_ACTOR_ID,
                        err
                    ));
                }
            };

        match actor_state.get_block_hash(&bstore, epoch) {
            // the block hash retrieved from state was saved raw from how we received it
            // from Tendermint (which is Sha2_256) and we simply wrap it here in a cid
            Ok(Some(v)) => match Code::Blake2b256.wrap(&v) {
                Ok(w) => Ok(Cid::new_v1(DAG_CBOR, w)),
                Err(err) => Err(anyhow!("failed to wrap block hash, error: {}", err)),
            },
            // No hash recorded for this epoch: return the default (zero) CID, not an error.
            Ok(None) => Ok(Cid::default()),
            Err(err) => Err(err),
        }
    }
}
impl<DB> Externs for FendermintExterns<DB> where DB: Blockstore + Clone + 'static {}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/upgrades.rs | fendermint/vm/interpreter/src/fvm/upgrades.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::collections::BTreeMap;
use anyhow::bail;
use fendermint_vm_core::chainid;
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::chainid::ChainID;
use std::collections::btree_map::Entry::{Occupied, Vacant};
use super::state::{snapshot::BlockHeight, FvmExecState};
// Composite lookup key for scheduled upgrades: (chain id, block height).
#[derive(PartialEq, Eq, Clone)]
struct UpgradeKey(ChainID, BlockHeight);
impl PartialOrd for UpgradeKey {
    // Delegate to the total order defined by `Ord` (the clippy-recommended pattern).
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for UpgradeKey {
    /// Order keys by chain id first, then by block height within the same chain.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let lhs: u64 = self.0.into();
        let rhs: u64 = other.0.into();
        // Lexicographic tuple comparison is exactly "chain id, then height".
        (lhs, self.1).cmp(&(rhs, other.1))
    }
}
/// a function type for migration
// TODO: Add missing parameters
pub type MigrationFunc<DB> = fn(state: &mut FvmExecState<DB>) -> anyhow::Result<()>;
/// Upgrade represents a single upgrade to be executed at a given height
// `Clone` (and the `DB: Clone` bound) allows schedules holding these to be cloned.
#[derive(Clone)]
pub struct Upgrade<DB>
where
    DB: Blockstore + 'static + Clone,
{
    /// the chain_id should match the chain_id from the network configuration
    chain_id: ChainID,
    /// the block height at which the upgrade should be executed
    block_height: BlockHeight,
    /// the application version after the upgrade (or None if not affected)
    new_app_version: Option<u64>,
    /// the migration function to be executed
    migration: MigrationFunc<DB>,
}
impl<DB> Upgrade<DB>
where
    DB: Blockstore + 'static + Clone,
{
    /// Create an upgrade for the chain named `chain_name`, hashing the name into a
    /// `ChainID`. Fails if the name cannot be hashed into a chain id.
    pub fn new(
        chain_name: impl ToString,
        block_height: BlockHeight,
        new_app_version: Option<u64>,
        migration: MigrationFunc<DB>,
    ) -> anyhow::Result<Self> {
        Ok(Self {
            chain_id: chainid::from_str_hashed(&chain_name.to_string())?,
            block_height,
            new_app_version,
            migration,
        })
    }

    /// Create an upgrade from an already-known `ChainID`; cannot fail.
    pub fn new_by_id(
        chain_id: ChainID,
        block_height: BlockHeight,
        new_app_version: Option<u64>,
        migration: MigrationFunc<DB>,
    ) -> Self {
        Self {
            chain_id,
            block_height,
            new_app_version,
            migration,
        }
    }

    /// Run the migration against the given state; on success returns the new
    /// application version, if this upgrade changes it.
    pub fn execute(&self, state: &mut FvmExecState<DB>) -> anyhow::Result<Option<u64>> {
        (self.migration)(state)?;
        Ok(self.new_app_version)
    }
}
/// UpgradeScheduler represents a list of upgrades to be executed at given heights
/// During each block height we check if there is an upgrade scheduled at that
/// height, and if so the migration for that upgrade is performed.
#[derive(Clone)]
pub struct UpgradeScheduler<DB>
where
    DB: Blockstore + 'static + Clone,
{
    // Keyed by (chain id, height); a BTreeMap keeps the schedule ordered by key.
    upgrades: BTreeMap<UpgradeKey, Upgrade<DB>>,
}
impl<DB> Default for UpgradeScheduler<DB>
where
DB: Blockstore + 'static + Clone,
{
fn default() -> Self {
Self::new()
}
}
impl<DB> UpgradeScheduler<DB>
where
    DB: Blockstore + 'static + Clone,
{
    /// Create a scheduler with no upgrades registered.
    pub fn new() -> Self {
        Self {
            upgrades: BTreeMap::default(),
        }
    }
}
impl<DB> UpgradeScheduler<DB>
where
    DB: Blockstore + 'static + Clone,
{
    /// Register an upgrade; fails if one is already scheduled for the same
    /// chain and height.
    pub fn add(&mut self, upgrade: Upgrade<DB>) -> anyhow::Result<()> {
        let key = UpgradeKey(upgrade.chain_id, upgrade.block_height);
        match self.upgrades.entry(key) {
            Occupied(_) => bail!("Upgrade already exists"),
            Vacant(slot) => {
                slot.insert(upgrade);
                Ok(())
            }
        }
    }

    /// Look up the upgrade scheduled for `chain_id` at `height`, if any.
    pub fn get(&self, chain_id: ChainID, height: BlockHeight) -> Option<&Upgrade<DB>> {
        self.upgrades.get(&UpgradeKey(chain_id, height))
    }
}
#[test]
fn test_validate_upgrade_schedule() {
    use crate::fvm::store::memory::MemoryBlockstore;

    let mut scheduler: UpgradeScheduler<MemoryBlockstore> = UpgradeScheduler::new();

    scheduler
        .add(Upgrade::new("mychain", 10, None, |_state| Ok(())).unwrap())
        .unwrap();
    scheduler
        .add(Upgrade::new("mychain", 20, None, |_state| Ok(())).unwrap())
        .unwrap();

    // A duplicate (chain_id, height) pair must be rejected.
    let duplicate = Upgrade::new("mychain", 20, None, |_state| Ok(())).unwrap();
    assert!(scheduler.add(duplicate).is_err());

    let mychain_id = chainid::from_str_hashed("mychain").unwrap();
    let otherchain_id = chainid::from_str_hashed("otherchain").unwrap();

    assert!(scheduler.get(mychain_id, 9).is_none());
    assert!(scheduler.get(mychain_id, 10).is_some());
    assert!(scheduler.get(otherchain_id, 10).is_none());
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/bundle.rs | fendermint/vm/interpreter/src/fvm/bundle.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::{Path, PathBuf};
use std::str::FromStr;
/// Locate the workspace root by asking Cargo where the workspace manifest lives.
///
/// # Panics
///
/// Panics if `cargo locate-project` cannot be executed, exits with failure,
/// or prints something that is not a valid UTF-8 path to `Cargo.toml`.
fn workspace_dir() -> PathBuf {
    let output = std::process::Command::new(env!("CARGO"))
        .arg("locate-project")
        .arg("--workspace")
        .arg("--message-format=plain")
        .output()
        .expect("failed to run `cargo locate-project`");

    // Previously the exit status was ignored; a failing command would produce an
    // empty stdout and a confusing panic downstream. Fail loudly with stderr instead.
    assert!(
        output.status.success(),
        "`cargo locate-project` failed: {}",
        String::from_utf8_lossy(&output.stderr)
    );

    let manifest = std::str::from_utf8(&output.stdout)
        .expect("`cargo locate-project` output is not UTF-8")
        .trim();

    // The command prints the path of the workspace `Cargo.toml`; its parent is the root.
    Path::new(manifest)
        .parent()
        .expect("workspace manifest path has no parent")
        .to_path_buf()
}
/// Path to the builtin-actor bundle, intended to be used in tests.
///
/// Can be overridden with the `FM_BUILTIN_ACTORS_BUNDLE` environment variable;
/// otherwise it falls back to the in-repo build output.
pub fn bundle_path() -> PathBuf {
    let bundle_path = match std::env::var("FM_BUILTIN_ACTORS_BUNDLE") {
        Ok(path) => path,
        Err(_) => workspace_dir()
            .join("fendermint/builtin-actors/output/bundle.car")
            .to_string_lossy()
            .into_owned(),
    };
    PathBuf::from_str(&bundle_path).expect("malformed bundle path")
}
/// Path to the in-repo custom actor bundle, intended to be used in tests.
///
/// Can be overridden with the `FM_CUSTOM_ACTORS_BUNDLE` environment variable.
pub fn custom_actors_bundle_path() -> PathBuf {
    let path = std::env::var("FM_CUSTOM_ACTORS_BUNDLE").unwrap_or_else(|_| {
        let default = workspace_dir().join("fendermint/actors/output/custom_actors_bundle.car");
        default.to_string_lossy().into_owned()
    });
    PathBuf::from_str(&path).expect("malformed custom actors bundle path")
}
/// Path to the Solidity contracts, intended to be used in tests.
///
/// Can be overridden with the `FM_CONTRACTS_DIR` environment variable.
pub fn contracts_path() -> PathBuf {
    let contracts_dir = std::env::var("FM_CONTRACTS_DIR").unwrap_or_else(|_| {
        let default = workspace_dir().join("contracts/out");
        default.to_string_lossy().into_owned()
    });
    PathBuf::from_str(&contracts_dir).expect("malformed contracts path")
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/broadcast.rs | fendermint/vm/interpreter/src/fvm/broadcast.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::time::Duration;
use anyhow::{anyhow, bail, Context};
use ethers::types as et;
use fendermint_rpc::response::decode_fevm_return_data;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::error::ExitCode;
use fvm_shared::{address::Address, chainid::ChainID, econ::TokenAmount, BLOCK_GAS_LIMIT};
use num_traits::Zero;
use tendermint_rpc::Client;
use fendermint_crypto::SecretKey;
use fendermint_rpc::message::GasParams;
use fendermint_rpc::query::QueryClient;
use fendermint_rpc::tx::{CallClient, TxClient, TxSync};
use fendermint_rpc::{client::FendermintClient, message::SignedMessageFactory};
use fendermint_vm_message::query::FvmQueryHeight;
/// Retry `$block` up to `$max_retries` times, sleeping `$retry_delay` between attempts.
///
/// `$block` must evaluate to `Result<T, (code, msg)>`. A failure is retried only
/// while attempts remain AND `can_retry(code)` holds; otherwise the macro bails
/// out of the enclosing function with the error message. On `Ok` the value is
/// returned without sleeping.
macro_rules! retry {
    ($max_retries:expr, $retry_delay:expr, $block:expr) => {{
        let mut attempt = 0;
        let value = loop {
            match $block {
                // Out of retries, or the error class is not transient: give up.
                Err((code, msg)) if attempt == $max_retries || !can_retry(code) => {
                    bail!(msg);
                }
                Err((_, msg)) => {
                    tracing::warn!(error = msg, attempt, "retry broadcast");
                    attempt += 1;
                }
                Ok(value) => {
                    break value;
                }
            }
            // Only reached on a retryable error; wait before the next attempt.
            tokio::time::sleep($retry_delay).await;
        };
        value
    }};
}
/// Broadcast transactions to Tendermint.
///
/// This is typically something only active validators would want to do
/// from within Fendermint as part of the block lifecycle, for example
/// to submit their signatures to the ledger.
///
/// The broadcaster encapsulates the tactics for figuring out the nonce,
/// the gas limit, potential retries, etc.
#[derive(Clone)]
pub struct Broadcaster<C> {
    /// Client used both for queries (nonce, gas estimate) and for sending transactions.
    client: FendermintClient<C>,
    /// Key used to sign the broadcast messages.
    secret_key: SecretKey,
    /// Address of the sending account; its actor state is queried for the nonce.
    addr: Address,
    /// Maximum price per gas unit the sender is willing to pay.
    gas_fee_cap: TokenAmount,
    /// Gas premium attached to the messages.
    gas_premium: TokenAmount,
    /// Multiplier applied to the gas estimate before broadcasting.
    gas_overestimation_rate: f64,
    /// How many times to re-attempt a retryable failure (0 = no retries).
    max_retries: u8,
    /// Delay between broadcast attempts.
    retry_delay: Duration,
}
impl<C> Broadcaster<C>
where
    C: Client + Clone + Send + Sync,
{
    /// Create a broadcaster with no retries and a 1 second retry delay.
    pub fn new(
        client: C,
        addr: Address,
        secret_key: SecretKey,
        gas_fee_cap: TokenAmount,
        gas_premium: TokenAmount,
        gas_overestimation_rate: f64,
    ) -> Self {
        let client = FendermintClient::new(client);
        Self {
            client,
            secret_key,
            addr,
            gas_fee_cap,
            gas_premium,
            gas_overestimation_rate,
            max_retries: 0,
            // Set the retry delay to roughly the block creation time.
            retry_delay: Duration::from_secs(1),
        }
    }

    /// Builder-style setter for the number of retries.
    pub fn with_max_retries(mut self, max_retries: u8) -> Self {
        self.max_retries = max_retries;
        self
    }

    /// Builder-style setter for the delay between retries.
    pub fn with_retry_delay(mut self, retry_delay: Duration) -> Self {
        self.retry_delay = retry_delay;
        self
    }

    /// Delay between broadcast attempts.
    pub fn retry_delay(&self) -> Duration {
        self.retry_delay
    }

    /// Send a transaction to the chain and return its hash.
    ///
    /// It currently doesn't wait for the execution, only that it has successfully been added to the mempool,
    /// or if not then an error is returned. The reason for not waiting for the commit is that the Tendermint
    /// client seems to time out if the check fails, waiting for the inclusion which will never come, instead of
    /// returning the result with no `deliver_tx` and a failed `check_tx`. We can add our own mechanism to wait
    /// for commits if we have to.
    pub async fn fevm_invoke(
        &self,
        contract: Address,
        calldata: et::Bytes,
        chain_id: ChainID,
    ) -> anyhow::Result<tendermint::hash::Hash> {
        let tx_hash = retry!(self.max_retries, self.retry_delay, {
            // The nonce is re-fetched on every attempt in case a retry was caused by a nonce race.
            let sequence = self
                .sequence()
                .await
                .context("failed to get broadcaster sequence")?;

            let factory =
                SignedMessageFactory::new(self.secret_key.clone(), self.addr, sequence, chain_id);

            // Using the bound client as a one-shot transaction sender.
            let mut client = self.client.clone().bind(factory);

            // TODO: Maybe we should implement something like the Ethereum facade for estimating fees?
            // I don't want to call the Ethereum API directly (it would be one more dependency).
            // Another option is for Fendermint to recognise transactions coming from validators
            // and always put them into the block to facilitate checkpointing.
            let mut gas_params = GasParams {
                gas_limit: BLOCK_GAS_LIMIT,
                gas_fee_cap: self.gas_fee_cap.clone(),
                gas_premium: self.gas_premium.clone(),
            };

            // Not expecting to send any tokens to the contracts.
            let value = TokenAmount::zero();

            // We can use the `Committed` state to execute the message, which is more efficient than doing it on `Pending`.
            let gas_estimate = client
                .fevm_estimate_gas(
                    contract,
                    calldata.0.clone(),
                    value.clone(),
                    gas_params.clone(),
                    FvmQueryHeight::Committed,
                )
                .await
                .context("failed to estimate gas")?;

            if gas_estimate.value.exit_code.is_success() {
                // Pad the estimate so the message is less likely to run out of gas.
                gas_params.gas_limit =
                    (gas_estimate.value.gas_limit as f64 * self.gas_overestimation_rate) as u64;
            } else {
                bail!(
                    "failed to estimate gas: {} - {}",
                    gas_estimate.value.exit_code,
                    gas_estimate.value.info
                );
            }

            // Using TxSync instead of TxCommit because TxCommit times out if the `check_tx` part fails,
            // instead of returning as soon as the check failed with some default values for `deliver_tx`.
            let res = TxClient::<TxSync>::fevm_invoke(
                &mut client,
                contract,
                calldata.0.clone(),
                value,
                gas_params,
            )
            .await
            .context("failed to invoke contract")?;

            if res.response.code.is_err() {
                // Not sure what exactly arrives in the data and how it's encoded.
                // It might need the Base64 decoding or it may not. Let's assume
                // that it doesn't because unlike `DeliverTx::data`, this response
                // does have some Base64 related annotations.
                let data = decode_fevm_return_data(RawBytes::new(res.response.data.to_vec()))
                    .map(hex::encode)
                    .unwrap_or_else(|_| hex::encode(res.response.data));

                // Returned as a (code, message) pair so the retry! macro can decide
                // via `can_retry` whether the failure is transient.
                Err((
                    res.response.code,
                    format!(
                        "broadcasted transaction failed during check: {}; data = {}",
                        res.response.code.value(),
                        data
                    ),
                ))
            } else {
                Ok(res.response.hash)
            }
        });

        Ok(tx_hash)
    }

    /// Fetch the current nonce to be used in the next message.
    async fn sequence(&self) -> anyhow::Result<u64> {
        // Using the `Pending` state to query just in case there are other transactions initiated by the validator.
        let res = self
            .client
            .actor_state(&self.addr, FvmQueryHeight::Pending)
            .await
            .context("failed to get broadcaster actor state")?;

        match res.value {
            Some((_, state)) => Ok(state.sequence),
            None => Err(anyhow!("broadcaster actor {} cannot be found", self.addr)),
        }
    }
}
/// Decide if it's worth retrying the transaction.
///
/// Only a nonce race (`SYS_SENDER_STATE_INVALID`) is treated as transient; every
/// other exit code — invalid sender, insufficient funds, a bad gas estimate, or
/// anything unknown — would fail again on an immediate retry.
fn can_retry(code: tendermint::abci::Code) -> bool {
    matches!(
        ExitCode::new(code.value()),
        ExitCode::SYS_SENDER_STATE_INVALID
    )
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/check.rs | fendermint/vm/interpreter/src/fvm/check.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use async_trait::async_trait;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::{address::Address, error::ExitCode};
use crate::CheckInterpreter;
use super::{state::FvmExecState, store::ReadOnlyBlockstore, FvmMessage, FvmMessageInterpreter};
/// Transaction check results are expressed by the exit code, so that hopefully
/// they would result in the same error code if they were applied.
pub struct FvmCheckRet {
    /// Address of the message sender.
    pub sender: Address,
    /// Gas limit carried by the checked message.
    pub gas_limit: u64,
    /// Outcome of the check, expressed as an FVM exit code.
    pub exit_code: ExitCode,
    /// Return data, present when the message was actually executed during check.
    pub return_data: Option<RawBytes>,
    /// Optional human-readable diagnostics about why the check failed.
    pub info: Option<String>,
}
#[async_trait]
impl<DB, TC> CheckInterpreter for FvmMessageInterpreter<DB, TC>
where
    DB: Blockstore + 'static + Send + Sync + Clone,
    TC: Send + Sync + 'static,
{
    // We simulate the full pending state so that client can call methods on
    // contracts that haven't been deployed yet.
    type State = FvmExecState<ReadOnlyBlockstore<DB>>;
    type Message = FvmMessage;
    type Output = FvmCheckRet;

    /// Check that:
    /// * sender exists
    /// * sender nonce matches the message sequence
    /// * sender has enough funds to cover the gas cost
    async fn check(
        &self,
        mut state: Self::State,
        msg: Self::Message,
        _is_recheck: bool,
    ) -> anyhow::Result<(Self::State, Self::Output)> {
        // Helper that logs the outcome and packages it into the return value.
        let checked = |state,
                       exit_code: ExitCode,
                       gas_used: Option<u64>,
                       return_data: Option<RawBytes>,
                       info: Option<String>| {
            tracing::info!(
                exit_code = exit_code.value(),
                from = msg.from.to_string(),
                to = msg.to.to_string(),
                method_num = msg.method_num,
                gas_limit = msg.gas_limit,
                gas_used = gas_used.unwrap_or_default(),
                info = info.clone().unwrap_or_default(),
                "check transaction"
            );
            let ret = FvmCheckRet {
                sender: msg.from,
                gas_limit: msg.gas_limit,
                exit_code,
                return_data,
                info,
            };
            Ok((state, ret))
        };

        // Reject messages that fail their own well-formedness check up front.
        if let Err(e) = msg.check() {
            return checked(
                state,
                ExitCode::SYS_ASSERTION_FAILED,
                None,
                None,
                Some(format!("pre-check failure: {:#}", e)),
            );
        }

        // NOTE: This would be a great place for let-else, but clippy runs into a compilation bug.
        let state_tree = state.state_tree_mut();

        // This code is left in place for reference of a partial check performed on top of `FvmCheckState`.
        if let Some(id) = state_tree.lookup_id(&msg.from)? {
            if let Some(mut actor) = state_tree.get_actor(id)? {
                let balance_needed = msg.gas_fee_cap.clone() * msg.gas_limit;
                if actor.balance < balance_needed {
                    return checked(
                        state,
                        ExitCode::SYS_SENDER_STATE_INVALID,
                        None,
                        None,
                        Some(
                            format! {"actor balance {} less than needed {}", actor.balance, balance_needed},
                        ),
                    );
                } else if actor.sequence != msg.sequence {
                    return checked(
                        state,
                        ExitCode::SYS_SENDER_STATE_INVALID,
                        None,
                        None,
                        Some(
                            format! {"expected sequence {}, got {}", actor.sequence, msg.sequence},
                        ),
                    );
                } else if self.exec_in_check {
                    // Instead of modifying just the partial state, we will execute the call in earnest.
                    // This is required for fully supporting the Ethereum API "pending" queries, if that's needed.
                    // This will stack the effect for subsequent transactions added to the mempool.
                    let (apply_ret, _) = state.execute_explicit(msg.clone())?;

                    return checked(
                        state,
                        apply_ret.msg_receipt.exit_code,
                        Some(apply_ret.msg_receipt.gas_used),
                        Some(apply_ret.msg_receipt.return_data),
                        apply_ret
                            .failure_info
                            .map(|i| i.to_string())
                            .filter(|s| !s.is_empty()),
                    );
                } else {
                    // Cheap path: bump the nonce and reserve the max gas cost in the partial state.
                    actor.sequence += 1;
                    actor.balance -= balance_needed;
                    state_tree.set_actor(id, actor);
                    return checked(state, ExitCode::OK, None, None, None);
                }
            }
        }

        // The sender address could not be resolved to an existing actor.
        checked(
            state,
            ExitCode::SYS_SENDER_INVALID,
            None,
            None,
            Some(format! {"cannot find actor {}", msg.from}),
        )
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/extend.rs | fendermint/vm/interpreter/src/fvm/extend.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::collections::HashMap;
use anyhow::bail;
use anyhow::Context;
use async_trait::async_trait;
use bls_signatures::Serialize as _;
use fendermint_actor_cetf::BlsSignature;
use fendermint_actor_cetf::Tag;
use num_traits::ToBytes;
use crate::ExtendVoteInterpreter;
use fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ADDR;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::serde::{Deserialize, Serialize};
use fvm_shared::address::Address;
use tendermint::account;
use tendermint::block::Height;
use tendermint_rpc::Client;
use super::{
checkpoint::bft_power_table, state::FvmQueryState, FvmMessageInterpreter, ValidatorContext,
};
/// Ordered list of items a validator is asked to sign during vote extension.
#[derive(Debug, Serialize, Deserialize)]
pub struct Tags(pub Vec<TagKind>);

/// BLS signatures produced over a [`Tags`] list, in the same order.
#[derive(Debug, Serialize, Deserialize)]
pub struct SignedTags(pub Vec<SignatureKind>);

/// The kinds of payloads that can be signed.
#[derive(Debug, Serialize, Deserialize)]
pub enum TagKind {
    // From Cetf Actor
    Cetf(Tag),
    // Height as be bytes
    BlockHeight(u64),
}
impl TagKind {
    /// Serialize the tag payload to the exact bytes that get signed.
    pub fn to_vec(&self) -> Vec<u8> {
        match self {
            TagKind::Cetf(tag) => tag.to_vec(),
            TagKind::BlockHeight(height) => height.to_be_bytes().to_vec(),
        }
    }

    /// Sign this tag's byte representation with the validator's BLS key.
    pub fn sign<C>(&self, ctx: &ValidatorContext<C>) -> anyhow::Result<SignatureKind> {
        match self {
            TagKind::Cetf(tag) => {
                let sig = ctx.sign_tag(tag.as_slice());
                Ok(SignatureKind::Cetf(BlsSignature(
                    sig.as_bytes()
                        .try_into()
                        .expect("BLS signature has a fixed size"),
                )))
            }
            TagKind::BlockHeight(height) => {
                // Sign the big-endian bytes directly; the previous `.to_vec()` here
                // allocated an intermediate Vec for no reason.
                let sig = ctx.sign_tag(&height.to_be_bytes());
                Ok(SignatureKind::BlockHeight(BlsSignature(
                    sig.as_bytes()
                        .try_into()
                        .expect("BLS signature has a fixed size"),
                )))
            }
        }
    }
}
/// A BLS signature over the byte form of the corresponding [`TagKind`] variant.
#[derive(Debug, Serialize, Deserialize)]
pub enum SignatureKind {
    Cetf(BlsSignature),
    BlockHeight(BlsSignature),
}
impl SignatureKind {
    /// Owned copy of the raw signature bytes.
    pub fn to_vec(&self) -> Vec<u8> {
        // Deduplicated: both variants' bytes are reachable through `as_slice`.
        self.as_slice().to_vec()
    }

    /// Borrow the raw signature bytes, regardless of which payload was signed.
    pub fn as_slice(&self) -> &[u8] {
        match self {
            SignatureKind::Cetf(sig) => sig.0.as_slice(),
            SignatureKind::BlockHeight(sig) => sig.0.as_slice(),
        }
    }

    /// Parse the raw bytes back into a BLS signature object.
    pub fn to_bls_signature(&self) -> anyhow::Result<bls_signatures::Signature> {
        bls_signatures::Signature::from_bytes(self.as_slice())
            .context("failed to convert SignatureKind to bls signature")
    }
}
#[async_trait]
impl<DB, TC> ExtendVoteInterpreter for FvmMessageInterpreter<DB, TC>
where
    DB: Blockstore + Clone + 'static + Send + Sync,
    TC: Client + Clone + Send + Sync + 'static,
{
    type State = FvmQueryState<DB>;
    type ExtendMessage = Tags;
    type VerifyMessage = (account::Id, Tags, SignedTags);
    type ExtendOutput = SignedTags;
    type VerifyOutput = Option<bool>;

    /// Sign the vote.
    async fn extend_vote(
        &self,
        state: Self::State,
        msg: Self::ExtendMessage,
    ) -> anyhow::Result<Self::ExtendOutput> {
        // Read the CETF actor state to see whether tag signing is enabled at all.
        let (state, res) = state.actor_state(&CETFSYSCALL_ACTOR_ADDR).await?;
        let is_enabled = if let Some((_id, act_st)) = res {
            let st: fendermint_actor_cetf::State = state.store_get_cbor(&act_st.state)?.unwrap();
            st.enabled
        } else {
            bail!("no CETF actor found!");
        };
        if !is_enabled {
            return Ok(SignedTags(vec![]));
        }
        // Only nodes with a validator context (i.e. validators) produce signatures.
        if let Some(ctx) = self.validator_ctx.as_ref() {
            Ok(SignedTags(
                msg.0
                    .iter()
                    .map(|t| t.sign(ctx))
                    .collect::<anyhow::Result<Vec<_>>>()
                    .unwrap(),
            ))
        } else {
            Ok(SignedTags(vec![]))
        }
    }

    async fn verify_vote_extension(
        &self,
        state: Self::State,
        msg: Self::VerifyMessage,
    ) -> anyhow::Result<(Self::State, Self::VerifyOutput)> {
        let (id, tags, sigs) = msg;
        if let Some(_ctx) = self.validator_ctx.as_ref() {
            let (state, res) = state.actor_state(&CETFSYSCALL_ACTOR_ADDR).await?;
            let store = state.read_only_store();
            // Load the CETF actor state: the enabled flag and registered validator BLS keys.
            let (is_enabled, registered_keys) = if let Some((_id, act_st)) = res {
                let st: fendermint_actor_cetf::State =
                    state.store_get_cbor(&act_st.state)?.unwrap();
                (st.enabled, st.get_validators_keymap(&store)?)
            } else {
                bail!("no CETF actor found!");
            };
            if !is_enabled {
                // When disabled, any tags or signatures in the extension are invalid.
                if !tags.0.is_empty() || !sigs.0.is_empty() {
                    bail!("CETF Actor is disabled! There should not be and tags or signatures");
                }
                return Ok((state, None));
            }

            // TODO: There must be a better way to convert address to secp256k1 public key
            // Map Tendermint account ids to FVM secp256k1 addresses via the power table.
            let key_map =
                bft_power_table(&self.client, Height::try_from(state.block_height() as u64)?)
                    .await
                    .context("failed to get power table")?
                    .0
                    .iter()
                    .map(|k| {
                        let tm_pk: tendermint::PublicKey = k
                            .public_key
                            .clone()
                            .try_into()
                            .expect("failed to convert to secp256k1 public key");
                        let tm_addr = account::Id::from(tm_pk);
                        let fvm_addr =
                            Address::new_secp256k1(&k.public_key.public_key().serialize())
                                .expect("failed to convert to address");
                        (tm_addr, fvm_addr)
                    })
                    .collect::<HashMap<_, _>>();

            let fvm_addr = key_map.get(&id).expect("failed to get fvm address");
            let bls_pub_key = registered_keys
                .get(fvm_addr)
                .expect("failed to get bls public key")
                .unwrap();
            let bls_pub_key = bls_signatures::PublicKey::from_bytes(&bls_pub_key.0)
                .context("failed to deser bls pubkey")?;

            // Verify signatures
            // All (signature, tag) pairs must verify for the extension to be accepted.
            let mut res = true;
            for (sig, tag) in sigs.0.iter().zip(tags.0.iter()) {
                let v = bls_signatures::verify_messages(
                    &sig.to_bls_signature()?,
                    &[&tag.to_vec()],
                    &[bls_pub_key],
                );
                // tracing::info!("BLS Verify for {:?} is {:?}", tag, v);
                res &= v;
            }
            if res {
                Ok((state, Some(true)))
            } else {
                Ok((state, Some(false)))
            }
        } else {
            tracing::info!("No validator context found");
            Ok((state, None))
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/mod.rs | fendermint/vm/interpreter/src/fvm/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::path::PathBuf;
mod broadcast;
mod check;
mod checkpoint;
mod custom_kernel;
mod exec;
pub mod extend;
mod externs;
mod genesis;
mod query;
pub mod state;
pub mod store;
pub mod upgrades;
#[cfg(any(test, feature = "bundle"))]
pub mod bundle;
pub(crate) mod topdown;
pub use check::FvmCheckRet;
pub use checkpoint::PowerUpdates;
pub use exec::FvmApplyRet;
use fendermint_crypto::{PublicKey, SecretKey};
use fendermint_eth_hardhat::Hardhat;
pub use fendermint_vm_message::query::FvmQuery;
use fvm_ipld_blockstore::Blockstore;
pub use genesis::FvmGenesisOutput;
pub use query::FvmQueryRet;
use tendermint_rpc::Client;
pub use self::broadcast::Broadcaster;
use self::{state::ipc::GatewayCaller, upgrades::UpgradeScheduler};
pub type FvmMessage = fvm_shared::message::Message;
#[derive(Clone)]
pub struct ValidatorContext<C> {
/// The secret key the validator uses to produce blocks.
secret_key: SecretKey,
/// The public key identifying the validator (corresponds to the secret key.)
public_key: PublicKey,
/// BLS secret key used for signing CETF tags
bls_secret_key: bls_signatures::PrivateKey,
/// BLS public key used for verifying CETF tags
_bls_public_key: bls_signatures::PublicKey,
/// Used to broadcast transactions. It might use a different secret key for
/// signing transactions than the validator's block producing key.
broadcaster: Broadcaster<C>,
}
impl<C> ValidatorContext<C> {
    /// Create a validator context, deriving both public keys from the given secret keys.
    pub fn new(
        secret_key: SecretKey,
        bls_secret_key: bls_signatures::PrivateKey,
        broadcaster: Broadcaster<C>,
    ) -> Self {
        // Derive the public keys so it's available to check whether this node is a validator at any point in time.
        let public_key = secret_key.public_key();
        let bls_public_key = bls_secret_key.public_key();
        Self {
            secret_key,
            public_key,
            bls_secret_key,
            _bls_public_key: bls_public_key,
            broadcaster,
        }
    }

    /// Sign the given tag bytes with the validator's BLS secret key.
    pub fn sign_tag(&self, tag: &[u8]) -> bls_signatures::Signature {
        self.bls_secret_key.sign(tag)
    }
}
/// Interpreter working on already verified unsigned messages.
#[derive(Clone)]
pub struct FvmMessageInterpreter<DB, C>
where
DB: Blockstore + 'static + Clone,
{
contracts: Hardhat,
/// Tendermint client for querying the RPC.
client: C,
/// If this is a validator node, this should be the key we can use to sign transactions.
validator_ctx: Option<ValidatorContext<C>>,
/// Overestimation rate applied to gas to ensure that the
/// message goes through in the gas estimation.
gas_overestimation_rate: f64,
/// Gas search step increase used to find the optimal gas limit.
/// It determines how fine-grained we want the gas estimation to be.
gas_search_step: f64,
/// Indicate whether transactions should be fully executed during the checks performed
/// when they are added to the mempool, or just the most basic ones are performed.
exec_in_check: bool,
/// Indicate whether the chain metadata should be pushed into the ledger.
push_chain_meta: bool,
gateway: GatewayCaller<DB>,
/// Upgrade scheduler stores all the upgrades to be executed at given heights.
upgrade_scheduler: UpgradeScheduler<DB>,
}
impl<DB, C> FvmMessageInterpreter<DB, C>
where
    DB: Blockstore + 'static + Clone,
{
    /// Create an interpreter; pushing chain metadata into the ledger is enabled by default.
    pub fn new(
        client: C,
        validator_ctx: Option<ValidatorContext<C>>,
        contracts_dir: PathBuf,
        gas_overestimation_rate: f64,
        gas_search_step: f64,
        exec_in_check: bool,
        upgrade_scheduler: UpgradeScheduler<DB>,
    ) -> Self {
        Self {
            client,
            validator_ctx,
            contracts: Hardhat::new(contracts_dir),
            gas_overestimation_rate,
            gas_search_step,
            exec_in_check,
            push_chain_meta: true,
            gateway: GatewayCaller::default(),
            upgrade_scheduler,
        }
    }

    /// Builder-style toggle for pushing chain metadata into the ledger.
    pub fn with_push_chain_meta(mut self, push_chain_meta: bool) -> Self {
        self.push_chain_meta = push_chain_meta;
        self
    }
}
impl<DB, C> FvmMessageInterpreter<DB, C>
where
    DB: fvm_ipld_blockstore::Blockstore + 'static + Clone,
    C: Client + Sync,
{
    /// Indicate that the node is syncing with the rest of the network and hasn't caught up with the tip yet.
    async fn syncing(&self) -> bool {
        let status = self.client.status().await;
        match status {
            Err(e) => {
                // CometBFT often takes a long time to boot, e.g. while it's replaying blocks it won't
                // respond to JSON-RPC calls. Let's treat this as an indication that we are syncing.
                tracing::warn!(error =? e, "failed to get CometBFT sync status");
                true
            }
            Ok(status) => status.sync_info.catching_up,
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/checkpoint.rs | fendermint/vm/interpreter/src/fvm/checkpoint.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::collections::HashMap;
use std::time::Duration;
use anyhow::{anyhow, Context};
use ethers::abi::Tokenizable;
use tendermint::block::Height;
use tendermint_rpc::endpoint::commit;
use tendermint_rpc::{endpoint::validators, Client, Paging};
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::{address::Address, chainid::ChainID};
use fendermint_crypto::PublicKey;
use fendermint_crypto::SecretKey;
use fendermint_tracing::emit;
use fendermint_vm_actor_interface::eam::EthAddress;
use fendermint_vm_actor_interface::ipc::BottomUpCheckpoint;
use fendermint_vm_event::NewBottomUpCheckpoint;
use fendermint_vm_genesis::{Power, Validator, ValidatorKey};
use ipc_actors_abis::checkpointing_facet as checkpoint;
use ipc_actors_abis::gateway_getter_facet as getter;
use ipc_api::staking::ConfigurationNumber;
use super::state::ipc::tokens_to_burn;
use super::{
broadcast::Broadcaster,
state::{ipc::GatewayCaller, FvmExecState},
ValidatorContext,
};
/// Validator voting power snapshot.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PowerTable(pub Vec<Validator<Power>>);
/// Changes in the power table.
///
/// Contains only validators whose power changed; removed validators appear
/// with a power of 0, which is how CometBFT expects deletions to be signalled
/// (see [`power_diff`]).
#[derive(Debug, Clone, Default)]
pub struct PowerUpdates(pub Vec<Validator<Power>>);
/// Construct and store a checkpoint if this is the end of the checkpoint period.
/// Perform end-of-checkpoint-period transitions in the ledger.
///
/// If we are the boundary, return the validators eligible to sign and any updates
/// to the power table, along with the checkpoint that needs to be signed by validators.
pub fn maybe_create_checkpoint<DB>(
    gateway: &GatewayCaller<DB>,
    state: &mut FvmExecState<DB>,
) -> anyhow::Result<Option<(checkpoint::BottomUpCheckpoint, PowerUpdates)>>
where
    DB: Blockstore + Sync + Send + Clone + 'static,
{
    // Epoch transitions for checkpointing.
    let height: tendermint::block::Height = state
        .block_height()
        .try_into()
        .context("block height is not u64")?;
    let block_hash = state
        .block_hash()
        .ok_or_else(|| anyhow!("block hash not set"))?;
    // Bail out early when this height is neither a batch nor a period boundary.
    let Some((msgs, subnet_id)) = should_create_checkpoint(gateway, state, height)? else {
        return Ok(None);
    };
    // Get the current power table from the ledger, not CometBFT.
    // NOTE: This is read *before* `apply_validator_changes` below, because the
    // current validators are the ones eligible to sign this checkpoint.
    let (_, curr_power_table) =
        ipc_power_table(gateway, state).context("failed to get the current power table")?;
    // Apply any validator set transitions.
    let next_configuration_number = gateway
        .apply_validator_changes(state)
        .context("failed to apply validator changes")?;
    // Sum up the value leaving the subnet as part of the bottom-up messages.
    let burnt_tokens = tokens_to_burn(&msgs);
    // NOTE: Unlike when we minted tokens for the gateway by modifying its balance,
    // we don't have to burn them here, because it's already being done in
    // https://github.com/consensus-shipyard/ipc-solidity-actors/pull/263
    // by sending the funds to the BURNTFUNDS_ACTOR.
    // Ostensibly we could opt _not_ to decrease the circ supply here, but rather
    // look up the burnt funds balance at the beginning of each block and subtract
    // it from the monotonically increasing supply, in which case it could reflect
    // a wider range of burning activity than just IPC.
    // It might still be inconsistent if someone uses another address for burning tokens.
    // By decreasing here, at least `circ_supply` is consistent with IPC.
    state.update_circ_supply(|circ_supply| {
        *circ_supply -= burnt_tokens;
    });
    let num_msgs = msgs.len();
    // Construct checkpoint.
    let checkpoint = BottomUpCheckpoint {
        subnet_id,
        block_height: ethers::types::U256::from(height.value()),
        block_hash,
        next_configuration_number,
        msgs,
    };
    // Save the checkpoint in the ledger.
    // Pass in the current power table, because these are the validators who can sign this checkpoint.
    gateway
        .create_bottom_up_checkpoint(state, checkpoint.clone(), &curr_power_table.0)
        .context("failed to store checkpoint")?;
    // Figure out the power updates if there was some change in the configuration.
    // A configuration number of 0 is how the branch below interprets "no change".
    let power_updates = if next_configuration_number == 0 {
        PowerUpdates(Vec::new())
    } else {
        let (next_power_configuration_number, next_power_table) =
            ipc_power_table(gateway, state).context("failed to get next power table")?;
        debug_assert_eq!(next_power_configuration_number, next_configuration_number);
        power_diff(curr_power_table, next_power_table)
    };
    emit!(NewBottomUpCheckpoint {
        block_height: height.value(),
        block_hash: &hex::encode(block_hash),
        num_msgs,
        next_configuration_number,
    });
    Ok(Some((checkpoint, power_updates)))
}
/// Poll CometBFT until its latest committed block height reaches `block_height`.
///
/// Used to wait for the next block, in which the ledger changes we made during
/// execution have been committed.
async fn wait_for_commit<C>(
    client: &C,
    block_height: u64,
    retry_delay: Duration,
) -> anyhow::Result<()>
where
    C: Client + Clone + Send + Sync + 'static,
{
    loop {
        let latest: commit::Response = client
            .latest_commit()
            .await
            .context("failed to fetch latest commit")?;

        let committed_height = latest.signed_header.header().height.value();
        if committed_height >= block_height {
            return Ok(());
        }

        // Not there yet; back off and poll again.
        tokio::time::sleep(retry_delay).await;
    }
}
/// Collect incomplete checkpoints from the ledger which this validator hasn't signed yet.
///
/// It doesn't check whether the validator should have signed them; that's done
/// inside [broadcast_incomplete_signatures] at the moment. The goal here is
/// only to avoid double signing for those we have already signed.
pub fn unsigned_checkpoints<DB>(
    gateway: &GatewayCaller<DB>,
    state: &mut FvmExecState<DB>,
    validator_key: PublicKey,
) -> anyhow::Result<Vec<getter::BottomUpCheckpoint>>
where
    DB: Blockstore + Send + Sync + Clone + 'static,
{
    // Our on-chain identity, as it would appear among the signatories.
    let signer = EthAddress::from(validator_key);

    let incomplete = gateway
        .incomplete_checkpoints(state)
        .context("failed to fetch incomplete checkpoints")?;

    let mut missing_ours = Vec::new();
    for cp in incomplete {
        let signatories = gateway
            .checkpoint_signatories(state, cp.block_height.as_u64())
            .context("failed to get checkpoint signatories")?;

        // Skip checkpoints we already signed.
        if signatories.contains(&signer) {
            continue;
        }
        missing_ours.push(cp);
    }
    Ok(missing_ours)
}
/// Sign the current and any incomplete checkpoints.
///
/// For each incomplete checkpoint, the power table CometBFT had at that height is
/// fetched; if this validator appears in it, a signature transaction is broadcast.
pub async fn broadcast_incomplete_signatures<C, DB>(
    client: &C,
    validator_ctx: &ValidatorContext<C>,
    gateway: &GatewayCaller<DB>,
    chain_id: ChainID,
    incomplete_checkpoints: Vec<getter::BottomUpCheckpoint>,
) -> anyhow::Result<()>
where
    C: Client + Clone + Send + Sync + 'static,
    DB: Blockstore + Send + Sync + Clone + 'static,
{
    // Make sure that these had time to be added to the ledger.
    if let Some(highest) = incomplete_checkpoints
        .iter()
        .map(|cp| cp.block_height)
        .max()
    {
        wait_for_commit(
            client,
            highest.as_u64() + 1,
            validator_ctx.broadcaster.retry_delay(),
        )
        .await
        .context("failed to wait for commit")?;
    }
    for cp in incomplete_checkpoints {
        let height = Height::try_from(cp.block_height.as_u64())?;
        // Getting the power table from CometBFT where the history is available.
        let power_table = bft_power_table(client, height)
            .await
            .context("failed to get power table")?;
        // Only sign if we were part of the validator set at that height.
        if let Some(validator) = power_table
            .0
            .iter()
            .find(|v| v.public_key.0 == validator_ctx.public_key)
            .cloned()
        {
            // TODO: Code generation in the ipc-solidity-actors repo should cater for this.
            // Convert from the getter-facet to the checkpointing-facet representation.
            let checkpoint = checkpoint::BottomUpCheckpoint {
                subnet_id: checkpoint::SubnetID {
                    root: cp.subnet_id.root,
                    route: cp.subnet_id.route,
                },
                block_height: cp.block_height,
                block_hash: cp.block_hash,
                next_configuration_number: cp.next_configuration_number,
                msgs: convert_tokenizables(cp.msgs)?,
            };
            // We mustn't do these in parallel because of how nonces are fetched.
            broadcast_signature(
                &validator_ctx.broadcaster,
                gateway,
                checkpoint,
                &power_table,
                &validator,
                &validator_ctx.secret_key,
                chain_id,
            )
            .await
            .context("failed to broadcast checkpoint signature")?;
            tracing::debug!(?height, "submitted checkpoint signature");
        }
    }
    Ok(())
}
/// As a validator, sign the checkpoint and broadcast a transaction to add our signature to the ledger.
pub async fn broadcast_signature<C, DB>(
    broadcaster: &Broadcaster<C>,
    gateway: &GatewayCaller<DB>,
    checkpoint: checkpoint::BottomUpCheckpoint,
    power_table: &PowerTable,
    validator: &Validator<Power>,
    secret_key: &SecretKey,
    chain_id: ChainID,
) -> anyhow::Result<()>
where
    C: Client + Clone + Send + Sync + 'static,
    DB: Blockstore + Send + Sync + Clone + 'static,
{
    // Produce the calldata that carries our signature over the checkpoint.
    let calldata = gateway
        .add_checkpoint_signature_calldata(checkpoint, &power_table.0, validator, secret_key)
        .context("failed to produce checkpoint signature calldata")?;

    // Invoke the gateway contract with it.
    let gateway_addr = Address::from(gateway.addr());
    let tx_hash = broadcaster
        .fevm_invoke(gateway_addr, calldata, chain_id)
        .await
        .context("failed to broadcast signature")?;

    // The transaction should be in the mempool now.
    tracing::info!(tx_hash = tx_hash.to_string(), "broadcasted signature");
    Ok(())
}
/// Convert a list of ABI-tokenizable values from one generated type to another
/// by round-tripping each element through its token representation.
fn convert_tokenizables<Source: Tokenizable, Target: Tokenizable>(
    tokenizables: Vec<Source>,
) -> anyhow::Result<Vec<Target>> {
    let mut converted = Vec::with_capacity(tokenizables.len());
    for source in tokenizables {
        converted.push(Target::from_token(source.into_token())?);
    }
    Ok(converted)
}
/// Decide whether a bottom-up checkpoint should be created at the given height.
///
/// Returns the messages to include and the subnet ID when a checkpoint is due,
/// or `None` when checkpointing is disabled, when this is the root subnet, or
/// when the height is neither a batch boundary nor a checkpoint-period boundary.
fn should_create_checkpoint<DB>(
    gateway: &GatewayCaller<DB>,
    state: &mut FvmExecState<DB>,
    height: Height,
) -> anyhow::Result<Option<(Vec<checkpoint::IpcEnvelope>, checkpoint::SubnetID)>>
where
    DB: Blockstore + Clone,
{
    if !gateway.enabled(state)? {
        return Ok(None);
    }
    let id = gateway.subnet_id(state)?;
    // The root subnet has no parent to checkpoint to.
    let is_root = id.route.is_empty();
    if is_root {
        return Ok(None);
    }
    let batch = gateway.bottom_up_msg_batch(state, height.into())?;
    // A non-zero batch height means a message batch was recorded at this height,
    // which triggers a checkpoint regardless of the period.
    if batch.block_height.as_u64() != 0 {
        tracing::debug!(
            height = height.value(),
            "bottom up msg batch exists at height"
        );
    } else if height.value() % gateway.bottom_up_check_period(state)? == 0 {
        tracing::debug!(
            height = height.value(),
            "bottom up checkpoint period reached height"
        );
    } else {
        return Ok(None);
    }
    let id = checkpoint::SubnetID {
        root: id.root,
        route: id.route,
    };
    let msgs = convert_tokenizables(batch.msgs)?;
    Ok(Some((msgs, id)))
}
/// Get the power table from CometBFT.
///
/// This is prone to failing, e.g. one theory is that CometBFT is trying to restart
/// the application, and while doing that it does not open up its HTTP services,
/// leading to a chicken-and-egg problem of failing to start.
pub(crate) async fn bft_power_table<C>(client: &C, height: Height) -> anyhow::Result<PowerTable>
where
C: Client + Sync + Send + 'static,
{
let mut power_table = Vec::new();
let validators: validators::Response = client.validators(height, Paging::All).await?;
for v in validators.validators {
power_table.push(Validator {
public_key: ValidatorKey::try_from(v.pub_key)?,
power: Power(v.power()),
});
}
Ok(PowerTable(power_table))
}
/// Get the current power table from the Gateway actor.
fn ipc_power_table<DB>(
    gateway: &GatewayCaller<DB>,
    state: &mut FvmExecState<DB>,
) -> anyhow::Result<(ConfigurationNumber, PowerTable)>
where
    DB: Blockstore + Sync + Send + Clone + 'static,
{
    let (configuration_number, validators) = gateway
        .current_power_table(state)
        .context("failed to get current power table")?;

    Ok((configuration_number, PowerTable(validators)))
}
/// Calculate the difference between the current and the next power table, to return to CometBFT only what changed:
/// * include any new validator, or validators whose power has been updated
/// * include validators to be removed with a power of 0, as [expected](https://github.com/informalsystems/tendermint-rs/blob/bcc0b377812b8e53a02dff156988569c5b3c81a2/rpc/src/dialect/end_block.rs#L12-L14) by CometBFT
fn power_diff(current: PowerTable, next: PowerTable) -> PowerUpdates {
    let current = into_power_map(current);
    let next = into_power_map(next);

    // Validators present now but absent from `next` are signalled with power 0.
    let mut diff: Vec<_> = current
        .iter()
        .filter(|(key, _)| !next.contains_key(*key))
        .map(|(_, validator)| Validator {
            public_key: validator.public_key.clone(),
            power: Power(0),
        })
        .collect();

    // Validators that are new or whose power differs are included as-is.
    diff.extend(
        next.into_iter()
            .filter(|(key, validator)| current.get(key) != Some(validator))
            .map(|(_, validator)| validator),
    );

    PowerUpdates(diff)
}
/// Convert the power list to a `HashMap` to support lookups by the public key.
///
/// Unfortunately in their raw format the [`PublicKey`] does not implement `Hash`,
/// so we have to use the serialized format.
fn into_power_map(value: PowerTable) -> HashMap<[u8; 65], Validator<Power>> {
    let mut by_key = HashMap::with_capacity(value.0.len());
    for validator in value.0 {
        // The serialized public key bytes act as the map key.
        let key = validator.public_key.0.serialize();
        by_key.insert(key, validator);
    }
    by_key
}
#[cfg(test)]
mod tests {
    use fendermint_vm_genesis::{Power, Validator};
    use quickcheck_macros::quickcheck;
    use crate::fvm::checkpoint::{into_power_map, power_diff};
    use super::{PowerTable, PowerUpdates};
    /// Apply `updates` to `current` the way CometBFT would:
    /// a power of 0 removes the validator, any other power inserts/overwrites it.
    fn power_update(current: PowerTable, updates: PowerUpdates) -> PowerTable {
        let mut current = into_power_map(current);
        for v in updates.0 {
            let k = v.public_key.0.serialize();
            if v.power.0 == 0 {
                current.remove(&k);
            } else {
                current.insert(k, v);
            }
        }
        PowerTable(current.into_values().collect())
    }
    /// A pair of partially-overlapping power tables generated for property tests.
    #[derive(Debug, Clone)]
    struct TestPowerTables {
        current: PowerTable,
        next: PowerTable,
    }
    impl quickcheck::Arbitrary for TestPowerTables {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            // `current` is a prefix and `next` a suffix (with fresh powers) of a
            // shared validator list, so the two tables overlap in the middle.
            let v = 1 + usize::arbitrary(g) % 10;
            let c = 1 + usize::arbitrary(g) % v;
            let n = 1 + usize::arbitrary(g) % v;
            let vs = (0..v).map(|_| Validator::arbitrary(g)).collect::<Vec<_>>();
            let cvs = vs.iter().take(c).cloned().collect();
            let nvs = vs
                .into_iter()
                .skip(v - n)
                .map(|mut v| {
                    v.power = Power::arbitrary(g);
                    v
                })
                .collect();
            TestPowerTables {
                current: PowerTable(cvs),
                next: PowerTable(nvs),
            }
        }
    }
    /// Applying the computed diff to `current` must reproduce `next`.
    #[quickcheck]
    fn prop_power_diff_update(powers: TestPowerTables) {
        let diff = power_diff(powers.current.clone(), powers.next.clone());
        let next = power_update(powers.current, diff);
        // Order shouldn't matter.
        let next = into_power_map(next);
        let expected = into_power_map(powers.next);
        assert_eq!(next, expected)
    }
    /// Reordering the same validators must yield an empty diff.
    #[quickcheck]
    fn prop_power_diff_nochange(v1: Validator<Power>, v2: Validator<Power>) {
        let current = PowerTable(vec![v1.clone(), v2.clone()]);
        let next = PowerTable(vec![v2, v1]);
        assert!(power_diff(current, next).0.is_empty());
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/query.rs | fendermint/vm/interpreter/src/fvm/query.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use async_trait::async_trait;
use cid::Cid;
use fendermint_vm_message::query::{ActorState, FvmQuery, GasEstimate, StateParams};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::{
bigint::BigInt, econ::TokenAmount, error::ExitCode, message::Message, ActorID, BLOCK_GAS_LIMIT,
};
use num_traits::Zero;
use crate::QueryInterpreter;
use super::{state::FvmQueryState, FvmApplyRet, FvmMessageInterpreter};
/// Internal return type for queries. It will never be serialized
/// and sent over the wire as it is, only its internal parts are
/// sent in the response. The client has to know what to expect,
/// depending on the kind of query it sent.
pub enum FvmQueryRet {
    /// Bytes from the IPLD store result, if found.
    Ipld(Option<Vec<u8>>),
    /// The full state of an actor, if found.
    ActorState(Option<Box<(ActorID, ActorState)>>),
    /// The results of a read-only message application.
    Call(FvmApplyRet),
    /// The estimated gas limit.
    EstimateGas(GasEstimate),
    /// Current state parameters
    /// (base fee, circulating supply, chain ID, network version).
    StateParams(StateParams),
    /// Builtin actors known by the system.
    BuiltinActors(Vec<(String, Cid)>),
}
#[async_trait]
impl<DB, TC> QueryInterpreter for FvmMessageInterpreter<DB, TC>
where
    DB: Blockstore + 'static + Send + Sync + Clone,
    TC: Send + Sync + 'static,
{
    type State = FvmQueryState<DB>;
    type Query = FvmQuery;
    type Output = FvmQueryRet;
    /// Dispatch a single query against the FVM state, threading `state`
    /// through so subsequent queries can reuse it.
    async fn query(
        &self,
        state: Self::State,
        qry: Self::Query,
    ) -> anyhow::Result<(Self::State, Self::Output)> {
        match qry {
            // Raw IPLD store lookup by CID.
            FvmQuery::Ipld(cid) => {
                let data = state.store_get(&cid)?;
                tracing::info!(
                    height = state.block_height(),
                    pending = state.pending(),
                    cid = cid.to_string(),
                    found = data.is_some(),
                    "query IPLD"
                );
                let out = FvmQueryRet::Ipld(data);
                Ok((state, out))
            }
            // Look up the full state of an actor by address.
            FvmQuery::ActorState(address) => {
                let (state, ret) = state.actor_state(&address).await?;
                tracing::info!(
                    height = state.block_height(),
                    pending = state.pending(),
                    addr = address.to_string(),
                    found = ret.is_some(),
                    "query actor state"
                );
                let out = FvmQueryRet::ActorState(ret.map(Box::new));
                Ok((state, out))
            }
            // Read-only message application.
            FvmQuery::Call(msg) => {
                // Remember routing details before `msg` is consumed by the call.
                let from = msg.from;
                let to = msg.to;
                let method_num = msg.method_num;
                let gas_limit = msg.gas_limit;
                // Do not stack effects
                let (state, (apply_ret, emitters)) = state.call(*msg).await?;
                tracing::info!(
                    height = state.block_height(),
                    pending = state.pending(),
                    to = to.to_string(),
                    from = from.to_string(),
                    method_num,
                    exit_code = apply_ret.msg_receipt.exit_code.value(),
                    data = hex::encode(apply_ret.msg_receipt.return_data.bytes()),
                    info = apply_ret
                        .failure_info
                        .as_ref()
                        .map(|i| i.to_string())
                        .unwrap_or_default(),
                    "query call"
                );
                let ret = FvmApplyRet {
                    apply_ret,
                    from,
                    to,
                    method_num,
                    gas_limit,
                    emitters,
                };
                let out = FvmQueryRet::Call(ret);
                Ok((state, out))
            }
            FvmQuery::EstimateGas(mut msg) => {
                tracing::info!(
                    height = state.block_height(),
                    pending = state.pending(),
                    to = msg.to.to_string(),
                    from = msg.from.to_string(),
                    method_num = msg.method_num,
                    "query estimate gas"
                );
                // Populate gas message parameters.
                match self.estimate_gassed_msg(state, &mut msg).await? {
                    (state, Some(est)) => {
                        // return immediately if something is returned,
                        // it means that the message failed to execute so there's
                        // no point on estimating the gas.
                        Ok((state, FvmQueryRet::EstimateGas(est)))
                    }
                    (state, None) => {
                        // perform a gas search for an accurate value
                        let (state, mut est) = self.gas_search(state, &msg).await?;
                        // we need an additional overestimation for the case where
                        // the exact value is returned as part of the gas search
                        // (for some reason with subsequent calls sometimes this is the case).
                        est.gas_limit =
                            (est.gas_limit as f64 * self.gas_overestimation_rate) as u64;
                        Ok((state, FvmQueryRet::EstimateGas(est)))
                    }
                }
            }
            // Copy out the subset of state parameters clients care about.
            FvmQuery::StateParams => {
                let state_params = state.state_params();
                let state_params = StateParams {
                    base_fee: state_params.base_fee.clone(),
                    circ_supply: state_params.circ_supply.clone(),
                    chain_id: state_params.chain_id,
                    network_version: state_params.network_version,
                };
                Ok((state, FvmQueryRet::StateParams(state_params)))
            }
            FvmQuery::BuiltinActors => {
                let (state, ret) = state.builtin_actors().await?;
                Ok((state, FvmQueryRet::BuiltinActors(ret)))
            }
        }
    }
}
impl<DB, TC> FvmMessageInterpreter<DB, TC>
where
DB: Blockstore + 'static + Send + Sync + Clone,
{
    /// Pre-flight `msg` to populate its gas parameters for estimation.
    ///
    /// Runs the message once with the block gas limit and zero prices. On failure
    /// it returns `Some(estimate)` carrying the failure (there is no point
    /// searching further); on success it fills `gas_limit`, `gas_premium` and
    /// `gas_fee_cap` on `msg` and returns `None` so the caller can run the search.
    async fn estimate_gassed_msg(
        &self,
        state: FvmQueryState<DB>,
        msg: &mut Message,
    ) -> anyhow::Result<(FvmQueryState<DB>, Option<GasEstimate>)> {
        // Setting BlockGasLimit as initial limit for gas estimation
        msg.gas_limit = BLOCK_GAS_LIMIT;
        // With unlimited gas we are probably better off setting the prices to zero.
        let gas_premium = msg.gas_premium.clone();
        let gas_fee_cap = msg.gas_fee_cap.clone();
        msg.gas_premium = TokenAmount::zero();
        msg.gas_fee_cap = TokenAmount::zero();
        // estimate the gas limit and assign it to the message
        // revert any changes because we'll repeat the estimation
        let (state, (ret, _)) = state.call(msg.clone()).await?;
        tracing::debug!(
            gas_used = ret.msg_receipt.gas_used,
            exit_code = ret.msg_receipt.exit_code.value(),
            "estimated gassed message"
        );
        if !ret.msg_receipt.exit_code.is_success() {
            // if the message fail we can't estimate the gas.
            return Ok((
                state,
                Some(GasEstimate {
                    exit_code: ret.msg_receipt.exit_code,
                    info: ret.failure_info.map(|x| x.to_string()).unwrap_or_default(),
                    return_data: ret.msg_receipt.return_data,
                    gas_limit: 0,
                }),
            ));
        }
        // Overestimate the observed gas usage to leave some headroom.
        msg.gas_limit = (ret.msg_receipt.gas_used as f64 * self.gas_overestimation_rate) as u64;
        if gas_premium.is_zero() {
            // We need to set the gas_premium to some value other than zero for the
            // gas estimation to work accurately (I really don't know why this is
            // the case but after a lot of testing, setting this value to zero rejects the transaction)
            msg.gas_premium = TokenAmount::from_nano(BigInt::from(1));
        } else {
            msg.gas_premium = gas_premium;
        }
        // Same for the gas_fee_cap, not setting the fee cap leads to the message
        // being sent after the estimation to fail.
        if gas_fee_cap.is_zero() {
            // TODO: In Lotus historical values of the base fee and a more accurate overestimation is performed
            // for the fee cap. If we issues with messages going through let's consider the historical analysis.
            // For now we are disregarding the base_fee so I don't think this is needed here.
            // Filecoin clamps the gas premium at GasFeeCap - BaseFee, if lower than the
            // specified premium. Returns 0 if GasFeeCap is less than BaseFee.
            // see https://spec.filecoin.io/#section-systems.filecoin_vm.message.message-semantic-validation
            msg.gas_fee_cap = msg.gas_premium.clone();
        } else {
            msg.gas_fee_cap = gas_fee_cap;
        }
        Ok((state, None))
    }
// This function performs a simpler implementation of the gas search than the one used in Lotus.
// Instead of using historical information of the gas limit for other messages, it searches
// for a valid gas limit for the current message in isolation.
async fn gas_search(
&self,
mut state: FvmQueryState<DB>,
msg: &Message,
) -> anyhow::Result<(FvmQueryState<DB>, GasEstimate)> {
let mut curr_limit = msg.gas_limit;
loop {
let (st, est) = self
.estimation_call_with_limit(state, msg.clone(), curr_limit)
.await?;
if let Some(est) = est {
return Ok((st, est));
} else {
state = st;
}
curr_limit = (curr_limit as f64 * self.gas_search_step) as u64;
if curr_limit > BLOCK_GAS_LIMIT {
let est = GasEstimate {
exit_code: ExitCode::OK,
info: "".to_string(),
return_data: RawBytes::default(),
gas_limit: BLOCK_GAS_LIMIT,
};
return Ok((state, est));
}
}
// TODO: For a more accurate gas estimation we could track the low and the high
// of the search and make higher steps (e.g. `GAS_SEARCH_STEP = 2`).
// Once an interval is found of [low, high] for which the message
// succeeds, we make a finer-grained within that interval.
// At this point, I don't think is worth being that accurate as long as it works.
}
async fn estimation_call_with_limit(
&self,
state: FvmQueryState<DB>,
mut msg: Message,
limit: u64,
) -> anyhow::Result<(FvmQueryState<DB>, Option<GasEstimate>)> {
msg.gas_limit = limit;
// set message nonce to zero so the right one is picked up
msg.sequence = 0;
let (state, (apply_ret, _)) = state.call(msg).await?;
let ret = GasEstimate {
exit_code: apply_ret.msg_receipt.exit_code,
info: apply_ret
.failure_info
.map(|x| x.to_string())
.unwrap_or_default(),
return_data: apply_ret.msg_receipt.return_data,
gas_limit: apply_ret.msg_receipt.gas_used,
};
// if the message succeeded or failed with a different error than `SYS_OUT_OF_GAS`,
// immediately return as we either succeeded finding the right gas estimation,
// or something non-related happened.
if ret.exit_code == ExitCode::OK || ret.exit_code != ExitCode::SYS_OUT_OF_GAS {
return Ok((state, Some(ret)));
}
Ok((state, None))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/custom_kernel.rs | fendermint/vm/interpreter/src/fvm/custom_kernel.rs | // Copyright 2021-2023 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::sync::OnceLock;

use ambassador::Delegate;
use cid::Cid;
use fvm::call_manager::CallManager;
use fvm::gas::Gas;
use fvm::kernel::prelude::*;
use fvm::kernel::Result;
use fvm::kernel::{
    ActorOps, CryptoOps, DebugOps, EventOps, IpldBlockOps, MessageOps, NetworkOps, RandomnessOps,
    SelfOps, SendOps, SyscallHandler, UpgradeOps,
};
use fvm::syscalls::Linker;
use fvm::DefaultKernel;
use fvm_shared::clock::ChainEpoch;
use fvm_shared::randomness::RANDOMNESS_LENGTH;
use fvm_shared::sys::out::network::NetworkContext;
use fvm_shared::sys::out::vm::MessageContext;
use fvm_shared::{address::Address, econ::TokenAmount, ActorID, MethodNum};
use regex::Regex;
/// A [`DefaultKernel`] wrapper whose only behavioral difference is that
/// `DebugOps::log` routes actor log output through `tracing`.
///
/// Every other syscall group is delegated verbatim to the inner kernel via
/// `ambassador`'s `#[delegate]` attributes below.
#[derive(Delegate)]
#[delegate(IpldBlockOps, where = "C: CallManager")]
#[delegate(ActorOps, where = "C: CallManager")]
#[delegate(CryptoOps, where = "C: CallManager")]
#[delegate(EventOps, where = "C: CallManager")]
#[delegate(MessageOps, where = "C: CallManager")]
#[delegate(NetworkOps, where = "C: CallManager")]
#[delegate(RandomnessOps, where = "C: CallManager")]
#[delegate(SelfOps, where = "C: CallManager")]
#[delegate(SendOps<K>, generics = "K", where = "K: Kernel")]
#[delegate(UpgradeOps<K>, generics = "K", where = "K: Kernel")]
pub struct LoggingKernel<C>(pub DefaultKernel<C>);
impl<C> Kernel for LoggingKernel<C>
where
    C: CallManager,
{
    type CallManager = C;
    type Limiter = <DefaultKernel<C> as Kernel>::Limiter;
    // Everything below forwards to the wrapped `DefaultKernel`; this wrapper
    // adds no behavior of its own at the `Kernel` trait level.
    fn into_inner(self) -> (Self::CallManager, BlockRegistry)
    where
        Self: Sized,
    {
        self.0.into_inner()
    }
    /// Construct the wrapper by building a `DefaultKernel` with the same arguments.
    fn new(
        mgr: C,
        blocks: BlockRegistry,
        caller: ActorID,
        actor_id: ActorID,
        method: MethodNum,
        value_received: TokenAmount,
        read_only: bool,
    ) -> Self {
        LoggingKernel(DefaultKernel::new(
            mgr,
            blocks,
            caller,
            actor_id,
            method,
            value_received,
            read_only,
        ))
    }
    fn machine(&self) -> &<Self::CallManager as CallManager>::Machine {
        self.0.machine()
    }
    fn limiter_mut(&mut self) -> &mut Self::Limiter {
        self.0.limiter_mut()
    }
    fn gas_available(&self) -> Gas {
        self.0.gas_available()
    }
    fn charge_gas(&self, name: &str, compute: Gas) -> Result<GasTimer> {
        self.0.charge_gas(name, compute)
    }
}
impl<K> SyscallHandler<K> for LoggingKernel<K::CallManager>
where
    K: Kernel
        + ActorOps
        + IpldBlockOps
        + SendOps
        + UpgradeOps
        + CryptoOps
        + DebugOps
        + EventOps
        + MessageOps
        + NetworkOps
        + RandomnessOps
        + SelfOps,
{
    // Link the standard syscall set; this wrapper introduces no extra syscalls.
    fn link_syscalls(linker: &mut Linker<K>) -> anyhow::Result<()> {
        DefaultKernel::link_syscalls(linker)
    }
}
impl<C> DebugOps for LoggingKernel<C>
where
C: CallManager,
{
fn log(&self, msg: String) {
let (level, actor_name, actor_id, message) = parse_log(&msg).unwrap();
if level == "INFO" {
tracing::info!("Actor {}({}) - {}", actor_name, actor_id, message);
} else if level == "DEBUG" {
tracing::debug!("Actor {}({}) - {}", actor_name, actor_id, message);
} else if level == "WARN" {
tracing::warn!("Actor {}({}) - {}", actor_name, actor_id, message);
} else if level == "ERROR" {
tracing::error!("Actor {}({}) - {}", actor_name, actor_id, message);
}
}
fn debug_enabled(&self) -> bool {
self.0.debug_enabled()
}
fn store_artifact(&self, name: &str, data: &[u8]) -> Result<()> {
self.0.store_artifact(name, data)
}
}
fn parse_log(log: &str) -> Option<(String, String, i32, String)> {
let re = Regex::new(r"(?s)\[(.*?)\]<(.*?)::(\d+)> (.*)").unwrap();
if let Some(captures) = re.captures(log) {
let first_string = captures.get(1)?.as_str().to_string();
let second_string = captures.get(2)?.as_str().to_string();
let number: i32 = captures.get(3)?.as_str().parse().ok()?;
let fourth_string = captures.get(4)?.as_str().to_string();
Some((first_string, second_string, number, fourth_string))
} else {
None
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/exec.rs | fendermint/vm/interpreter/src/fvm/exec.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::Context;
use async_trait::async_trait;
use fvm_ipld_encoding::CborStore;
use std::collections::HashMap;
use tendermint::block::Height;
use fendermint_vm_actor_interface::{
cetf::{CETFSYSCALL_ACTOR_ADDR, CETFSYSCALL_ACTOR_ID},
chainmetadata, cron, system,
};
use fvm::executor::ApplyRet;
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::{address::Address, ActorID, MethodNum, BLOCK_GAS_LIMIT};
use tendermint_rpc::Client;
use crate::ExecInterpreter;
use super::{
checkpoint::{self, bft_power_table, PowerUpdates},
state::FvmExecState,
FvmMessage, FvmMessageInterpreter,
};
/// The return value extended with some things from the message that
/// might not be available to the caller, because of the message lookups
/// and transformations that happen along the way, e.g. where we need
/// a field, we might just have a CID.
pub struct FvmApplyRet {
    /// Raw execution result from the FVM.
    pub apply_ret: ApplyRet,
    /// Sender address of the executed message.
    pub from: Address,
    /// Recipient address of the executed message.
    pub to: Address,
    /// Method number invoked on the recipient.
    pub method_num: MethodNum,
    /// Gas limit the message was executed with.
    pub gas_limit: u64,
    /// Delegated addresses of event emitters, if they have one.
    pub emitters: HashMap<ActorID, Address>,
}
#[async_trait]
impl<DB, TC> ExecInterpreter for FvmMessageInterpreter<DB, TC>
where
DB: Blockstore + Clone + 'static + Send + Sync,
TC: Client + Clone + Send + Sync + 'static,
{
type State = FvmExecState<DB>;
type Message = FvmMessage;
type BeginOutput = FvmApplyRet;
type DeliverOutput = FvmApplyRet;
/// Return validator power updates.
/// Currently ignoring events as there aren't any emitted by the smart contract,
/// but keep in mind that if there were, those would have to be propagated.
type EndOutput = PowerUpdates;
    /// Block-begin processing: run any scheduled upgrade, execute the cron tick,
    /// push the block hash to the chainmetadata actor, and enable the CETF actor
    /// once every CometBFT validator has registered a key.
    ///
    /// The returned receipt is the one from the cron message.
    async fn begin(
        &self,
        mut state: Self::State,
    ) -> anyhow::Result<(Self::State, Self::BeginOutput)> {
        // Block height (FVM epoch) as sequence is intentional
        let height = state.block_height();
        // check for upgrades in the upgrade_scheduler
        let chain_id = state.chain_id();
        let block_height: u64 = state.block_height().try_into().unwrap();
        if let Some(upgrade) = self.upgrade_scheduler.get(chain_id, block_height) {
            // TODO: consider using an explicit tracing enum for upgrades
            tracing::info!(?chain_id, height = block_height, "Executing an upgrade");
            // there is an upgrade scheduled for this height, lets run the migration
            let res = upgrade.execute(&mut state).context("upgrade failed")?;
            if let Some(new_app_version) = res {
                state.update_app_version(|app_version| {
                    *app_version = new_app_version;
                });
                tracing::info!(app_version = state.app_version(), "upgraded app version");
            }
        }
        // Arbitrarily large gas limit for cron (matching how Forest does it, which matches Lotus).
        // XXX: Our blocks are not necessarily expected to be 30 seconds apart, so the gas limit might be wrong.
        let gas_limit = BLOCK_GAS_LIMIT * 10000;
        let from = system::SYSTEM_ACTOR_ADDR;
        let to = cron::CRON_ACTOR_ADDR;
        let method_num = cron::Method::EpochTick as u64;
        // Cron.
        let msg = FvmMessage {
            from,
            to,
            sequence: height as u64,
            gas_limit,
            method_num,
            params: Default::default(),
            value: Default::default(),
            version: Default::default(),
            gas_fee_cap: Default::default(),
            gas_premium: Default::default(),
        };
        let (apply_ret, emitters) = state.execute_implicit(msg)?;
        // Failing cron would be fatal.
        if let Some(err) = apply_ret.failure_info {
            anyhow::bail!("failed to apply block cron message: {}", err);
        }
        // Push the current block hash to the chainmetadata actor
        if self.push_chain_meta {
            if let Some(block_hash) = state.block_hash() {
                let params = fvm_ipld_encoding::RawBytes::serialize(
                    fendermint_actor_chainmetadata::PushBlockParams {
                        epoch: height,
                        block: block_hash,
                    },
                )?;
                let msg = FvmMessage {
                    from: system::SYSTEM_ACTOR_ADDR,
                    to: chainmetadata::CHAINMETADATA_ACTOR_ADDR,
                    sequence: height as u64,
                    gas_limit,
                    method_num: fendermint_actor_chainmetadata::Method::PushBlockHash as u64,
                    params,
                    value: Default::default(),
                    version: Default::default(),
                    gas_fee_cap: Default::default(),
                    gas_premium: Default::default(),
                };
                let (apply_ret, _) = state.execute_implicit(msg)?;
                if let Some(err) = apply_ret.failure_info {
                    anyhow::bail!("failed to apply chainmetadata message: {}", err);
                }
            }
        }
        if state.block_height() as u64 > 0 {
            // Addresses derived from the CometBFT validator set at this height.
            let bft_keys =
                bft_power_table(&self.client, Height::try_from(state.block_height() as u64)?)
                    .await
                    .context("failed to get power table")?
                    .0
                    .into_iter()
                    .map(|k| k.public_key.0)
                    .map(|pk| {
                        Address::new_secp256k1(&pk.serialize())
                            .context("failed to turn TM public key into address")
                    })
                    .collect::<anyhow::Result<Vec<_>>>()?;
            // Read the CETF actor's state directly from the state tree.
            let (is_enabled, registered_keys) = {
                if let Some(act_st) = state.state_tree().get_actor(CETFSYSCALL_ACTOR_ID)? {
                    let st: fendermint_actor_cetf::State =
                        state.state_tree().store().get_cbor(&act_st.state)?.unwrap();
                    Ok((
                        st.enabled,
                        st.get_validators_keymap(state.state_tree().store())?,
                    ))
                } else {
                    Err(anyhow::anyhow!("no CETF actor found!"))
                }
            }?;
            if !is_enabled {
                // Enable the actor only once every validator has registered a key.
                let mut to_enable = true;
                for key in bft_keys {
                    to_enable &= registered_keys.contains_key(&key)?;
                }
                if to_enable {
                    tracing::info!("All keys found in map! Enabling CETF actor");
                    let msg = FvmMessage {
                        from: system::SYSTEM_ACTOR_ADDR,
                        to: CETFSYSCALL_ACTOR_ADDR,
                        sequence: height as u64,
                        gas_limit,
                        method_num: fendermint_actor_cetf::Method::Enable as u64,
                        params: Default::default(),
                        value: Default::default(),
                        version: Default::default(),
                        gas_fee_cap: Default::default(),
                        gas_premium: Default::default(),
                    };
                    let (apply_ret, _) = state.execute_implicit(msg)?;
                    if let Some(err) = apply_ret.failure_info {
                        anyhow::bail!("Failed to apply cetf message: {}", err);
                    } else {
                        tracing::info!("CETF actor enable successful");
                    }
                }
            }
        }
        // The outer `apply_ret`/`emitters` are still the ones from the cron message;
        // the chainmetadata and CETF results above were shadowed locals.
        let ret = FvmApplyRet {
            apply_ret,
            from,
            to,
            method_num,
            gas_limit,
            emitters,
        };
        Ok((state, ret))
    }
async fn deliver(
&self,
mut state: Self::State,
msg: Self::Message,
) -> anyhow::Result<(Self::State, Self::DeliverOutput)> {
let from = msg.from;
let to = msg.to;
let method_num = msg.method_num;
let gas_limit = msg.gas_limit;
let (apply_ret, emitters) = if from == system::SYSTEM_ACTOR_ADDR {
state.execute_implicit(msg)?
} else {
state.execute_explicit(msg)?
};
tracing::info!(
height = state.block_height(),
from = from.to_string(),
to = to.to_string(),
method_num = method_num,
exit_code = apply_ret.msg_receipt.exit_code.value(),
gas_used = apply_ret.msg_receipt.gas_used,
"tx delivered"
);
let ret = FvmApplyRet {
apply_ret,
from,
to,
method_num,
gas_limit,
emitters,
};
Ok((state, ret))
}
async fn end(&self, mut state: Self::State) -> anyhow::Result<(Self::State, Self::EndOutput)> {
let updates = if let Some((checkpoint, updates)) =
checkpoint::maybe_create_checkpoint(&self.gateway, &mut state)
.context("failed to create checkpoint")?
{
// Asynchronously broadcast signature, if validating.
if let Some(ref ctx) = self.validator_ctx {
// Do not resend past signatures.
if !self.syncing().await {
// Fetch any incomplete checkpoints synchronously because the state can't be shared across threads.
let incomplete_checkpoints =
checkpoint::unsigned_checkpoints(&self.gateway, &mut state, ctx.public_key)
.context("failed to fetch incomplete checkpoints")?;
debug_assert!(
incomplete_checkpoints
.iter()
.any(|cp| cp.block_height == checkpoint.block_height
&& cp.block_hash == checkpoint.block_hash),
"the current checkpoint is incomplete"
);
let client = self.client.clone();
let gateway = self.gateway.clone();
let chain_id = state.chain_id();
let height = checkpoint.block_height;
let validator_ctx = ctx.clone();
tokio::spawn(async move {
let res = checkpoint::broadcast_incomplete_signatures(
&client,
&validator_ctx,
&gateway,
chain_id,
incomplete_checkpoints,
)
.await;
if let Err(e) = res {
tracing::error!(error =? e, height = height.as_u64(), "error broadcasting checkpoint signature");
}
});
}
}
updates
} else {
PowerUpdates::default()
};
Ok((state, updates))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/topdown.rs | fendermint/vm/interpreter/src/fvm/topdown.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Topdown finality related util functions
use crate::chain::TopDownFinalityProvider;
use crate::fvm::state::ipc::GatewayCaller;
use crate::fvm::state::FvmExecState;
use crate::fvm::FvmApplyRet;
use anyhow::Context;
use fendermint_vm_topdown::{BlockHeight, IPCParentFinality, ParentViewProvider};
use fvm_ipld_blockstore::Blockstore;
use ipc_api::cross::IpcEnvelope;
use super::state::ipc::tokens_to_mint;
/// Commit the parent finality. Returns the height that the previous parent finality is committed and
/// the committed finality itself. If there is no parent finality committed, genesis epoch is returned.
pub async fn commit_finality<DB>(
gateway_caller: &GatewayCaller<DB>,
state: &mut FvmExecState<DB>,
finality: IPCParentFinality,
provider: &TopDownFinalityProvider,
) -> anyhow::Result<(BlockHeight, Option<IPCParentFinality>)>
where
DB: Blockstore + Sync + Send + Clone + 'static,
{
let (prev_height, prev_finality) =
if let Some(prev_finality) = gateway_caller.commit_parent_finality(state, finality)? {
(prev_finality.height, Some(prev_finality))
} else {
(provider.genesis_epoch()?, None)
};
tracing::debug!(
"commit finality parsed: prev_height {prev_height}, prev_finality: {prev_finality:?}"
);
Ok((prev_height, prev_finality))
}
/// Execute the top down messages implicitly. Before the execution, mint to the gateway of the funds
/// transferred in the messages, and increase the circulating supply with the incoming value.
pub async fn execute_topdown_msgs<DB>(
gateway_caller: &GatewayCaller<DB>,
state: &mut FvmExecState<DB>,
messages: Vec<IpcEnvelope>,
) -> anyhow::Result<FvmApplyRet>
where
DB: Blockstore + Sync + Send + Clone + 'static,
{
let minted_tokens = tokens_to_mint(&messages);
tracing::debug!(token = minted_tokens.to_string(), "tokens to mint in child");
if !minted_tokens.is_zero() {
gateway_caller
.mint_to_gateway(state, minted_tokens.clone())
.context("failed to mint to gateway")?;
state.update_circ_supply(|circ_supply| {
*circ_supply += minted_tokens;
});
}
gateway_caller.apply_cross_messages(state, messages)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/genesis.rs | fendermint/vm/interpreter/src/fvm/state/genesis.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::sync::Arc;
use anyhow::{anyhow, bail, Context};
use cid::{multihash::Code, Cid};
use ethers::{abi::Tokenize, core::abi::Abi};
use fendermint_actors::Manifest as CustomActorManifest;
use fendermint_vm_actor_interface::{
account::{self, ACCOUNT_ACTOR_CODE_ID},
eam::{self, EthAddress},
ethaccount::ETHACCOUNT_ACTOR_CODE_ID,
evm,
init::{self, builtin_actor_eth_addr},
multisig::{self, MULTISIG_ACTOR_CODE_ID},
system, EMPTY_ARR,
};
use fendermint_vm_core::Timestamp;
use fendermint_vm_genesis::{Account, Multisig, PowerScale};
use fvm::{
engine::MultiEngine,
machine::Manifest,
state_tree::{ActorState, StateTree},
};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_car::load_car_unchecked;
use fvm_ipld_encoding::{BytesDe, CborStore, RawBytes};
use fvm_shared::{
address::{Address, Payload},
clock::ChainEpoch,
econ::TokenAmount,
message::Message,
state::StateTreeVersion,
version::NetworkVersion,
ActorID, BLOCK_GAS_LIMIT, METHOD_CONSTRUCTOR,
};
use num_traits::Zero;
use serde::{de, Serialize};
use super::{exec::MachineBlockstore, FvmExecState, FvmStateParams};
/// Create an empty state tree.
pub fn empty_state_tree<DB: Blockstore>(store: DB) -> anyhow::Result<StateTree<DB>> {
let state_tree = StateTree::new(store, StateTreeVersion::V5)?;
Ok(state_tree)
}
/// Initially we can only set up an empty state tree.
/// Then we have to create the built-in actors' state that the FVM relies on.
/// Then we can instantiate an FVM execution engine, which we can use to construct FEVM based actors.
enum Stage<DB: Blockstore + Clone + 'static> {
Tree(StateTree<DB>),
Exec(FvmExecState<DB>),
}
/// A state we create for the execution of genesis initialisation.
pub struct FvmGenesisState<DB>
where
DB: Blockstore + Clone + 'static,
{
pub manifest_data_cid: Cid,
pub manifest: Manifest,
pub custom_actor_manifest: CustomActorManifest,
store: DB,
multi_engine: Arc<MultiEngine>,
stage: Stage<DB>,
}
async fn parse_bundle<DB: Blockstore>(store: &DB, bundle: &[u8]) -> anyhow::Result<(u32, Cid)> {
let bundle_roots = load_car_unchecked(&store, bundle).await?;
let bundle_root = match bundle_roots.as_slice() {
[root] => root,
roots => {
return Err(anyhow!(
"expected one root in builtin actor bundle; got {}",
roots.len()
))
}
};
let (manifest_version, manifest_data_cid): (u32, Cid) = match store.get_cbor(bundle_root)? {
Some(vd) => vd,
None => {
return Err(anyhow!(
"no manifest information in bundle root {}",
bundle_root
))
}
};
Ok((manifest_version, manifest_data_cid))
}
impl<DB> FvmGenesisState<DB>
where
DB: Blockstore + Clone + 'static,
{
pub async fn new(
store: DB,
multi_engine: Arc<MultiEngine>,
bundle: &[u8],
custom_actor_bundle: &[u8],
) -> anyhow::Result<Self> {
// Load the builtin actor bundle.
let (manifest_version, manifest_data_cid): (u32, Cid) =
parse_bundle(&store, bundle).await?;
let manifest = Manifest::load(&store, &manifest_data_cid, manifest_version)?;
// Load the custom actor bundle.
let (custom_manifest_version, custom_manifest_data_cid): (u32, Cid) =
parse_bundle(&store, custom_actor_bundle).await?;
let custom_actor_manifest =
CustomActorManifest::load(&store, &custom_manifest_data_cid, custom_manifest_version)?;
let state_tree = empty_state_tree(store.clone())?;
let state = Self {
manifest_data_cid,
manifest,
custom_actor_manifest,
store,
multi_engine,
stage: Stage::Tree(state_tree),
};
Ok(state)
}
/// Instantiate the execution state, once the basic genesis parameters are known.
///
/// This must be called before we try to instantiate any EVM actors in genesis.
pub fn init_exec_state(
&mut self,
timestamp: Timestamp,
network_version: NetworkVersion,
base_fee: TokenAmount,
circ_supply: TokenAmount,
chain_id: u64,
power_scale: PowerScale,
) -> anyhow::Result<()> {
self.stage = match self.stage {
Stage::Exec(_) => bail!("execution engine already initialized"),
Stage::Tree(ref mut state_tree) => {
// We have to flush the data at this point.
let state_root = state_tree.flush()?;
let params = FvmStateParams {
state_root,
timestamp,
network_version,
base_fee,
circ_supply,
chain_id,
power_scale,
app_version: 0,
};
let exec_state =
FvmExecState::new(self.store.clone(), &self.multi_engine, 1, params)
.context("failed to create exec state")?;
Stage::Exec(exec_state)
}
};
Ok(())
}
/// Flush the data to the block store.
pub fn commit(self) -> anyhow::Result<Cid> {
match self.stage {
Stage::Tree(mut state_tree) => Ok(state_tree.flush()?),
Stage::Exec(exec_state) => match exec_state.commit()? {
(_, _, true) => bail!("FVM parameters are not expected to be updated in genesis"),
(cid, _, _) => Ok(cid),
},
}
}
/// Replaces the built in actor with custom actor. This assumes the system actor is already
/// created, else it would throw an error.
pub fn replace_builtin_actor(
&mut self,
built_in_actor_name: &str,
built_in_actor_id: ActorID,
custom_actor_name: &str,
state: &impl Serialize,
balance: TokenAmount,
delegated_address: Option<Address>,
) -> anyhow::Result<()> {
let code_cid = self
.update_system_actor_manifest(built_in_actor_name, custom_actor_name)
.context("failed to replace system actor manifest")?;
self.create_actor_internal(
code_cid,
built_in_actor_id,
state,
balance,
delegated_address,
)
}
/// Update the manifest id of the system actor, returns the code cid of the replacing
/// custom actor.
fn update_system_actor_manifest(
&mut self,
built_in_actor_name: &str,
custom_actor_name: &str,
) -> anyhow::Result<Cid> {
let code = *self
.custom_actor_manifest
.code_by_name(custom_actor_name)
.ok_or_else(|| anyhow!("replacement {custom_actor_name} actor not found"))?;
let manifest_cid = self
.get_actor_state::<system::State>(system::SYSTEM_ACTOR_ID)?
.builtin_actors;
let mut built_in_actors: Vec<(String, Cid)> = self
.store()
.get_cbor(&manifest_cid)
.context("could not load built in actors")?
.ok_or_else(|| anyhow!("cannot find manifest cid {}", manifest_cid))?;
for (_, code_cid) in built_in_actors
.iter_mut()
.filter(|(n, _)| n == built_in_actor_name)
{
*code_cid = code
}
let builtin_actors = self.put_state(built_in_actors)?;
let new_cid = self.put_state(system::State { builtin_actors })?;
let mutate = |actor_state: &mut ActorState| {
actor_state.state = new_cid;
Ok(())
};
self.with_state_tree(
|s| s.mutate_actor(system::SYSTEM_ACTOR_ID, mutate),
|s| s.mutate_actor(system::SYSTEM_ACTOR_ID, mutate),
)?;
Ok(code)
}
pub fn create_builtin_actor(
&mut self,
code_id: u32,
id: ActorID,
state: &impl Serialize,
balance: TokenAmount,
delegated_address: Option<Address>,
) -> anyhow::Result<()> {
// Retrieve the CID of the actor code by the numeric ID.
let code_cid = *self
.manifest
.code_by_id(code_id)
.ok_or_else(|| anyhow!("can't find {code_id} in the manifest"))?;
self.create_actor_internal(code_cid, id, state, balance, delegated_address)
}
pub fn create_custom_actor(
&mut self,
name: &str,
id: ActorID,
state: &impl Serialize,
balance: TokenAmount,
delegated_address: Option<Address>,
) -> anyhow::Result<()> {
// Retrieve the CID of the actor code by the numeric ID.
let code_cid = *self
.custom_actor_manifest
.code_by_name(name)
.ok_or_else(|| anyhow!("can't find actor: {name} in the custom actor manifest"))?;
self.create_actor_internal(code_cid, id, state, balance, delegated_address)
}
/// Creates an actor using code specified in the manifest.
fn create_actor_internal(
&mut self,
code_cid: Cid,
id: ActorID,
state: &impl Serialize,
balance: TokenAmount,
delegated_address: Option<Address>,
) -> anyhow::Result<()> {
let state_cid = self.put_state(state)?;
let actor_state = ActorState {
code: code_cid,
state: state_cid,
sequence: 0,
balance,
delegated_address,
};
self.with_state_tree(
|s| s.set_actor(id, actor_state.clone()),
|s| s.set_actor(id, actor_state.clone()),
);
{
let cid = self.with_state_tree(|s| s.flush(), |s| s.flush())?;
tracing::debug!(
state_root = cid.to_string(),
actor_id = id,
"interim state root after actor creation"
);
}
Ok(())
}
pub fn create_account_actor(
&mut self,
acct: Account,
balance: TokenAmount,
ids: &init::AddressMap,
) -> anyhow::Result<()> {
let owner = acct.owner.0;
let id = ids
.get(&owner)
.ok_or_else(|| anyhow!("can't find ID for owner {owner}"))?;
match owner.payload() {
Payload::Secp256k1(_) => {
let state = account::State { address: owner };
self.create_builtin_actor(ACCOUNT_ACTOR_CODE_ID, *id, &state, balance, None)
}
Payload::Delegated(d) if d.namespace() == eam::EAM_ACTOR_ID => {
let state = EMPTY_ARR;
// NOTE: Here we could use the placeholder code ID as well.
self.create_builtin_actor(
ETHACCOUNT_ACTOR_CODE_ID,
*id,
&state,
balance,
Some(owner),
)
}
other => Err(anyhow!("unexpected actor owner: {other:?}")),
}
}
pub fn create_multisig_actor(
&mut self,
ms: Multisig,
balance: TokenAmount,
ids: &init::AddressMap,
next_id: ActorID,
) -> anyhow::Result<()> {
let mut signers = Vec::new();
// Make sure every signer has their own account.
for signer in ms.signers {
let id = ids
.get(&signer.0)
.ok_or_else(|| anyhow!("can't find ID for signer {}", signer.0))?;
if self
.with_state_tree(|s| s.get_actor(*id), |s| s.get_actor(*id))?
.is_none()
{
self.create_account_actor(Account { owner: signer }, TokenAmount::zero(), ids)?;
}
signers.push(*id)
}
// Now create a multisig actor that manages group transactions.
let state = multisig::State::new(
self.store(),
signers,
ms.threshold,
ms.vesting_start as ChainEpoch,
ms.vesting_duration as ChainEpoch,
balance.clone(),
)?;
self.create_builtin_actor(MULTISIG_ACTOR_CODE_ID, next_id, &state, balance, None)
}
/// Deploy an EVM contract with a fixed ID and some constructor arguments.
///
/// Returns the hashed Ethereum address we can use to invoke the contract.
pub fn create_evm_actor_with_cons<T: Tokenize>(
&mut self,
id: ActorID,
abi: &Abi,
bytecode: Vec<u8>,
constructor_params: T,
) -> anyhow::Result<EthAddress> {
let constructor = abi
.constructor()
.ok_or_else(|| anyhow!("contract doesn't have a constructor"))?;
let initcode = constructor
.encode_input(bytecode, &constructor_params.into_tokens())
.context("failed to encode constructor input")?;
self.create_evm_actor(id, initcode)
}
/// Deploy an EVM contract.
///
/// Returns the hashed Ethereum address we can use to invoke the contract.
pub fn create_evm_actor(
&mut self,
id: ActorID,
initcode: Vec<u8>,
) -> anyhow::Result<EthAddress> {
// Here we are circumventing the normal way of creating an actor through the EAM and jump ahead to what the `Init` actor would do:
// https://github.com/filecoin-project/builtin-actors/blob/421855a7b968114ac59422c1faeca968482eccf4/actors/init/src/lib.rs#L97-L107
// Based on how the EAM constructs it.
let params = evm::ConstructorParams {
// We have to pick someone as creator for these quasi built-in types.
creator: EthAddress::from_id(system::SYSTEM_ACTOR_ID),
initcode: RawBytes::from(initcode),
};
let params = RawBytes::serialize(params)?;
// When a contract is constructed the EVM actor verifies that it has an Ethereum delegated address.
// This has been inserted into the Init actor state as well.
let f0_addr = Address::new_id(id);
let f4_addr = Address::from(builtin_actor_eth_addr(id));
let msg = Message {
version: 0,
from: init::INIT_ACTOR_ADDR, // asserted by the constructor
to: f0_addr,
sequence: 0, // We will use implicit execution which doesn't check or modify this.
value: TokenAmount::zero(),
method_num: METHOD_CONSTRUCTOR,
params,
gas_limit: BLOCK_GAS_LIMIT,
gas_fee_cap: TokenAmount::zero(),
gas_premium: TokenAmount::zero(),
};
// Create an empty actor to receive the call.
self.create_builtin_actor(
evm::EVM_ACTOR_CODE_ID,
id,
&EMPTY_ARR,
TokenAmount::zero(),
Some(f4_addr),
)
.context("failed to create empty actor")?;
let (apply_ret, _) = match self.stage {
Stage::Tree(_) => bail!("execution engine not initialized"),
Stage::Exec(ref mut exec_state) => exec_state
.execute_implicit(msg)
.context("failed to execute message")?,
};
{
let cid = self.with_state_tree(|s| s.flush(), |s| s.flush())?;
tracing::debug!(
state_root = cid.to_string(),
actor_id = id,
"interim state root after EVM actor initialisation"
);
}
if !apply_ret.msg_receipt.exit_code.is_success() {
let error_data = apply_ret.msg_receipt.return_data;
let error_data = if error_data.is_empty() {
Vec::new()
} else {
// The EVM actor might return some revert in the output.
error_data
.deserialize::<BytesDe>()
.map(|bz| bz.0)
.context("failed to deserialize error data")?
};
bail!(
"failed to deploy EVM actor: code = {}; data = 0x{}; info = {:?}",
apply_ret.msg_receipt.exit_code,
hex::encode(error_data),
apply_ret.failure_info,
);
}
let addr: [u8; 20] = match f4_addr.payload() {
Payload::Delegated(addr) => addr.subaddress().try_into().expect("hash is 20 bytes"),
other => panic!("not an f4 address: {other:?}"),
};
Ok(EthAddress(addr))
}
pub fn store(&self) -> &DB {
&self.store
}
pub fn exec_state(&mut self) -> Option<&mut FvmExecState<DB>> {
match self.stage {
Stage::Tree(_) => None,
Stage::Exec(ref mut exec) => Some(exec),
}
}
pub fn into_exec_state(self) -> Result<FvmExecState<DB>, Self> {
match self.stage {
Stage::Tree(_) => Err(self),
Stage::Exec(exec) => Ok(exec),
}
}
fn put_state(&mut self, state: impl Serialize) -> anyhow::Result<Cid> {
self.store()
.put_cbor(&state, Code::Blake2b256)
.context("failed to store actor state")
}
/// A horrible way of unifying the state tree under the two different stages.
///
/// We only use this a few times, so perhaps it's not that much of a burden to duplicate some code.
fn with_state_tree<F, G, T>(&mut self, f: F, g: G) -> T
where
F: FnOnce(&mut StateTree<DB>) -> T,
G: FnOnce(&mut StateTree<MachineBlockstore<DB>>) -> T,
{
match self.stage {
Stage::Tree(ref mut state_tree) => f(state_tree),
Stage::Exec(ref mut exec_state) => g(exec_state.state_tree_mut()),
}
}
/// Query the actor state from the state tree under the two different stages.
fn get_actor_state<T: de::DeserializeOwned>(&self, actor: ActorID) -> anyhow::Result<T> {
let actor_state_cid = match &self.stage {
Stage::Tree(s) => s.get_actor(actor)?,
Stage::Exec(s) => s.state_tree().get_actor(actor)?,
}
.ok_or_else(|| anyhow!("actor state {actor} not found, is it deployed?"))?
.state;
self.store()
.get_cbor(&actor_state_cid)
.context("failed to get actor state by state cid")?
.ok_or_else(|| anyhow!("actor state by {actor_state_cid} not found"))
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/check.rs | fendermint/vm/interpreter/src/fvm/state/check.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use cid::Cid;
use fendermint_vm_core::chainid::HasChainID;
use fvm::state_tree::StateTree;
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::chainid::ChainID;
use crate::fvm::store::ReadOnlyBlockstore;
/// A state we create for the execution of all the messages in a block.
pub struct FvmCheckState<DB>
where
DB: Blockstore + Clone + 'static,
{
state_tree: StateTree<ReadOnlyBlockstore<DB>>,
chain_id: ChainID,
}
impl<DB> FvmCheckState<DB>
where
DB: Blockstore + Clone + 'static,
{
pub fn new(blockstore: DB, state_root: Cid, chain_id: ChainID) -> anyhow::Result<Self> {
// Sanity check that the blockstore contains the supplied state root.
if !blockstore
.has(&state_root)
.context("failed to load initial state-root")?
{
return Err(anyhow!(
"blockstore doesn't have the initial state-root {}",
state_root
));
}
// Create a new state tree from the supplied root.
let state_tree = {
let bstore = ReadOnlyBlockstore::new(blockstore);
StateTree::new_from_root(bstore, &state_root)?
};
let state = Self {
state_tree,
chain_id,
};
Ok(state)
}
pub fn state_tree_mut(&mut self) -> &mut StateTree<ReadOnlyBlockstore<DB>> {
&mut self.state_tree
}
}
impl<DB> HasChainID for FvmCheckState<DB>
where
DB: Blockstore + Clone + 'static,
{
fn chain_id(&self) -> ChainID {
self.chain_id
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/cetf.rs | fendermint/vm/interpreter/src/fvm/state/cetf.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::fvm::store::ReadOnlyBlockstore;
use anyhow::anyhow;
use cid::Cid;
use fendermint_actor_cetf::{BlockHeight, Tag};
use fendermint_vm_actor_interface::cetf::CETFSYSCALL_ACTOR_ID;
use fvm::state_tree::StateTree;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
/// Reads the CETF system actor state to retrieve the tag
pub fn get_tag_at_height<DB: Blockstore + Clone + 'static>(
db: DB,
state_root: &Cid,
height: BlockHeight,
) -> anyhow::Result<Option<Tag>> {
let bstore = ReadOnlyBlockstore::new(db);
let state_tree = StateTree::new_from_root(&bstore, state_root)?;
// get the actor state cid
let actor_state_cid = match state_tree.get_actor(CETFSYSCALL_ACTOR_ID) {
Ok(Some(actor_state)) => actor_state.state,
Ok(None) => {
return Err(anyhow!(
"CETF actor id ({}) not found in state",
CETFSYSCALL_ACTOR_ID
));
}
Err(err) => {
return Err(anyhow!(
"failed to get CETF actor ({}) state, error: {}",
CETFSYSCALL_ACTOR_ID,
err
));
}
};
// get the actor state from the blockstore
let actor_state: fendermint_actor_cetf::State =
match state_tree.store().get_cbor(&actor_state_cid) {
Ok(Some(v)) => v,
Ok(None) => {
return Err(anyhow!(
"CETF actor ({}) state not found",
CETFSYSCALL_ACTOR_ID
));
}
Err(err) => {
return Err(anyhow!(
"failed to get CETF actor ({}) state, error: {}",
CETFSYSCALL_ACTOR_ID,
err
));
}
};
Ok(actor_state.get_tag_at_height(&bstore, &height)?)
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/mod.rs | fendermint/vm/interpreter/src/fvm/state/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
pub mod cetf;
mod check;
mod exec;
pub mod fevm;
mod genesis;
pub mod ipc;
mod query;
pub mod snapshot;
use std::sync::Arc;
pub use check::FvmCheckState;
pub use exec::{BlockHash, FvmExecState, FvmStateParams, FvmUpdatableParams};
pub use genesis::{empty_state_tree, FvmGenesisState};
pub use query::FvmQueryState;
use super::store::ReadOnlyBlockstore;
/// We use full state even for checking, to support certain client scenarios.
pub type CheckStateRef<DB> = Arc<tokio::sync::Mutex<Option<FvmExecState<ReadOnlyBlockstore<DB>>>>>;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/query.rs | fendermint/vm/interpreter/src/fvm/state/query.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::collections::HashMap;
use std::{cell::RefCell, sync::Arc};
use anyhow::{anyhow, Context};
use cid::Cid;
use fendermint_vm_actor_interface::system::{
is_system_addr, State as SystemState, SYSTEM_ACTOR_ADDR,
};
use fendermint_vm_core::chainid::HasChainID;
use fendermint_vm_message::query::ActorState;
use fvm::engine::MultiEngine;
use fvm::executor::ApplyRet;
use fvm::state_tree::StateTree;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use fvm_shared::{address::Address, chainid::ChainID, clock::ChainEpoch, ActorID};
use num_traits::Zero;
use serde::de;
use crate::fvm::{store::ReadOnlyBlockstore, FvmMessage};
use super::{CheckStateRef, FvmExecState, FvmStateParams};
/// The state over which we run queries. These can interrogate the IPLD block store or the state tree.
pub struct FvmQueryState<DB>
where
DB: CborStore + Blockstore + Clone + 'static,
{
/// A read-only wrapper around the blockstore, to make sure we aren't
/// accidentally committing any state. Any writes by the FVM will be
/// buffered; as long as we don't call `flush()` we should be fine.
store: ReadOnlyBlockstore<DB>,
/// Multi-engine for potential message execution.
multi_engine: Arc<MultiEngine>,
/// Height of block at which we are executing the queries.
block_height: ChainEpoch,
/// State at the height we want to query.
state_params: FvmStateParams,
/// Lazy loaded execution state.
exec_state: RefCell<Option<FvmExecState<ReadOnlyBlockstore<DB>>>>,
/// Lazy locked check state.
check_state: CheckStateRef<DB>,
/// Whether to try ot use the check state or not.
pending: bool,
}
impl<DB> FvmQueryState<DB>
where
DB: Blockstore + Clone + 'static,
{
pub fn new(
blockstore: DB,
multi_engine: Arc<MultiEngine>,
block_height: ChainEpoch,
state_params: FvmStateParams,
check_state: CheckStateRef<DB>,
pending: bool,
) -> anyhow::Result<Self> {
// Sanity check that the blockstore contains the supplied state root.
if !blockstore
.has(&state_params.state_root)
.context("failed to load state-root")?
{
return Err(anyhow!(
"blockstore doesn't have the state-root {}",
state_params.state_root
));
}
let state = Self {
store: ReadOnlyBlockstore::new(blockstore),
multi_engine,
block_height,
state_params,
exec_state: RefCell::new(None),
check_state,
pending,
};
Ok(state)
}
/// Do not make the changes in the call persistent. They should be run on top of
/// transactions added to the mempool, but they can run independent of each other.
///
/// There is no way to specify stacking in the API and only transactions should modify things.
fn with_revert<T, F>(
&self,
exec_state: &mut FvmExecState<ReadOnlyBlockstore<DB>>,
f: F,
) -> anyhow::Result<T>
where
F: FnOnce(&mut FvmExecState<ReadOnlyBlockstore<DB>>) -> anyhow::Result<T>,
{
exec_state.state_tree_mut().begin_transaction();
let res = f(exec_state);
exec_state
.state_tree_mut()
.end_transaction(true)
.expect("we just started a transaction");
res
}
/// If we know the query is over the state, cache the state tree.
async fn with_exec_state<T, F>(self, f: F) -> anyhow::Result<(Self, T)>
where
F: FnOnce(&mut FvmExecState<ReadOnlyBlockstore<DB>>) -> anyhow::Result<T>,
{
if self.pending {
// XXX: This will block all `check_tx` from going through and also all other queries.
let mut guard = self.check_state.lock().await;
if let Some(ref mut exec_state) = *guard {
let res = self.with_revert(exec_state, f);
drop(guard);
return res.map(|r| (self, r));
}
}
// Not using pending, or there is no pending state.
let mut cache = self.exec_state.borrow_mut();
if let Some(exec_state) = cache.as_mut() {
let res = self.with_revert(exec_state, f);
drop(cache);
return res.map(|r| (self, r));
}
let mut exec_state = FvmExecState::new(
self.store.clone(),
self.multi_engine.as_ref(),
self.block_height,
self.state_params.clone(),
)
.context("error creating execution state")?;
let res = self.with_revert(&mut exec_state, f);
*cache = Some(exec_state);
drop(cache);
res.map(|r| (self, r))
}
/// Read a CID from the underlying IPLD store.
pub fn store_get(&self, key: &Cid) -> anyhow::Result<Option<Vec<u8>>> {
self.store.get(key)
}
pub fn store_get_cbor<T: de::DeserializeOwned>(&self, key: &Cid) -> anyhow::Result<Option<T>> {
self.store.get_cbor(key)
}
pub fn read_only_store(&self) -> &ReadOnlyBlockstore<DB> {
&self.store
}
/// Get the state of an actor, if it exists.
pub async fn actor_state(
self,
addr: &Address,
) -> anyhow::Result<(Self, Option<(ActorID, ActorState)>)> {
self.with_exec_state(|exec_state| {
let state_tree = exec_state.state_tree_mut();
get_actor_state(state_tree, addr)
})
.await
}
/// Run a "read-only" message.
///
/// The results are never going to be flushed, so it's semantically read-only,
/// but it might write into the buffered block store the FVM creates. Running
/// multiple such messages results in their buffered effects stacking up,
/// unless it's called with `revert`.
pub async fn call(
self,
mut msg: FvmMessage,
) -> anyhow::Result<(Self, (ApplyRet, HashMap<u64, Address>))> {
self.with_exec_state(|s| {
// If the sequence is zero, treat it as a signal to use whatever is in the state.
if msg.sequence.is_zero() {
let state_tree = s.state_tree_mut();
if let Some(id) = state_tree.lookup_id(&msg.from)? {
state_tree.get_actor(id)?.map(|st| {
msg.sequence = st.sequence;
st
});
}
}
// If the gas_limit is zero, set it to the block gas limit so that call will not hit
// gas limit not set error. It is possible, in the future, to estimate the gas limit
// based on the account balance and base fee + premium for higher accuracy.
if msg.gas_limit == 0 {
msg.gas_limit = fvm_shared::BLOCK_GAS_LIMIT;
}
if is_system_addr(&msg.from) {
// Explicit execution requires `from` to be an account kind.
s.execute_implicit(msg)
} else {
s.execute_explicit(msg)
}
})
.await
}
pub fn state_params(&self) -> &FvmStateParams {
&self.state_params
}
/// Returns the registry of built-in actors as enrolled in the System actor.
pub async fn builtin_actors(self) -> anyhow::Result<(Self, Vec<(String, Cid)>)> {
let (s, sys_state) = {
let (s, state) = self.actor_state(&SYSTEM_ACTOR_ADDR).await?;
(s, state.ok_or(anyhow!("no system actor"))?.1)
};
let state: SystemState = s
.store
.get_cbor(&sys_state.state)
.context("failed to get system state")?
.ok_or(anyhow!("system actor state not found"))?;
let ret = s
.store
.get_cbor(&state.builtin_actors)
.context("failed to get builtin actors manifest")?
.ok_or(anyhow!("builtin actors manifest not found"))?;
Ok((s, ret))
}
pub fn block_height(&self) -> ChainEpoch {
self.block_height
}
pub fn pending(&self) -> bool {
self.pending
}
}
impl<DB> HasChainID for FvmQueryState<DB>
where
DB: Blockstore + Clone + 'static,
{
fn chain_id(&self) -> ChainID {
ChainID::from(self.state_params.chain_id)
}
}
fn get_actor_state<DB>(
state_tree: &StateTree<DB>,
addr: &Address,
) -> anyhow::Result<Option<(ActorID, ActorState)>>
where
DB: Blockstore,
{
if let Some(id) = state_tree.lookup_id(addr)? {
Ok(state_tree.get_actor(id)?.map(|st| {
let st = ActorState {
code: st.code,
state: st.state,
sequence: st.sequence,
balance: st.balance,
delegated_address: st.delegated_address,
};
(id, st)
}))
} else {
Ok(None)
}
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/snapshot.rs | fendermint/vm/interpreter/src/fvm/state/snapshot.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::fvm::state::FvmStateParams;
use crate::fvm::store::ReadOnlyBlockstore;
use anyhow::anyhow;
use cid::multihash::{Code, MultihashDigest};
use cid::Cid;
use futures_core::Stream;
use fvm::state_tree::StateTree;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_car::{load_car, load_car_unchecked, CarHeader};
use fvm_ipld_encoding::{from_slice, CborStore, DAG_CBOR};
use libipld::Ipld;
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::path::Path;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio_stream::StreamExt;
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
/// Height of the block a snapshot was taken at.
pub type BlockHeight = u64;
/// Version number of the snapshot format.
pub type SnapshotVersion = u32;
/// Taking snapshot of the current blockchain state.
///
/// New snapshot layouts should be added as new variants so that older
/// snapshots remain readable.
pub enum Snapshot<BS> {
    V1(V1Snapshot<BS>),
}
/// Contains the overall metadata for the snapshot: the format version and the
/// CID of the version-specific data root. This record is the single root of
/// the CAR file.
#[derive(Serialize, Deserialize)]
struct SnapshotMetadata {
    // Format version of the payload under `data_root_cid`.
    version: u8,
    // CID of the version-specific snapshot payload.
    data_root_cid: Cid,
}
/// The streamer that streams the snapshot into (Cid, Vec<u8>) for car file.
type SnapshotStreamer = Box<dyn Send + Unpin + Stream<Item = (Cid, Vec<u8>)>>;
impl<BS> Snapshot<BS>
where
    BS: Blockstore + 'static + Send + Clone,
{
    /// Take a snapshot of the state rooted at `state_params.state_root`
    /// at the given block height.
    pub fn new(
        store: BS,
        state_params: FvmStateParams,
        block_height: BlockHeight,
    ) -> anyhow::Result<Self> {
        Ok(Self::V1(V1Snapshot::new(
            store,
            state_params,
            block_height,
        )?))
    }

    /// The snapshot format version of this value.
    pub fn version(&self) -> SnapshotVersion {
        match self {
            Snapshot::V1(_) => 1,
        }
    }

    /// Read the snapshot from file and load all the data into the store.
    ///
    /// When `validate` is true the CAR blocks are checked against their CIDs
    /// while loading; otherwise they are loaded unchecked.
    pub async fn read_car(
        path: impl AsRef<Path>,
        store: BS,
        validate: bool,
    ) -> anyhow::Result<Self> {
        let file = tokio::fs::File::open(path).await?;
        let roots = if validate {
            load_car(&store, file.compat()).await?
        } else {
            load_car_unchecked(&store, file.compat()).await?
        };
        // The single root must be the `SnapshotMetadata` CID written by `write_car`.
        if roots.len() != 1 {
            return Err(anyhow!("invalid snapshot, should have 1 root cid"));
        }
        let metadata_cid = roots[0];
        let metadata = if let Some(metadata) = store.get_cbor::<SnapshotMetadata>(&metadata_cid)? {
            metadata
        } else {
            return Err(anyhow!("invalid snapshot, metadata not found"));
        };
        // Dispatch on the recorded version to parse the version-specific payload.
        match metadata.version {
            1 => Ok(Self::V1(V1Snapshot::from_root(
                store,
                metadata.data_root_cid,
            )?)),
            v => Err(anyhow!("unknown snapshot version: {v}")),
        }
    }

    /// Write the snapshot to car file.
    ///
    /// The root cid points to the metadata, i.e `SnapshotMetadata` struct. From the snapshot metadata
    /// one can query the version and root data cid. Based on the version, one can parse the underlying
    /// data of the snapshot from the root cid.
    pub async fn write_car(self, path: impl AsRef<Path>) -> anyhow::Result<()> {
        let file = tokio::fs::File::create(path).await?;
        // derive the metadata for the car file, so that the snapshot version can be recorded.
        let (metadata, snapshot_streamer) = self.into_streamer()?;
        let (metadata_cid, metadata_bytes) = derive_cid(&metadata)?;
        // create the target car header with the metadata cid as the only root
        let car = CarHeader::new(vec![metadata_cid], 1);
        // create the stream to stream all the data into the car file;
        // the metadata block itself is merged in with the payload blocks.
        let mut streamer =
            tokio_stream::iter(vec![(metadata_cid, metadata_bytes)]).merge(snapshot_streamer);
        let write_task = tokio::spawn(async move {
            let mut write = file.compat_write();
            car.write_stream_async(&mut Pin::new(&mut write), &mut streamer)
                .await
        });
        write_task.await??;
        Ok(())
    }

    /// Convert into the snapshot metadata plus the stream of all
    /// `(Cid, bytes)` pairs of the version-specific payload.
    fn into_streamer(self) -> anyhow::Result<(SnapshotMetadata, SnapshotStreamer)> {
        match self {
            Snapshot::V1(inner) => {
                let (data_root_cid, streamer) = inner.into_streamer()?;
                Ok((
                    SnapshotMetadata {
                        version: 1,
                        data_root_cid,
                    },
                    streamer,
                ))
            }
        }
    }
}
/// Version 1 snapshot layout: the full state tree plus the state parameters
/// and block height it was taken at.
pub struct V1Snapshot<BS> {
    /// The state tree of the current blockchain
    state_tree: StateTree<ReadOnlyBlockstore<BS>>,
    /// Chain state parameters at the snapshot height.
    state_params: FvmStateParams,
    /// Height of the block the snapshot was taken at.
    block_height: BlockHeight,
}
/// Serialized root record of a V1 snapshot: the state params and block height.
pub type BlockStateParams = (FvmStateParams, BlockHeight);
impl<BS> V1Snapshot<BS>
where
    BS: Blockstore + 'static + Send + Clone,
{
    /// Creates a new `V1Snapshot`. The caller ensures the store contains the
    /// state tree rooted at `state_params.state_root`.
    pub fn new(
        store: BS,
        state_params: FvmStateParams,
        block_height: BlockHeight,
    ) -> anyhow::Result<Self> {
        let state_tree =
            StateTree::new_from_root(ReadOnlyBlockstore::new(store), &state_params.state_root)?;
        Ok(Self {
            state_tree,
            state_params,
            block_height,
        })
    }

    /// Load a V1 snapshot from its root record, i.e. the `BlockStateParams`
    /// tuple stored under `root_cid` (the inverse of `into_streamer`).
    fn from_root(store: BS, root_cid: Cid) -> anyhow::Result<Self> {
        if let Some((state_params, block_height)) = store.get_cbor::<BlockStateParams>(&root_cid)? {
            let state_tree_root = state_params.state_root;
            Ok(Self {
                state_tree: StateTree::new_from_root(
                    ReadOnlyBlockstore::new(store),
                    &state_tree_root,
                )?,
                state_params,
                block_height,
            })
        } else {
            Err(anyhow!(
                "invalid v1 snapshot, root cid not found: {}",
                root_cid
            ))
        }
    }

    /// Turn the snapshot into its root CID and a stream of all blocks:
    /// the root record plus every block reachable from the state tree root.
    fn into_streamer(self) -> anyhow::Result<(Cid, SnapshotStreamer)> {
        let state_tree_root = self.state_params.state_root;
        // The root record is the (state params, height) tuple; its CID becomes
        // the `data_root_cid` in the snapshot metadata.
        let block_state_params = (self.state_params, self.block_height);
        let bytes = fvm_ipld_encoding::to_vec(&block_state_params)?;
        let root_cid = Cid::new_v1(DAG_CBOR, Code::Blake2b256.digest(&bytes));
        let state_tree_streamer =
            StateTreeStreamer::new(state_tree_root, self.state_tree.into_store());
        let root_streamer = tokio_stream::iter(vec![(root_cid, bytes)]);
        let streamer: SnapshotStreamer = Box::new(state_tree_streamer.merge(root_streamer));
        Ok((root_cid, streamer))
    }

    /// Height of the block the snapshot was taken at.
    pub fn block_height(&self) -> BlockHeight {
        self.block_height
    }

    /// State parameters recorded in the snapshot.
    pub fn state_params(&self) -> &FvmStateParams {
        &self.state_params
    }
}
/// Breadth-style traversal of the state tree that yields every reachable
/// `(Cid, raw block bytes)` pair from the blockstore.
#[pin_project::pin_project]
struct StateTreeStreamer<BS> {
    /// The list of cids to pull from the blockstore
    #[pin]
    dfs: VecDeque<Cid>,
    /// The block store
    bs: BS,
}
impl<BS> StateTreeStreamer<BS> {
    /// Start the traversal from the state tree root CID.
    pub fn new(state_root_cid: Cid, bs: BS) -> Self {
        let mut dfs = VecDeque::new();
        dfs.push_back(state_root_cid);
        Self { dfs, bs }
    }
}
impl<BS: Blockstore> Stream for StateTreeStreamer<BS> {
    type Item = (Cid, Vec<u8>);

    /// Pop the next CID off the work queue, fetch its bytes, and — for
    /// DAG-CBOR blocks — enqueue every CID linked from the decoded IPLD.
    ///
    /// The stream never returns `Poll::Pending`: all data comes from the
    /// synchronous blockstore, so each poll either yields an item or ends.
    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        loop {
            let cid = if let Some(cid) = this.dfs.pop_front() {
                cid
            } else {
                return Poll::Ready(None);
            };
            match this.bs.get(&cid) {
                Ok(Some(bytes)) => {
                    // Not all data in the blockstore is traversable, e.g.
                    // Wasm bytecode is inserted as IPLD_RAW here: https://github.com/filecoin-project/builtin-actors-bundler/blob/bf6847b2276ee8e4e17f8336f2eb5ab2fce1d853/src/lib.rs#L54C71-L54C79
                    if cid.codec() == DAG_CBOR {
                        // XXX: Is it okay to panic?
                        // Fix: panic message typo ("blocktore" -> "blockstore").
                        let ipld =
                            from_slice::<Ipld>(&bytes).expect("blockstore stores IPLD encoded data");
                        walk_ipld_cids(ipld, &mut this.dfs);
                    }
                    return Poll::Ready(Some((cid, bytes)));
                }
                Ok(None) => {
                    // Links can point at data that is absent from the store;
                    // skip them rather than aborting the whole snapshot.
                    tracing::debug!("cid: {cid:?} has no value in block store, skip");
                    continue;
                }
                Err(e) => {
                    tracing::error!("cannot get from block store: {}", e.to_string());
                    // TODO: consider returning Result, but it won't work with `car.write_stream_async`.
                    return Poll::Ready(None);
                }
            }
        }
    }
}
/// Recursively collect every CID linked from the given IPLD value into the
/// traversal queue. Scalar leaves are ignored.
fn walk_ipld_cids(ipld: Ipld, dfs: &mut VecDeque<Cid>) {
    match ipld {
        Ipld::Link(cid) => dfs.push_back(cid),
        Ipld::List(items) => items
            .into_iter()
            .for_each(|item| walk_ipld_cids(item, dfs)),
        Ipld::Map(entries) => entries
            .into_values()
            .for_each(|value| walk_ipld_cids(value, dfs)),
        _ => {}
    }
}
/// CBOR-encode the value and derive its DAG-CBOR CID (Blake2b-256 digest),
/// returning both the CID and the encoded bytes.
fn derive_cid<T: Serialize>(t: &T) -> anyhow::Result<(Cid, Vec<u8>)> {
    let encoded = fvm_ipld_encoding::to_vec(t)?;
    let digest = Code::Blake2b256.digest(&encoded);
    Ok((Cid::new_v1(DAG_CBOR, digest), encoded))
}
#[cfg(test)]
mod tests {
    use crate::fvm::state::snapshot::{Snapshot, StateTreeStreamer};
    use crate::fvm::state::FvmStateParams;
    use crate::fvm::store::memory::MemoryBlockstore;
    use crate::fvm::store::ReadOnlyBlockstore;
    use cid::Cid;
    use fendermint_vm_core::Timestamp;
    use futures_util::StreamExt;
    use fvm::state_tree::{ActorState, StateTree};
    use fvm_ipld_blockstore::Blockstore;
    use fvm_shared::state::StateTreeVersion;
    use fvm_shared::version::NetworkVersion;
    use quickcheck::{Arbitrary, Gen};
    use std::collections::VecDeque;

    /// Build a state tree with `items` arbitrary actors and flush it,
    /// returning the root CID and the tree.
    fn prepare_state_tree(items: u64) -> (Cid, StateTree<MemoryBlockstore>) {
        let store = MemoryBlockstore::new();
        let mut state_tree = StateTree::new(store, StateTreeVersion::V5).unwrap();
        let mut gen = Gen::new(16);
        for i in 1..=items {
            let state = ActorState::arbitrary(&mut gen);
            state_tree.set_actor(i, state);
        }
        let root_cid = state_tree.flush().unwrap();
        (root_cid, state_tree)
    }

    /// Assert that every actor of `tree1` exists in `tree2` with equal state.
    fn assert_tree2_contains_tree1<Store1: Blockstore, Store2: Blockstore>(
        tree1: &StateTree<Store1>,
        tree2: &StateTree<Store2>,
    ) {
        tree1
            .for_each(|addr, state| {
                let r = tree2.get_actor_by_address(&addr);
                if r.is_err() {
                    panic!("addr: {addr:?} does not exists in tree 2");
                }
                if let Some(target_state) = r.unwrap() {
                    assert_eq!(target_state, *state);
                } else {
                    panic!("missing address: {addr:?}");
                }
                Ok(())
            })
            .unwrap();
    }

    /// Streaming all blocks into a fresh store must reproduce the same tree.
    #[tokio::test]
    async fn test_streamer() {
        let (root_cid, state_tree) = prepare_state_tree(100);
        let bs = state_tree.into_store();
        let mut stream = StateTreeStreamer {
            dfs: VecDeque::from(vec![root_cid]),
            bs: bs.clone(),
        };
        // Copy every streamed block into a new store, then verify both stores
        // materialize equivalent state trees (in both directions).
        let new_bs = MemoryBlockstore::new();
        while let Some((cid, bytes)) = stream.next().await {
            new_bs.put_keyed(&cid, &bytes).unwrap();
        }
        let new_state_tree = StateTree::new_from_root(new_bs, &root_cid).unwrap();
        let old_state_tree = StateTree::new_from_root(bs, &root_cid).unwrap();
        assert_tree2_contains_tree1(&old_state_tree, &new_state_tree);
        assert_tree2_contains_tree1(&new_state_tree, &old_state_tree);
    }

    /// Round-trip: write a snapshot to a CAR file and read it back with
    /// validation; params, height and tree contents must survive.
    #[tokio::test]
    async fn test_car() {
        let (state_root, state_tree) = prepare_state_tree(100);
        let state_params = FvmStateParams {
            state_root,
            timestamp: Timestamp(100),
            network_version: NetworkVersion::V1,
            base_fee: Default::default(),
            circ_supply: Default::default(),
            chain_id: 1024,
            power_scale: 0,
            app_version: 0,
        };
        let block_height = 2048;
        let bs = state_tree.into_store();
        let db = ReadOnlyBlockstore::new(bs.clone());
        let snapshot = Snapshot::new(db, state_params.clone(), block_height).unwrap();
        let tmp_file = tempfile::NamedTempFile::new().unwrap();
        let r = snapshot.write_car(tmp_file.path()).await;
        assert!(r.is_ok());
        let new_store = MemoryBlockstore::new();
        let Snapshot::V1(loaded_snapshot) = Snapshot::read_car(tmp_file.path(), new_store, true)
            .await
            .unwrap();
        assert_eq!(state_params, loaded_snapshot.state_params);
        assert_eq!(block_height, loaded_snapshot.block_height);
        assert_tree2_contains_tree1(
            &StateTree::new_from_root(bs, &loaded_snapshot.state_params.state_root).unwrap(),
            &loaded_snapshot.state_tree,
        );
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/fevm.rs | fendermint/vm/interpreter/src/fvm/state/fevm.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::any::type_name;
use std::fmt::Debug;
use std::{marker::PhantomData, sync::Arc};
use crate::fvm::FvmApplyRet;
use anyhow::{anyhow, bail, Context};
use ethers::abi::{AbiDecode, AbiEncode, Detokenize};
use ethers::core::types as et;
use ethers::prelude::{decode_function_data, ContractRevert};
use ethers::providers as ep;
use fendermint_vm_actor_interface::{eam::EthAddress, evm, system};
use fendermint_vm_message::conv::from_eth;
use fvm::executor::ApplyFailure;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::{BytesDe, BytesSer, RawBytes};
use fvm_shared::{address::Address, econ::TokenAmount, error::ExitCode, message::Message};
use super::FvmExecState;
/// Ethers provider backed by a mock transport; it never hits the network —
/// the bindings are only used for ABI (de)serialization.
pub type MockProvider = ep::Provider<ep::MockProvider>;
/// Contract call built against the mock provider.
pub type MockContractCall<T> = ethers::prelude::ContractCall<MockProvider, T>;
/// Result of trying to decode the data returned in failures as reverts.
///
/// The `E` type is supposed to be the enum unifying all errors that the contract can emit.
#[derive(Clone)]
pub enum ContractError<E> {
    /// The contract reverted with one of the expected custom errors.
    Revert(E),
    /// Some other error occurred that we could not decode.
    Raw(Vec<u8>),
}
/// Error returned by calling a contract.
#[derive(Clone, Debug)]
pub struct CallError<E> {
    /// Exit code from the FVM message receipt.
    pub exit_code: ExitCode,
    /// Optional failure details attached by the FVM executor.
    pub failure_info: Option<ApplyFailure>,
    /// Decoded (or raw, undecodable) revert data.
    pub error: ContractError<E>,
}
/// Render reverts as `TypeName:Variant`, raw data as hex, and make the empty
/// case explicit so ABI mismatches are easy to spot in logs.
impl<E> std::fmt::Debug for ContractError<E>
where
    E: std::fmt::Debug,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ContractError::Raw(bytes) if bytes.is_empty() => {
                f.write_str("<no data; potential ABI mismatch>")
            }
            ContractError::Raw(bytes) => write!(f, "0x{}", hex::encode(bytes)),
            ContractError::Revert(err) => write!(f, "{}:{:?}", type_name::<E>(), err),
        }
    }
}
/// Successful contract call result that still holds the raw FVM return,
/// so the caller can choose between the decoded value and the full receipt.
pub struct ContractCallerReturn<T> {
    // Raw apply return from the FVM.
    ret: FvmApplyRet,
    // The original call, kept for its ABI function definition.
    call: MockContractCall<T>,
}
impl<T: Detokenize> ContractCallerReturn<T> {
    /// Decode the FVM return data into the call's ABI return type.
    pub fn into_decoded(self) -> anyhow::Result<T> {
        // The EVM actor wraps the ABI-encoded return value in CBOR bytes.
        let data = self
            .ret
            .apply_ret
            .msg_receipt
            .return_data
            .deserialize::<BytesDe>()
            .context("failed to deserialize return data")?;
        let value = decode_function_data(&self.call.function, data.0, false)
            .context("failed to decode bytes")?;
        Ok(value)
    }

    /// Take the raw apply return without decoding.
    pub fn into_return(self) -> FvmApplyRet {
        self.ret
    }
}
/// Outcome of a contract call: the value, or the structured call error.
pub type ContractResult<T, E> = Result<T, CallError<E>>;
/// Type we can use if a contract does not return revert errors, e.g. because it's all read-only views.
#[derive(Clone)]
pub struct NoRevert;
impl ContractRevert for NoRevert {
    // No selector ever matches, so `decode` below is never reached.
    fn valid_selector(_selector: et::Selector) -> bool {
        false
    }
}
impl AbiDecode for NoRevert {
    fn decode(_bytes: impl AsRef<[u8]>) -> Result<Self, ethers::contract::AbiError> {
        unimplemented!("selector doesn't match anything")
    }
}
impl AbiEncode for NoRevert {
    fn encode(self) -> Vec<u8> {
        unimplemented!("selector doesn't match anything")
    }
}
impl std::fmt::Debug for NoRevert {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "contract not expected to revert")
    }
}
/// Facilitate calling FEVM contracts through their Ethers ABI bindings by
/// 1. serializing parameters,
/// 2. sending a message to the FVM, and
/// 3. deserializing the return value
///
/// Example:
/// ```no_run
/// use fendermint_vm_actor_interface::{eam::EthAddress, ipc::GATEWAY_ACTOR_ID};
/// use ipc_actors_abis::gateway_getter_facet::GatewayGetterFacet;
/// # use fendermint_vm_interpreter::fvm::state::fevm::{ContractCaller, NoRevert};
/// # use fendermint_vm_interpreter::fvm::state::FvmExecState;
/// # use fendermint_vm_interpreter::fvm::store::memory::MemoryBlockstore as DB;
///
/// let caller: ContractCaller<_, _, NoRevert> = ContractCaller::new(
///     EthAddress::from_id(GATEWAY_ACTOR_ID),
///     GatewayGetterFacet::new
/// );
///
/// let mut state: FvmExecState<DB> = todo!();
///
/// let _period: u64 = caller.call(&mut state, |c| c.bottom_up_check_period()).unwrap().as_u64();
/// ```
#[derive(Clone)]
pub struct ContractCaller<DB, C, E> {
    /// FVM address of the contract being called.
    addr: Address,
    /// Ethers ABI bindings used to build calldata and decode returns.
    contract: C,
    // Phantom types: the blockstore the execution state uses, and the
    // contract's unified revert error enum.
    store: PhantomData<DB>,
    error: PhantomData<E>,
}
impl<DB, C, E> ContractCaller<DB, C, E> {
    /// Create a new contract caller with the contract's Ethereum address and ABI bindings:
    pub fn new<F>(addr: EthAddress, contract: F) -> Self
    where
        F: FnOnce(et::Address, Arc<MockProvider>) -> C,
    {
        // The provider is mocked: nothing is ever sent through it, it only
        // serves to instantiate the ABI bindings.
        let (client, _mock) = ep::Provider::mocked();
        let contract = contract(addr.into(), std::sync::Arc::new(client));
        Self {
            addr: Address::from(addr),
            contract,
            store: PhantomData,
            error: PhantomData,
        }
    }

    /// Get a reference to the wrapped contract to construct messages without calling anything.
    pub fn contract(&self) -> &C {
        &self.contract
    }
}
impl<DB, C, E> ContractCaller<DB, C, E>
where
    DB: Blockstore + Clone,
    E: ContractRevert + Debug,
{
    /// Call an EVM method implicitly to read its return value.
    ///
    /// Returns an error if the return code shows is not successful;
    /// intended to be used with methods that are expected succeed.
    pub fn call<T, F>(&self, state: &mut FvmExecState<DB>, f: F) -> anyhow::Result<T>
    where
        F: FnOnce(&C) -> MockContractCall<T>,
        T: Detokenize,
    {
        self.call_with_return(state, f)?.into_decoded()
    }

    /// Call an EVM method implicitly to read its raw return value.
    ///
    /// Returns an error if the return code shows is not successful;
    /// intended to be used with methods that are expected succeed.
    pub fn call_with_return<T, F>(
        &self,
        state: &mut FvmExecState<DB>,
        f: F,
    ) -> anyhow::Result<ContractCallerReturn<T>>
    where
        F: FnOnce(&C) -> MockContractCall<T>,
        T: Detokenize,
    {
        match self.try_call_with_ret(state, f)? {
            Ok(value) => Ok(value),
            Err(CallError {
                exit_code,
                failure_info,
                error,
            }) => {
                // Flatten the structured call error into an `anyhow` error
                // since the caller treats any failure as unexpected.
                bail!(
                    "failed to execute contract call to {}:\ncode: {}\nerror: {:?}\ninfo: {}",
                    self.addr,
                    exit_code.value(),
                    error,
                    failure_info.map(|i| i.to_string()).unwrap_or_default(),
                );
            }
        }
    }

    /// Call an EVM method implicitly to read its return value.
    ///
    /// Returns either the result or the exit code if it's not successful;
    /// intended to be used with methods that are expected to fail under certain conditions.
    pub fn try_call<T, F>(
        &self,
        state: &mut FvmExecState<DB>,
        f: F,
    ) -> anyhow::Result<ContractResult<T, E>>
    where
        F: FnOnce(&C) -> MockContractCall<T>,
        T: Detokenize,
    {
        Ok(match self.try_call_with_ret(state, f)? {
            Ok(r) => Ok(r.into_decoded()?),
            Err(e) => Err(e),
        })
    }

    /// Call an EVM method implicitly to read its return value and its original apply return.
    ///
    /// Returns either the result or the exit code if it's not successful;
    /// intended to be used with methods that are expected to fail under certain conditions.
    pub fn try_call_with_ret<T, F>(
        &self,
        state: &mut FvmExecState<DB>,
        f: F,
    ) -> anyhow::Result<ContractResult<ContractCallerReturn<T>, E>>
    where
        F: FnOnce(&C) -> MockContractCall<T>,
        T: Detokenize,
    {
        // Build the ABI calldata and wrap it in CBOR bytes, which is the
        // parameter format of the EVM actor's `InvokeContract` method.
        let call = f(&self.contract);
        let calldata = call.calldata().ok_or_else(|| anyhow!("missing calldata"))?;
        let calldata = RawBytes::serialize(BytesSer(&calldata))?;
        // Unless the call specifies a sender, send from the system actor.
        let from = call
            .tx
            .from()
            .map(|addr| Address::from(EthAddress::from(*addr)))
            .unwrap_or(system::SYSTEM_ACTOR_ADDR);
        let value = call
            .tx
            .value()
            .map(from_eth::to_fvm_tokens)
            .unwrap_or_else(|| TokenAmount::from_atto(0));
        // We send off a read-only query to an EVM actor at the given address.
        let msg = Message {
            version: Default::default(),
            from,
            to: self.addr,
            sequence: 0,
            value,
            method_num: evm::Method::InvokeContract as u64,
            params: calldata,
            gas_limit: fvm_shared::BLOCK_GAS_LIMIT,
            gas_fee_cap: TokenAmount::from_atto(0),
            gas_premium: TokenAmount::from_atto(0),
        };
        //eprintln!("\nCALLING FVM: {msg:?}");
        let (ret, emitters) = state.execute_implicit(msg).context("failed to call FEVM")?;
        //eprintln!("\nRESULT FROM FVM: {ret:?}");
        if !ret.msg_receipt.exit_code.is_success() {
            // Try to interpret the (CBOR-wrapped) return data as revert data.
            let output = ret.msg_receipt.return_data;
            let output = if output.is_empty() {
                Vec::new()
            } else {
                // The EVM actor might return some revert in the output.
                output
                    .deserialize::<BytesDe>()
                    .map(|bz| bz.0)
                    .context("failed to deserialize error data")?
            };
            let error = match decode_revert::<E>(&output) {
                Some(e) => ContractError::Revert(e),
                None => ContractError::Raw(output),
            };
            Ok(Err(CallError {
                exit_code: ret.msg_receipt.exit_code,
                failure_info: ret.failure_info,
                error,
            }))
        } else {
            let ret = FvmApplyRet {
                apply_ret: ret,
                from,
                to: self.addr,
                method_num: evm::Method::InvokeContract as u64,
                gas_limit: fvm_shared::BLOCK_GAS_LIMIT,
                emitters,
            };
            Ok(Ok(ContractCallerReturn { call, ret }))
        }
    }
}
/// Fixed decoding until https://github.com/gakonst/ethers-rs/pull/2637 is released.
///
/// First try the regular decode; if that fails, work around the ethers bug
/// that chops the 4-byte selector off before decoding, by prepending a second
/// copy of the selector so one copy survives the chop.
fn decode_revert<E: ContractRevert>(data: &[u8]) -> Option<E> {
    if let Some(decoded) = E::decode_with_selector(data) {
        return Some(decoded);
    }
    // Bail out (None) when there are fewer than 4 bytes — no selector to double.
    let selector = data.get(..4)?;
    let doubled = [selector, data].concat();
    E::decode_with_selector(&doubled)
}
#[cfg(test)]
mod tests {
    use ethers::{contract::ContractRevert, types::Bytes};
    use ipc_actors_abis::gateway_manager_facet::{GatewayManagerFacetErrors, InsufficientFunds};
    use crate::fvm::state::fevm::decode_revert;

    /// A payload-less custom error is just its 4-byte selector; it must
    /// decode into the corresponding variant of the facet's error enum.
    #[test]
    fn decode_custom_error() {
        // An example of binary data corresponding to `InsufficientFunds`
        let bz: Bytes = "0x356680b7".parse().unwrap();
        let selector = bz[..4].try_into().expect("it's 4 bytes");
        assert!(
            GatewayManagerFacetErrors::valid_selector(selector),
            "it should be a valid selector"
        );
        let err =
            decode_revert::<GatewayManagerFacetErrors>(&bz).expect("could not decode as revert");
        assert_eq!(
            err,
            GatewayManagerFacetErrors::InsufficientFunds(InsufficientFunds)
        )
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/exec.rs | fendermint/vm/interpreter/src/fvm/state/exec.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::collections::{HashMap, HashSet};
use anyhow::Ok;
use cid::Cid;
use fendermint_vm_genesis::PowerScale;
use fvm::{
call_manager::DefaultCallManager,
engine::MultiEngine,
executor::{ApplyFailure, ApplyKind, ApplyRet, DefaultExecutor, Executor},
machine::{DefaultMachine, Machine, Manifest, NetworkConfig},
state_tree::StateTree,
DefaultKernel,
};
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::RawBytes;
use fvm_shared::{
address::Address, chainid::ChainID, clock::ChainEpoch, econ::TokenAmount, error::ExitCode,
message::Message, receipt::Receipt, version::NetworkVersion, ActorID,
};
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use crate::fvm::{custom_kernel::LoggingKernel, externs::FendermintExterns};
use fendermint_vm_core::{chainid::HasChainID, Timestamp};
use fendermint_vm_encoding::IsHumanReadable;
/// Hash of the block being executed.
pub type BlockHash = [u8; 32];
/// First 20 bytes of SHA256(PublicKey)
pub type ValidatorId = tendermint::account::Id;
/// Map from event-emitter actor ID to its delegated address.
pub type ActorAddressMap = HashMap<ActorID, Address>;
/// The result of the message application bundled with any delegated addresses of event emitters.
pub type ExecResult = anyhow::Result<(ApplyRet, ActorAddressMap)>;
/// Parts of the state which evolve during the lifetime of the chain.
#[serde_as]
#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
pub struct FvmStateParams {
    /// Root CID of the actor state map.
    #[serde_as(as = "IsHumanReadable")]
    pub state_root: Cid,
    /// Last applied block time stamp.
    pub timestamp: Timestamp,
    /// FVM network version.
    pub network_version: NetworkVersion,
    /// Base fee for contract execution.
    #[serde_as(as = "IsHumanReadable")]
    pub base_fee: TokenAmount,
    /// Current circulating supply; changes in the context of IPC.
    #[serde_as(as = "IsHumanReadable")]
    pub circ_supply: TokenAmount,
    /// The [`ChainID`] is stored here to hint at the possibility that
    /// a chain ID might change during the lifetime of a chain, in case
    /// there is a fork, or perhaps a subnet migration in IPC.
    ///
    /// How exactly that would be communicated is unknown at this point.
    pub chain_id: u64,
    /// Conversion from collateral to voting power.
    pub power_scale: PowerScale,
    /// The application protocol version.
    ///
    /// Defaults to zero on deserialization so that states serialized before
    /// this field existed can still be read.
    #[serde(default)]
    pub app_version: u64,
}
/// Parts of the state which can be updated by message execution, apart from the actor state.
///
/// This is just a technical thing to help us not forget about saving something.
///
/// TODO: `base_fee` should surely be here.
/// NOTE(review): `base_fee` already is a field below — this TODO looks stale;
/// confirm and remove.
#[derive(Debug)]
pub struct FvmUpdatableParams {
    /// The application protocol version, which changes during upgrades.
    pub app_version: u64,
    /// The base fee has currently no automatic rules of being updated,
    /// but it's exposed to upgrades.
    pub base_fee: TokenAmount,
    /// The circulating supply changes if IPC is enabled and
    /// funds/releases are carried out with the parent.
    pub circ_supply: TokenAmount,
    /// Conversion between collateral and voting power.
    /// Doesn't change at the moment but in theory it could,
    /// and it doesn't have a place within the FVM.
    pub power_scale: PowerScale,
}
/// Blockstore type of the default machine, as seen through the `Machine` trait.
pub type MachineBlockstore<DB> = <DefaultMachine<DB, FendermintExterns<DB>> as Machine>::Blockstore;
/// A state we create for the execution of all the messages in a block.
pub struct FvmExecState<DB>
where
    DB: Blockstore + Clone + 'static,
{
    // The FVM executor, parameterized over the project's logging kernel.
    #[allow(clippy::type_complexity)]
    executor: DefaultExecutor<
        LoggingKernel<DefaultCallManager<DefaultMachine<DB, FendermintExterns<DB>>>>,
    >,
    /// Hash of the block currently being executed. For queries and checks this is empty.
    ///
    /// The main motivation to add it here was to make it easier to pass in data to the
    /// execution interpreter without having to add yet another piece to track at the app level.
    block_hash: Option<BlockHash>,
    /// ID of the validator who created this block. For queries and checks this is empty.
    validator_id: Option<ValidatorId>,
    /// State of parameters that are outside the control of the FVM but can change and need to be persisted.
    params: FvmUpdatableParams,
    /// Indicate whether the parameters have been updated.
    params_dirty: bool,
}
impl<DB> FvmExecState<DB>
where
    DB: Blockstore + Clone + 'static,
{
    /// Create a new FVM execution environment.
    ///
    /// Calling this can be very slow unless we run in `--release` mode, because the [DefaultExecutor]
    /// pre-loads builtin-actor CIDs and wasm in debug mode is slow to instrument.
    pub fn new(
        blockstore: DB,
        multi_engine: &MultiEngine,
        block_height: ChainEpoch,
        params: FvmStateParams,
    ) -> anyhow::Result<Self> {
        let mut nc = NetworkConfig::new(params.network_version);
        nc.chain_id = ChainID::from(params.chain_id);
        // TODO: Configure:
        // * circ_supply; by default it's for Filecoin
        // * base_fee; by default it's zero
        let mut mc = nc.for_epoch(block_height, params.timestamp.0, params.state_root);
        mc.set_base_fee(params.base_fee.clone());
        mc.set_circulating_supply(params.circ_supply.clone());
        mc.enable_actor_debugging();
        // Creating a new machine every time is prohibitively slow.
        // let ec = EngineConfig::from(&nc);
        // let engine = EnginePool::new_default(ec)?;
        let engine = multi_engine.get(&nc)?;
        let externs = FendermintExterns::new(blockstore.clone(), params.state_root);
        let machine = DefaultMachine::new(&mc, blockstore, externs)?;
        let executor = DefaultExecutor::new(engine, machine)?;
        Ok(Self {
            executor,
            block_hash: None,
            validator_id: None,
            params: FvmUpdatableParams {
                app_version: params.app_version,
                base_fee: params.base_fee,
                circ_supply: params.circ_supply,
                power_scale: params.power_scale,
            },
            params_dirty: false,
        })
    }

    /// Set the block hash during execution.
    pub fn with_block_hash(mut self, block_hash: BlockHash) -> Self {
        self.block_hash = Some(block_hash);
        self
    }

    /// Set the validator during execution.
    pub fn with_validator_id(mut self, validator_id: ValidatorId) -> Self {
        self.validator_id = Some(validator_id);
        self
    }

    /// Execute message implicitly.
    pub fn execute_implicit(&mut self, msg: Message) -> ExecResult {
        self.execute_message(msg, ApplyKind::Implicit)
    }

    /// Execute message explicitly.
    pub fn execute_explicit(&mut self, msg: Message) -> ExecResult {
        self.execute_message(msg, ApplyKind::Explicit)
    }

    /// Execute a message with the given apply kind.
    ///
    /// Messages failing basic validity checks are turned into a synthetic
    /// failed `ApplyRet` instead of an `Err`; see [check_error].
    pub fn execute_message(&mut self, msg: Message, kind: ApplyKind) -> ExecResult {
        if let Err(e) = msg.check() {
            return Ok(check_error(e));
        }
        // TODO: We could preserve the message length by changing the input type.
        let raw_length = fvm_ipld_encoding::to_vec(&msg).map(|bz| bz.len())?;
        let ret = self.executor.execute_message(msg, kind, raw_length)?;
        let addrs = self.emitter_delegated_addresses(&ret)?;
        Ok((ret, addrs))
    }

    /// Commit the state. It must not fail, but we're returning a result so that error
    /// handling can be done in the application root.
    ///
    /// For now this is not part of the `Interpreter` because it's not clear what atomic
    /// semantics we can hope to provide if the middlewares call each other: did it go
    /// all the way down, or did it stop somewhere? Easier to have one commit of the state
    /// as a whole.
    pub fn commit(mut self) -> anyhow::Result<(Cid, FvmUpdatableParams, bool)> {
        let cid = self.executor.flush()?;
        Ok((cid, self.params, self.params_dirty))
    }

    /// The height of the currently executing block.
    pub fn block_height(&self) -> ChainEpoch {
        self.executor.context().epoch
    }

    /// Identity of the block being executed, if we are indeed executing any blocks.
    pub fn block_hash(&self) -> Option<BlockHash> {
        self.block_hash
    }

    /// Identity of the block creator, if we are indeed executing any blocks.
    pub fn validator_id(&self) -> Option<ValidatorId> {
        self.validator_id
    }

    /// The timestamp of the currently executing block.
    pub fn timestamp(&self) -> Timestamp {
        Timestamp(self.executor.context().timestamp)
    }

    /// Conversion between collateral and voting power.
    pub fn power_scale(&self) -> PowerScale {
        self.params.power_scale
    }

    /// The current application protocol version.
    pub fn app_version(&self) -> u64 {
        self.params.app_version
    }

    /// The updatable parameters as currently held in memory.
    pub fn params(&self) -> &FvmUpdatableParams {
        &self.params
    }

    /// Get a mutable reference to the underlying [StateTree].
    pub fn state_tree_mut(&mut self) -> &mut StateTree<MachineBlockstore<DB>> {
        self.executor.state_tree_mut()
    }

    /// Get a reference to the underlying [StateTree].
    pub fn state_tree(&self) -> &StateTree<MachineBlockstore<DB>> {
        self.executor.state_tree()
    }

    /// Built-in actor manifest to inspect code CIDs.
    pub fn builtin_actors(&self) -> &Manifest {
        self.executor.builtin_actors()
    }

    /// The [ChainID] from the network configuration.
    pub fn chain_id(&self) -> ChainID {
        self.executor.context().network.chain_id
    }

    /// Collect all the event emitters' delegated addresses, for those who have any.
    fn emitter_delegated_addresses(&self, apply_ret: &ApplyRet) -> anyhow::Result<ActorAddressMap> {
        // Deduplicate emitter IDs before hitting the state tree.
        let emitter_ids = apply_ret
            .events
            .iter()
            .map(|e| e.emitter)
            .collect::<HashSet<_>>();
        let mut emitters = HashMap::default();
        for id in emitter_ids {
            if let Some(actor) = self.executor.state_tree().get_actor(id)? {
                if let Some(addr) = actor.delegated_address {
                    emitters.insert(id, addr);
                }
            }
        }
        Ok(emitters)
    }

    /// Update the application version.
    pub fn update_app_version<F>(&mut self, f: F)
    where
        F: FnOnce(&mut u64),
    {
        self.update_params(|p| f(&mut p.app_version))
    }

    /// Update the base fee.
    pub fn update_base_fee<F>(&mut self, f: F)
    where
        F: FnOnce(&mut TokenAmount),
    {
        self.update_params(|p| f(&mut p.base_fee))
    }

    /// Update the circulating supply, effective from the next block.
    pub fn update_circ_supply<F>(&mut self, f: F)
    where
        F: FnOnce(&mut TokenAmount),
    {
        self.update_params(|p| f(&mut p.circ_supply))
    }

    /// Update the parameters and mark them as dirty.
    fn update_params<F>(&mut self, f: F)
    where
        F: FnOnce(&mut FvmUpdatableParams),
    {
        f(&mut self.params);
        self.params_dirty = true;
    }
}
/// The chain ID comes from the FVM network configuration held by the executor.
impl<DB> HasChainID for FvmExecState<DB>
where
    DB: Blockstore + Clone,
{
    fn chain_id(&self) -> ChainID {
        let context = self.executor.context();
        context.network.chain_id
    }
}
/// The FVM would return an error from `DefaultExecutor::preflight_message` if it was called
/// with a message that doesn't pass basic checks, for example it has no gas limit, as opposed
/// to returning an `ApplyRet`. This would cause our application to fail.
/// I'm not sure if it's intentional, or how Lotus handles it, it's not desirable to crash
/// because such messages can be included by malicious validators or user queries. We could
/// use ABCI++ to filter out messages from blocks, but that doesn't affect queries, so we
/// might as well encode it as an error. To keep the types simpler, let's fabricate an `ApplyRet`.
fn check_error(e: anyhow::Error) -> (ApplyRet, ActorAddressMap) {
    let zero = TokenAmount::from_atto(0);
    let ret = ApplyRet {
        msg_receipt: Receipt {
            // Marks the receipt as a system-level failure.
            exit_code: ExitCode::SYS_ASSERTION_FAILED,
            return_data: RawBytes::default(),
            gas_used: 0,
            events_root: None,
        },
        penalty: zero.clone(),
        miner_tip: zero.clone(),
        base_fee_burn: zero.clone(),
        over_estimation_burn: zero.clone(),
        refund: zero,
        gas_refund: 0,
        gas_burned: 0,
        // Preserve the original check failure as human-readable detail.
        failure_info: Some(ApplyFailure::PreValidation(format!("{:#}", e))),
        exec_trace: Vec::new(),
        events: Vec::new(),
    };
    // No events were emitted, so the emitter address map is empty.
    (ret, Default::default())
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/state/ipc.rs | fendermint/vm/interpreter/src/fvm/state/ipc.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::{anyhow, Context};
use ethers::types as et;
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::econ::TokenAmount;
use fvm_shared::ActorID;
use fendermint_crypto::{PublicKey, SecretKey};
use fendermint_vm_actor_interface::ipc;
use fendermint_vm_actor_interface::{
eam::EthAddress,
init::builtin_actor_eth_addr,
ipc::{AbiHash, ValidatorMerkleTree, GATEWAY_ACTOR_ID},
};
use fendermint_vm_genesis::{Collateral, Power, PowerScale, Validator, ValidatorKey};
use fendermint_vm_message::conv::{from_eth, from_fvm};
use fendermint_vm_message::signed::sign_secp256k1;
use fendermint_vm_topdown::IPCParentFinality;
use ipc_actors_abis::checkpointing_facet::CheckpointingFacet;
use ipc_actors_abis::gateway_getter_facet::GatewayGetterFacet;
use ipc_actors_abis::gateway_getter_facet::{self as getter, gateway_getter_facet};
use ipc_actors_abis::top_down_finality_facet::TopDownFinalityFacet;
use ipc_actors_abis::xnet_messaging_facet::XnetMessagingFacet;
use ipc_actors_abis::{checkpointing_facet, top_down_finality_facet, xnet_messaging_facet};
use ipc_api::cross::IpcEnvelope;
use ipc_api::staking::{ConfigurationNumber, StakingChangeRequest};
use super::{
fevm::{ContractCaller, MockProvider, NoRevert},
FvmExecState,
};
use crate::fvm::FvmApplyRet;
/// Facade for invoking the IPC gateway actor's facets against the FVM execution state.
#[derive(Clone)]
pub struct GatewayCaller<DB> {
    // Ethereum address derived from the gateway actor ID; used as the call target.
    addr: EthAddress,
    // Read-only views of the gateway state.
    getter: ContractCaller<DB, GatewayGetterFacet<MockProvider>, NoRevert>,
    // Bottom-up checkpoint creation and signature accumulation.
    checkpointing: ContractCaller<
        DB,
        CheckpointingFacet<MockProvider>,
        checkpointing_facet::CheckpointingFacetErrors,
    >,
    // Parent finality commitment and validator change handling.
    topdown: ContractCaller<
        DB,
        TopDownFinalityFacet<MockProvider>,
        top_down_finality_facet::TopDownFinalityFacetErrors,
    >,
    // Execution of top-down cross-net messages.
    xnet: ContractCaller<DB, XnetMessagingFacet<MockProvider>, NoRevert>,
}
impl<DB> Default for GatewayCaller<DB> {
fn default() -> Self {
Self::new(GATEWAY_ACTOR_ID)
}
}
impl<DB> GatewayCaller<DB> {
    /// Create a caller for the gateway deployed under the given actor ID.
    pub fn new(actor_id: ActorID) -> Self {
        // A masked ID works for invoking the contract, but internally the EVM uses a different
        // ID and if we used this address for anything like validating that the sender is the gateway,
        // we'll face bitter disappointment. For that we have to use the delegated address we have in genesis.
        let addr = builtin_actor_eth_addr(actor_id);
        Self {
            addr,
            getter: ContractCaller::new(addr, GatewayGetterFacet::new),
            checkpointing: ContractCaller::new(addr, CheckpointingFacet::new),
            topdown: ContractCaller::new(addr, TopDownFinalityFacet::new),
            xnet: ContractCaller::new(addr, XnetMessagingFacet::new),
        }
    }
    /// The (masked ID based) Ethereum address used to invoke the gateway.
    pub fn addr(&self) -> EthAddress {
        self.addr
    }
}
impl<DB: Blockstore + Clone> GatewayCaller<DB> {
    /// Check that IPC is configured in this deployment.
    pub fn enabled(&self, state: &mut FvmExecState<DB>) -> anyhow::Result<bool> {
        match state.state_tree_mut().get_actor(GATEWAY_ACTOR_ID)? {
            None => Ok(false),
            // A placeholder at the gateway ID means IPC was not deployed.
            Some(a) => Ok(!state.builtin_actors().is_placeholder_actor(&a.code)),
        }
    }
    /// Return true if the current subnet is the root subnet.
    pub fn is_root(&self, state: &mut FvmExecState<DB>) -> anyhow::Result<bool> {
        self.subnet_id(state).map(|id| id.route.is_empty())
    }
    /// Return the current subnet ID.
    pub fn subnet_id(&self, state: &mut FvmExecState<DB>) -> anyhow::Result<getter::SubnetID> {
        self.getter.call(state, |c| c.get_network_name())
    }
    /// Fetch the period with which the current subnet has to submit checkpoints to its parent.
    pub fn bottom_up_check_period(&self, state: &mut FvmExecState<DB>) -> anyhow::Result<u64> {
        Ok(self
            .getter
            .call(state, |c| c.bottom_up_check_period())?
            .as_u64())
    }
    /// Fetch the bottom-up message batch enqueued for a given checkpoint height.
    pub fn bottom_up_msg_batch(
        &self,
        state: &mut FvmExecState<DB>,
        height: u64,
    ) -> anyhow::Result<getter::BottomUpMsgBatch> {
        let batch = self.getter.call(state, |c| {
            c.bottom_up_msg_batch(ethers::types::U256::from(height))
        })?;
        Ok(batch)
    }
    /// Insert a new checkpoint at the period boundary.
    pub fn create_bottom_up_checkpoint(
        &self,
        state: &mut FvmExecState<DB>,
        checkpoint: checkpointing_facet::BottomUpCheckpoint,
        power_table: &[Validator<Power>],
    ) -> anyhow::Result<()> {
        // Construct a Merkle tree from the power table, which we can use to validate validator set membership
        // when the signatures are submitted in transactions for accumulation.
        let tree =
            ValidatorMerkleTree::new(power_table).context("failed to create validator tree")?;
        // Saturating addition keeps the total well-defined even if the combined
        // power were ever to exceed the U256 range.
        let total_power = power_table.iter().fold(et::U256::zero(), |p, v| {
            p.saturating_add(et::U256::from(v.power.0))
        });
        self.checkpointing.call(state, |c| {
            c.create_bottom_up_checkpoint(checkpoint, tree.root_hash().0, total_power)
        })
    }
    /// Retrieve checkpoints which have not reached a quorum.
    pub fn incomplete_checkpoints(
        &self,
        state: &mut FvmExecState<DB>,
    ) -> anyhow::Result<Vec<getter::BottomUpCheckpoint>> {
        self.getter.call(state, |c| c.get_incomplete_checkpoints())
    }
    /// Apply all pending validator changes, returning the newly adopted configuration number, or 0 if there were no changes.
    pub fn apply_validator_changes(&self, state: &mut FvmExecState<DB>) -> anyhow::Result<u64> {
        self.topdown.call(state, |c| c.apply_finality_changes())
    }
    /// Get the currently active validator set.
    pub fn current_membership(
        &self,
        state: &mut FvmExecState<DB>,
    ) -> anyhow::Result<getter::Membership> {
        self.getter.call(state, |c| c.get_current_membership())
    }
    /// Get the current power table, which is the same as the membership but parsed into domain types.
    pub fn current_power_table(
        &self,
        state: &mut FvmExecState<DB>,
    ) -> anyhow::Result<(ConfigurationNumber, Vec<Validator<Power>>)> {
        let membership = self
            .current_membership(state)
            .context("failed to get current membership")?;
        let power_table = membership_to_power_table(&membership, state.power_scale());
        Ok((membership.configuration_number, power_table))
    }
    /// Construct the input parameters for adding a signature to the checkpoint.
    ///
    /// This will need to be broadcasted as a transaction.
    pub fn add_checkpoint_signature_calldata(
        &self,
        checkpoint: checkpointing_facet::BottomUpCheckpoint,
        power_table: &[Validator<Power>],
        validator: &Validator<Power>,
        secret_key: &SecretKey,
    ) -> anyhow::Result<et::Bytes> {
        // The caller must pass the secret key matching the validator entry.
        debug_assert_eq!(validator.public_key.0, secret_key.public_key());
        let height = checkpoint.block_height;
        let weight = et::U256::from(validator.power.0);
        // Sign the ABI hash of the checkpoint, matching what the contract verifies.
        let hash = checkpoint.abi_hash();
        let signature = sign_secp256k1(secret_key, &hash);
        let signature =
            from_fvm::to_eth_signature(&signature, false).context("invalid signature")?;
        let signature = et::Bytes::from(signature.to_vec());
        // The Merkle proof demonstrates membership of this validator in the power table.
        let tree =
            ValidatorMerkleTree::new(power_table).context("failed to construct Merkle tree")?;
        let membership_proof = tree
            .prove(validator)
            .context("failed to construct Merkle proof")?
            .into_iter()
            .map(|p| p.into())
            .collect();
        let call = self.checkpointing.contract().add_checkpoint_signature(
            height,
            membership_proof,
            weight,
            signature,
        );
        let calldata = call
            .calldata()
            .ok_or_else(|| anyhow!("no calldata for adding signature"))?;
        Ok(calldata)
    }
    /// Commit the parent finality to the gateway and returns the previously committed finality.
    /// None implies there is no previously committed finality.
    pub fn commit_parent_finality(
        &self,
        state: &mut FvmExecState<DB>,
        finality: IPCParentFinality,
    ) -> anyhow::Result<Option<IPCParentFinality>> {
        let evm_finality = top_down_finality_facet::ParentFinality::try_from(finality)?;
        let (has_committed, prev_finality) = self
            .topdown
            .call(state, |c| c.commit_parent_finality(evm_finality))?;
        Ok(if !has_committed {
            None
        } else {
            Some(IPCParentFinality::from(prev_finality))
        })
    }
    /// Store validator changes observed on the parent in the gateway; no-op if there are none.
    pub fn store_validator_changes(
        &self,
        state: &mut FvmExecState<DB>,
        changes: Vec<StakingChangeRequest>,
    ) -> anyhow::Result<()> {
        if changes.is_empty() {
            return Ok(());
        }
        // Convert domain types into the ABI types the facet expects.
        let mut change_requests = vec![];
        for c in changes {
            change_requests.push(top_down_finality_facet::StakingChangeRequest::try_from(c)?);
        }
        self.topdown
            .call(state, |c| c.store_validator_changes(change_requests))
    }
    /// Call this function to mint some FIL to the gateway contract
    pub fn mint_to_gateway(
        &self,
        state: &mut FvmExecState<DB>,
        value: TokenAmount,
    ) -> anyhow::Result<()> {
        // Increase the gateway actor balance directly in the state tree.
        let state_tree = state.state_tree_mut();
        state_tree.mutate_actor(ipc::GATEWAY_ACTOR_ID, |actor_state| {
            actor_state.balance += value;
            Ok(())
        })?;
        Ok(())
    }
    /// Execute top-down cross messages in the gateway, returning the execution result.
    pub fn apply_cross_messages(
        &self,
        state: &mut FvmExecState<DB>,
        cross_messages: Vec<IpcEnvelope>,
    ) -> anyhow::Result<FvmApplyRet> {
        let messages = cross_messages
            .into_iter()
            .map(xnet_messaging_facet::IpcEnvelope::try_from)
            .collect::<Result<Vec<_>, _>>()
            .context("failed to convert cross messages")?;
        let r = self
            .xnet
            .call_with_return(state, |c| c.apply_cross_messages(messages))?;
        Ok(r.into_return())
    }
    /// Query the latest parent finality committed in the gateway.
    pub fn get_latest_parent_finality(
        &self,
        state: &mut FvmExecState<DB>,
    ) -> anyhow::Result<IPCParentFinality> {
        let r = self
            .getter
            .call(state, |c| c.get_latest_parent_finality())?;
        Ok(IPCParentFinality::from(r))
    }
    /// Get the Ethereum addresses of validators who signed a checkpoint.
    pub fn checkpoint_signatories(
        &self,
        state: &mut FvmExecState<DB>,
        height: u64,
    ) -> anyhow::Result<Vec<EthAddress>> {
        let (_, _, addrs, _) = self.getter.call(state, |c| {
            c.get_checkpoint_signature_bundle(ethers::types::U256::from(height))
        })?;
        let addrs = addrs.into_iter().map(|a| a.into()).collect();
        Ok(addrs)
    }
}
/// Total amount of tokens to mint as a result of top-down messages arriving at the subnet.
pub fn tokens_to_mint(msgs: &[ipc_api::cross::IpcEnvelope]) -> TokenAmount {
    // The value of every message enters the circulating supply of the subnet.
    // Fees might be distributed among subnet validators.
    let mut total = TokenAmount::from_atto(0);
    for msg in msgs {
        total += &msg.value;
    }
    total
}
/// Total amount of tokens to burn as a result of bottom-up messages leaving the subnet.
pub fn tokens_to_burn(msgs: &[checkpointing_facet::IpcEnvelope]) -> TokenAmount {
    // Both fees and value were taken from the sender, and both are going up to the parent subnet:
    // https://github.com/consensus-shipyard/ipc-solidity-actors/blob/e4ec0046e2e73e2f91d7ab8ae370af2c487ce526/src/gateway/GatewayManagerFacet.sol#L143-L150
    // Fees might be distributed among relayers.
    let mut total = TokenAmount::from_atto(0);
    for msg in msgs {
        total += from_eth::to_fvm_tokens(&msg.value);
    }
    total
}
/// Convert the collaterals and metadata in the membership to the public key and power expected by the system.
fn membership_to_power_table(
    m: &gateway_getter_facet::Membership,
    power_scale: PowerScale,
) -> Vec<Validator<Power>> {
    m.validators
        .iter()
        .filter_map(|v| {
            // Skip validators whose metadata is not a parseable public key.
            let pk = PublicKey::parse_slice(&v.metadata, None).ok()?;
            let collateral = Collateral(from_eth::to_fvm_tokens(&v.weight));
            Some(Validator {
                public_key: ValidatorKey(pk),
                power: collateral.into_power(power_scale),
            })
        })
        .collect()
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/store/memory.rs | fendermint/vm/interpreter/src/fvm/store/memory.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use std::{
collections::HashMap,
sync::{Arc, RwLock},
};
use anyhow::Result;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
/// An in-memory blockstore that can be shared between threads,
/// unlike [fvm_ipld_blockstore::memory::MemoryBlockstore].
#[derive(Debug, Default, Clone)]
pub struct MemoryBlockstore {
    // CID -> raw block bytes; clones of the store share the same map.
    blocks: Arc<RwLock<HashMap<Cid, Vec<u8>>>>,
}
impl MemoryBlockstore {
pub fn new() -> Self {
Self::default()
}
}
impl Blockstore for MemoryBlockstore {
    /// Check whether a block with the given CID is present.
    fn has(&self, k: &Cid) -> Result<bool> {
        Ok(self.blocks.read().unwrap().contains_key(k))
    }

    /// Fetch a copy of the block bytes stored under the given CID, if any.
    fn get(&self, k: &Cid) -> Result<Option<Vec<u8>>> {
        Ok(self.blocks.read().unwrap().get(k).cloned())
    }

    /// Insert (or overwrite) the block bytes under the given CID.
    fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()> {
        self.blocks.write().unwrap().insert(*k, block.to_vec());
        Ok(())
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/src/fvm/store/mod.rs | fendermint/vm/interpreter/src/fvm/store/mod.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_shared::EMPTY_ARR_CID;
pub mod memory;
/// A [`Blockstore`] wrapper that panics on writes, except for the
/// empty-array CID which the FVM always inserts.
#[derive(Clone)]
pub struct ReadOnlyBlockstore<DB>(DB);
impl<DB> ReadOnlyBlockstore<DB> {
pub fn new(store: DB) -> Self {
Self(store)
}
}
impl<DB> Blockstore for ReadOnlyBlockstore<DB>
where
    DB: Blockstore + Clone,
{
    /// Reads pass straight through to the wrapped store.
    fn get(&self, k: &Cid) -> anyhow::Result<Option<Vec<u8>>> {
        self.0.get(k)
    }

    fn put_keyed(&self, k: &Cid, block: &[u8]) -> anyhow::Result<()> {
        // The FVM inserts the empty array CID each time to make sure it
        // exists, so that particular write has to be tolerated.
        if *k == EMPTY_ARR_CID {
            self.0.put_keyed(k, block)
        } else {
            panic!("never intended to use put on the read-only blockstore")
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/vm/interpreter/tests/golden.rs | fendermint/vm/interpreter/tests/golden.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
/// JSON based test in case we want to configure the FvmStateParams by hand.
mod json {
    use fendermint_testing::golden_json;
    use fendermint_vm_interpreter::fvm::state::FvmStateParams;
    use quickcheck::Arbitrary;
    // Golden test against the fixture directory "fvmstateparams/json";
    // the `golden_json!` macro is defined in `fendermint_testing`.
    golden_json! { "fvmstateparams/json", fvmstateparams, FvmStateParams::arbitrary }
}
/// CBOR based tests in case we have to grab FvmStateParams from on-chain storage.
mod cbor {
    use fendermint_testing::golden_cbor;
    use fendermint_vm_interpreter::fvm::state::FvmStateParams;
    use quickcheck::Arbitrary;
    // Golden test against the fixture directory "fvmstateparams/cbor";
    // the `golden_cbor!` macro is defined in `fendermint_testing`.
    golden_cbor! { "fvmstateparams/cbor", fvmstateparams, FvmStateParams::arbitrary }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rocksdb/src/namespaces.rs | fendermint/rocksdb/src/namespaces.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
/// List all column families to help keep them unique.
///
/// Generates a struct with one `String` field per namespace (each defaulting
/// to its own name) and a `values()` helper listing all of them.
///
/// # Example
///
/// ```
/// use fendermint_rocksdb::namespaces;
///
/// namespaces!(MySpace { foo, bar });
///
/// let ms = MySpace::default();
/// let nss = ms.values();
/// let ns_foo = &ms.foo;
/// ```
#[macro_export]
macro_rules! namespaces {
    ($name:ident { $($col:ident),* }) => {
        // NOTE(review): the matcher accepts an empty list (`,*`) but the
        // expansions below use `,+`, so `namespaces!(Foo {})` fails inside the
        // expansion; consider aligning the repetition operators.
        struct $name {
            pub $($col: String),+
        }
        impl Default for $name {
            fn default() -> Self {
                Self {
                    $($col: stringify!($col).to_owned()),+
                }
            }
        }
        impl $name {
            /// List column family names, all of which are required for re-opening the database.
            pub fn values(&self) -> Vec<&str> {
                vec![$(self.$col.as_ref()),+]
            }
        }
    };
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rocksdb/src/lib.rs | fendermint/rocksdb/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
// Core RocksDB wrapper: connection handling and column family management.
mod rocks;
// IPLD blockstore implementations backed by RocksDB.
#[cfg(feature = "blockstore")]
pub mod blockstore;
// Generic key-value store traits implemented on RocksDB transactions.
#[cfg(feature = "kvstore")]
mod kvstore;
pub mod namespaces;
pub use rocks::{Error as RocksDbError, RocksDb, RocksDbConfig};
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rocksdb/src/kvstore.rs | fendermint/rocksdb/src/kvstore.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::anyhow;
use fendermint_storage::Decode;
use fendermint_storage::Encode;
use fendermint_storage::KVResult;
use fendermint_storage::KVTransaction;
use fendermint_storage::KVWritable;
use fendermint_storage::KVWrite;
use fendermint_storage::{KVError, KVRead, KVReadable, KVStore};
use rocksdb::BoundColumnFamily;
use rocksdb::ErrorKind;
use rocksdb::OptimisticTransactionDB;
use rocksdb::SnapshotWithThreadMode;
use rocksdb::Transaction;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::mem::ManuallyDrop;
use std::sync::Arc;
use std::thread;
use crate::RocksDb;
/// Cache column families to avoid further cloning on each access.
struct ColumnFamilyCache<'a> {
    // Database the column family handles are bound to.
    db: &'a OptimisticTransactionDB,
    // Lazily populated name -> handle map; `RefCell` because lookups
    // fill the cache through a shared reference.
    cfs: RefCell<BTreeMap<String, Arc<BoundColumnFamily<'a>>>>,
}
impl<'a> ColumnFamilyCache<'a> {
    /// Create an empty cache over the given database.
    fn new(db: &'a OptimisticTransactionDB) -> Self {
        Self {
            db,
            cfs: Default::default(),
        }
    }

    /// Look up a column family and pass it to a closure.
    /// Return an error if it doesn't exist.
    fn with_cf_handle<F, T>(&self, name: &str, f: F) -> KVResult<T>
    where
        F: FnOnce(&Arc<BoundColumnFamily<'a>>) -> KVResult<T>,
    {
        let mut cfs = self.cfs.borrow_mut();
        // Populate the cache on first access of this namespace.
        if !cfs.contains_key(name) {
            match self.db.cf_handle(name) {
                Some(cf) => {
                    cfs.insert(name.to_owned(), cf);
                }
                None => {
                    return Err(KVError::Unexpected(
                        anyhow!("column family {name} doesn't exist").into(),
                    ))
                }
            }
        }
        f(cfs.get(name).expect("present or just inserted"))
    }
}
/// For reads, we can just take a snapshot of the DB.
pub struct RocksDbReadTx<'a> {
    // Per-transaction column family handle cache.
    cache: ColumnFamilyCache<'a>,
    // Consistent point-in-time view of the database.
    snapshot: SnapshotWithThreadMode<'a, OptimisticTransactionDB>,
}
/// For writes, we use a transaction which we'll either commit or roll back at the end.
pub struct RocksDbWriteTx<'a> {
    // Per-transaction column family handle cache.
    cache: ColumnFamilyCache<'a>,
    // `ManuallyDrop` so `take_tx` can move the transaction out without
    // triggering this type's panicking `Drop` impl.
    tx: ManuallyDrop<Transaction<'a, OptimisticTransactionDB>>,
}
impl<'a> RocksDbWriteTx<'a> {
    // This method takes the transaction without running the panicky destructor.
    fn take_tx(self) -> Transaction<'a, OptimisticTransactionDB> {
        let mut this = ManuallyDrop::new(self);
        // SAFETY: `self` was moved into `ManuallyDrop`, so the `Drop` impl of
        // `RocksDbWriteTx` never runs, and `this.tx` is taken exactly once
        // here and never accessed again.
        unsafe { ManuallyDrop::take(&mut this.tx) }
    }
}
impl<S> KVReadable<S> for RocksDb
where
    S: KVStore<Repr = Vec<u8>>,
    S::Namespace: AsRef<str>,
{
    type Tx<'a> = RocksDbReadTx<'a>
    where
        Self: 'a;
    /// Start a read-only transaction backed by a consistent DB snapshot.
    fn read(&self) -> Self::Tx<'_> {
        let snapshot = self.db.snapshot();
        RocksDbReadTx {
            cache: ColumnFamilyCache::new(&self.db),
            snapshot,
        }
    }
}
impl<S> KVWritable<S> for RocksDb
where
    S: KVStore<Repr = Vec<u8>>,
    S::Namespace: AsRef<str>,
{
    type Tx<'a> = RocksDbWriteTx<'a>
    where
        Self: 'a;
    /// Start a write transaction; it must be finished with `.commit()` or `.rollback()`.
    fn write(&self) -> Self::Tx<'_> {
        RocksDbWriteTx {
            cache: ColumnFamilyCache::new(&self.db),
            tx: ManuallyDrop::new(self.db.transaction()),
        }
    }
}
impl<'a, S> KVRead<S> for RocksDbReadTx<'a>
where
    S: KVStore<Repr = Vec<u8>>,
    S::Namespace: AsRef<str>,
{
    /// Read and decode a single value from the snapshot.
    fn get<K, V>(&self, ns: &S::Namespace, k: &K) -> KVResult<Option<V>>
    where
        S: Encode<K> + Decode<V>,
    {
        self.cache.with_cf_handle(ns.as_ref(), |cf| {
            let key = S::to_repr(k)?;
            let raw = self
                .snapshot
                .get_cf(cf, key.as_ref())
                .map_err(to_kv_error)?;
            // Decode only if the key was present.
            raw.map(|bz| S::from_repr(&bz)).transpose()
        })
    }

    /// Iterate and decode all key-value pairs in the namespace.
    fn iterate<K, V>(&self, ns: &S::Namespace) -> impl Iterator<Item = KVResult<(K, V)>>
    where
        S: Decode<K> + Decode<V>,
        <S as KVStore>::Repr: Ord + 'static,
    {
        self.cache
            .with_cf_handle(ns.as_ref(), |cf| {
                let it = self
                    .snapshot
                    .iterator_cf(cf, rocksdb::IteratorMode::Start)
                    .map(|res| {
                        let (k, v) = res.map_err(to_kv_error)?;
                        let k: K = S::from_repr(&k.to_vec())?;
                        let v: V = S::from_repr(&v.to_vec())?;
                        Ok((k, v))
                    });
                Ok(it)
            })
            .expect("just wrapped into ok")
    }
}
impl<'a, S> KVRead<S> for RocksDbWriteTx<'a>
where
    S: KVStore<Repr = Vec<u8>>,
    S::Namespace: AsRef<str>,
{
    /// Read and decode a single value through the transaction,
    /// seeing the transaction's own uncommitted writes.
    fn get<K, V>(&self, ns: &S::Namespace, k: &K) -> KVResult<Option<V>>
    where
        S: Encode<K> + Decode<V>,
    {
        self.cache.with_cf_handle(ns.as_ref(), |cf| {
            let key = S::to_repr(k)?;
            let raw = self.tx.get_cf(cf, key.as_ref()).map_err(to_kv_error)?;
            // Decode only if the key was present.
            raw.map(|bz| S::from_repr(&bz)).transpose()
        })
    }

    /// Iterate and decode all key-value pairs in the namespace.
    fn iterate<K, V>(&self, ns: &S::Namespace) -> impl Iterator<Item = KVResult<(K, V)>>
    where
        S: Decode<K> + Decode<V>,
        <S as KVStore>::Repr: Ord + 'static,
    {
        self.cache
            .with_cf_handle(ns.as_ref(), |cf| {
                let it = self
                    .tx
                    .iterator_cf(cf, rocksdb::IteratorMode::Start)
                    .map(|res| {
                        let (k, v) = res.map_err(to_kv_error)?;
                        let k: K = S::from_repr(&k.to_vec())?;
                        let v: V = S::from_repr(&v.to_vec())?;
                        Ok((k, v))
                    });
                Ok(it)
            })
            .expect("just wrapped into ok")
    }
}
impl<'a, S> KVWrite<S> for RocksDbWriteTx<'a>
where
    S: KVStore<Repr = Vec<u8>>,
    S::Namespace: AsRef<str>,
{
    /// Encode and stage a key-value pair for this transaction.
    fn put<K, V>(&mut self, ns: &S::Namespace, k: &K, v: &V) -> KVResult<()>
    where
        S: Encode<K> + Encode<V>,
    {
        self.cache.with_cf_handle(ns.as_ref(), |cf| {
            let key = S::to_repr(k)?;
            let value = S::to_repr(v)?;
            self.tx
                .put_cf(cf, key.as_ref(), value.as_ref())
                .map_err(to_kv_error)
        })
    }

    /// Stage the deletion of a key in this transaction.
    fn delete<K>(&mut self, ns: &S::Namespace, k: &K) -> KVResult<()>
    where
        S: Encode<K>,
    {
        self.cache.with_cf_handle(ns.as_ref(), |cf| {
            let key = S::to_repr(k)?;
            self.tx.delete_cf(cf, key.as_ref()).map_err(to_kv_error)
        })
    }
}
impl<'a> KVTransaction for RocksDbWriteTx<'a> {
    /// Persist all writes staged in this transaction.
    fn commit(self) -> KVResult<()> {
        self.take_tx().commit().map_err(to_kv_error)
    }

    /// Discard all writes staged in this transaction.
    fn rollback(self) -> KVResult<()> {
        self.take_tx().rollback().map_err(to_kv_error)
    }
}
impl<'a> Drop for RocksDbWriteTx<'a> {
    fn drop(&mut self) {
        // Force callers to finish every write transaction explicitly.
        // Skip the check while unwinding, otherwise a panic inside a
        // transaction would escalate to a double panic (abort).
        if !thread::panicking() {
            panic!("Transaction prematurely dropped. Must call `.commit()` or `.rollback()`.");
        }
    }
}
/// Map a RocksDB error to the storage error type, treating `Busy`
/// (optimistic-locking contention) as a retryable conflict.
fn to_kv_error(e: rocksdb::Error) -> KVError {
    match e.kind() {
        ErrorKind::Busy => KVError::Conflict,
        _ => KVError::Unexpected(Box::new(e)),
    }
}
#[cfg(all(feature = "kvstore", test))]
mod tests {
    use std::borrow::Cow;
    use quickcheck::{QuickCheck, Testable};
    use serde::{de::DeserializeOwned, Serialize};
    use fendermint_storage::{testing::*, Codec, Decode, Encode, KVError, KVResult, KVStore};
    use crate::{RocksDb, RocksDbConfig};
    // Number of quickcheck cases per property; kept low because each case opens a fresh DB.
    const TEST_COUNT: u64 = 20;
    // Minimal KVStore using IPLD/CBOR encoding for both keys and values.
    #[derive(Clone)]
    struct TestKVStore;
    impl KVStore for TestKVStore {
        type Namespace = TestNamespace;
        type Repr = Vec<u8>;
    }
    impl<T: Serialize> Encode<T> for TestKVStore {
        fn to_repr(value: &T) -> KVResult<Cow<Self::Repr>> {
            fvm_ipld_encoding::to_vec(value)
                .map_err(|e| KVError::Codec(Box::new(e)))
                .map(Cow::Owned)
        }
    }
    impl<T: DeserializeOwned> Decode<T> for TestKVStore {
        fn from_repr(repr: &Self::Repr) -> KVResult<T> {
            fvm_ipld_encoding::from_slice(repr).map_err(|e| KVError::Codec(Box::new(e)))
        }
    }
    impl<T> Codec<T> for TestKVStore where TestKVStore: Encode<T> + Decode<T> {}
    // Open a fresh RocksDB in a temporary directory with all test column families created.
    // NOTE(review): `dir` is dropped when this function returns, deleting the
    // tempdir while the DB is still open; this relies on open file handles
    // staying valid — consider keeping the TempDir alive alongside the DB.
    fn new_backend() -> RocksDb {
        let dir = tempfile::Builder::new()
            .tempdir()
            .expect("error creating temporary path for db");
        let path = dir.path().join("rocksdb");
        let db = RocksDb::open(path, &RocksDbConfig::default()).expect("error creating RocksDB");
        // Create the column families the test will use.
        for name in test_namespaces() {
            let _ = db.new_cf_handle(name).unwrap();
        }
        db
    }
    // Not using the `#[quickcheck]` macro so I can run fewer tests because they are slow.
    fn run_quickcheck<F: Testable>(f: F) {
        QuickCheck::new().tests(TEST_COUNT).quickcheck(f)
    }
    #[test]
    fn writable() {
        run_quickcheck(
            (|data| {
                let backend = new_backend();
                check_writable::<TestKVStore>(&backend, data)
            }) as fn(TestData) -> bool,
        )
    }
    #[test]
    fn write_isolation() {
        run_quickcheck(
            (|data| {
                let backend = new_backend();
                check_write_isolation::<TestKVStore>(&backend, data)
            }) as fn(TestDataMulti<2>) -> bool,
        )
    }
    #[test]
    fn write_isolation_concurrent() {
        run_quickcheck(
            (|data1, data2| {
                let backend = new_backend();
                check_write_isolation_concurrent::<TestKVStore, _>(&backend, data1, data2)
            }) as fn(TestData, TestData) -> bool,
        )
    }
    #[test]
    fn write_serialization_concurrent() {
        run_quickcheck(
            (|data1, data2| {
                let backend = new_backend();
                check_write_serialization_concurrent::<TestKVStore, _>(&backend, data1, data2)
            }) as fn(TestData, TestData) -> bool,
        )
    }
    #[test]
    fn read_isolation() {
        run_quickcheck(
            (|data| {
                let backend = new_backend();
                check_read_isolation::<TestKVStore, _>(&backend, data)
            }) as fn(TestData) -> bool,
        )
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rocksdb/src/blockstore.rs | fendermint/rocksdb/src/blockstore.rs | use std::sync::Arc;
// Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::anyhow;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use rocksdb::{BoundColumnFamily, OptimisticTransactionDB, WriteBatchWithTransaction};
use crate::RocksDb;
impl Blockstore for RocksDb {
    /// Fetch block bytes by CID from the default column family.
    fn get(&self, k: &Cid) -> anyhow::Result<Option<Vec<u8>>> {
        Ok(self.read(k.to_bytes())?)
    }
    /// Store block bytes under the CID in the default column family.
    fn put_keyed(&self, k: &Cid, block: &[u8]) -> anyhow::Result<()> {
        Ok(self.write(k.to_bytes(), block)?)
    }
    // Called by the BufferedBlockstore during flush.
    fn put_many_keyed<D, I>(&self, blocks: I) -> anyhow::Result<()>
    where
        Self: Sized,
        D: AsRef<[u8]>,
        I: IntoIterator<Item = (Cid, D)>,
    {
        // Batch all writes so they hit the DB atomically in one call.
        let mut batch = WriteBatchWithTransaction::<true>::default();
        for (cid, v) in blocks.into_iter() {
            let k = cid.to_bytes();
            let v = v.as_ref();
            batch.put(k, v);
        }
        // This function is used in `fvm_ipld_car::load_car`
        // It reduces time cost of loading mainnet snapshot
        // by ~10% by not writing to WAL(write ahead log).
        // Ok(self.db.write_without_wal(batch)?)
        // For some reason with the `write_without_wal` version if I restart the application
        // it doesn't find the manifest root.
        Ok(self.db.write(batch)?)
    }
}
/// A [`Blockstore`] implementation that writes to a specific namespace, not the default like above.
#[derive(Clone)]
pub struct NamespaceBlockstore {
    // Shared handle to the underlying database.
    db: Arc<OptimisticTransactionDB>,
    // Name of the column family all reads and writes are scoped to.
    ns: String,
}
impl NamespaceBlockstore {
    /// Wrap a database, ensuring the namespace (column family) already exists.
    pub fn new(db: RocksDb, ns: String) -> anyhow::Result<Self> {
        // All namespaces are pre-created during open.
        if db.has_cf_handle(&ns) {
            Ok(Self { db: db.db, ns })
        } else {
            Err(anyhow!("namespace {ns} does not exist!"))
        }
    }

    // Unfortunately there doesn't seem to be a way to avoid having to
    // clone another instance for each operation :(
    fn cf(&self) -> anyhow::Result<Arc<BoundColumnFamily>> {
        match self.db.cf_handle(&self.ns) {
            Some(cf) => Ok(cf),
            None => Err(anyhow!("namespace {} does not exist!", self.ns)),
        }
    }
}
impl Blockstore for NamespaceBlockstore {
    /// Fetch block bytes by CID from this namespace's column family.
    fn get(&self, k: &Cid) -> anyhow::Result<Option<Vec<u8>>> {
        Ok(self.db.get_cf(&self.cf()?, k.to_bytes())?)
    }
    /// Store block bytes under the CID in this namespace's column family.
    fn put_keyed(&self, k: &Cid, block: &[u8]) -> anyhow::Result<()> {
        Ok(self.db.put_cf(&self.cf()?, k.to_bytes(), block)?)
    }
    // Called by the BufferedBlockstore during flush.
    fn put_many_keyed<D, I>(&self, blocks: I) -> anyhow::Result<()>
    where
        Self: Sized,
        D: AsRef<[u8]>,
        I: IntoIterator<Item = (Cid, D)>,
    {
        // Resolve the column family once and batch all writes atomically.
        let cf = self.cf()?;
        let mut batch = WriteBatchWithTransaction::<true>::default();
        for (cid, v) in blocks.into_iter() {
            let k = cid.to_bytes();
            let v = v.as_ref();
            batch.put_cf(&cf, k, v);
        }
        Ok(self.db.write(batch)?)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rocksdb/src/rocks/config.rs | fendermint/rocksdb/src/rocks/config.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use anyhow::anyhow;
use rocksdb::{
BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, DataBlockIndexType, LogLevel,
Options,
};
use serde::{Deserialize, Serialize};
/// Only subset of possible options is implemented, add missing ones when needed.
/// For description of different options please refer to the `rocksdb` crate documentation.
/// <https://docs.rs/rocksdb/latest/rocksdb/>
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct RocksDbConfig {
    pub create_if_missing: bool,
    pub parallelism: i32,
    /// This is the `memtable` size in bytes.
    pub write_buffer_size: usize,
    pub max_open_files: i32,
    pub max_background_jobs: Option<i32>,
    // Parsed by `compaction_style_from_str`: "level", "universal", "fifo" or "none".
    pub compaction_style: String,
    // Parsed by `compression_type_from_str`; valid values depend on enabled cargo features.
    pub compression_type: String,
    pub enable_statistics: bool,
    pub stats_dump_period_sec: u32,
    // Parsed by `log_level_from_str`.
    pub log_level: String,
    pub optimize_filters_for_hits: bool,
    // When non-negative, interpreted as the block cache size in MiB for the
    // point-lookup tuning; negative disables that tuning (see `From<&RocksDbConfig>`).
    pub optimize_for_point_lookup: i32,
}
impl Default for RocksDbConfig {
    fn default() -> Self {
        Self {
            create_if_missing: true,
            // One background thread per logical CPU.
            parallelism: num_cpus::get() as i32,
            write_buffer_size: 2usize.pow(30), // 1 GiB
            // NOTE: -1 presumably means "no limit" per RocksDB convention — confirm.
            max_open_files: -1,
            max_background_jobs: None,
            // "none" disables auto compactions (see `compaction_style_from_str`).
            compaction_style: "none".into(),
            compression_type: "lz4".into(),
            enable_statistics: false,
            stats_dump_period_sec: 600,
            log_level: "warn".into(),
            optimize_filters_for_hits: true,
            // 8 MiB block cache for the point-lookup tuning.
            optimize_for_point_lookup: 8,
        }
    }
}
impl From<&RocksDbConfig> for Options {
    /// Translate the serializable configuration into RocksDB options.
    ///
    /// # Panics
    ///
    /// Panics with a descriptive message if `compaction_style`,
    /// `compression_type` or `log_level` hold unrecognized values.
    fn from(config: &RocksDbConfig) -> Self {
        let mut db_opts = Options::default();
        db_opts.create_if_missing(config.create_if_missing);
        db_opts.increase_parallelism(config.parallelism);
        db_opts.set_write_buffer_size(config.write_buffer_size);
        db_opts.set_max_open_files(config.max_open_files);
        if let Some(max_background_jobs) = config.max_background_jobs {
            db_opts.set_max_background_jobs(max_background_jobs);
        }
        // `None` compaction style means auto compactions are disabled entirely.
        // Use `expect` rather than bare `unwrap` so a bad config value produces
        // an actionable panic message.
        let compaction_style = compaction_style_from_str(&config.compaction_style)
            .expect("invalid compaction style in RocksDB config");
        if let Some(compaction_style) = compaction_style {
            db_opts.set_compaction_style(compaction_style);
            db_opts.set_disable_auto_compactions(false);
        } else {
            db_opts.set_disable_auto_compactions(true);
        }
        db_opts.set_compression_type(
            compression_type_from_str(&config.compression_type)
                .expect("invalid compression type in RocksDB config"),
        );
        if config.enable_statistics {
            db_opts.set_stats_dump_period_sec(config.stats_dump_period_sec);
            db_opts.enable_statistics();
        };
        db_opts.set_log_level(
            log_level_from_str(&config.log_level).expect("invalid log level in RocksDB config"),
        );
        db_opts.set_optimize_filters_for_hits(config.optimize_filters_for_hits);
        // Comes from https://github.com/facebook/rocksdb/blob/main/options/options.cc#L606
        // Only modified to upgrade format to v5
        if !config.optimize_for_point_lookup.is_negative() {
            // Interpreted as the block cache size in MiB.
            let cache_size = config.optimize_for_point_lookup as usize;
            let mut opts = BlockBasedOptions::default();
            opts.set_format_version(5);
            opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
            opts.set_data_block_hash_ratio(0.75);
            opts.set_bloom_filter(10.0, false);
            let cache = Cache::new_lru_cache(cache_size * 1024 * 1024);
            opts.set_block_cache(&cache);
            db_opts.set_block_based_table_factory(&opts);
            db_opts.set_memtable_prefix_bloom_ratio(0.02);
            db_opts.set_memtable_whole_key_filtering(true);
        }
        db_opts
    }
}
/// Converts string to a compaction style `RocksDB` variant.
/// `"none"` maps to `Ok(None)`, which callers use to disable auto compactions.
fn compaction_style_from_str(s: &str) -> anyhow::Result<Option<DBCompactionStyle>> {
    let style = match s.to_lowercase().as_str() {
        "level" => Some(DBCompactionStyle::Level),
        "universal" => Some(DBCompactionStyle::Universal),
        "fifo" => Some(DBCompactionStyle::Fifo),
        "none" => None,
        _ => return Err(anyhow!("invalid compaction option")),
    };
    Ok(style)
}
/// Converts string to a compression type `RocksDB` variant.
fn compression_type_from_str(s: &str) -> anyhow::Result<DBCompressionType> {
    // Mirrors the match arms below, gated by the same cargo features, so the
    // error message only advertises options that are actually compiled in.
    let valid_options = [
        #[cfg(feature = "bzip2")]
        "bz2",
        #[cfg(feature = "lz4")]
        "lz4",
        #[cfg(feature = "lz4")]
        "lz4hc",
        #[cfg(feature = "snappy")]
        "snappy",
        #[cfg(feature = "zlib")]
        "zlib",
        #[cfg(feature = "zstd")]
        "zstd",
        "none",
    ];
    match s.to_lowercase().as_str() {
        #[cfg(feature = "bzip2")]
        "bz2" => Ok(DBCompressionType::Bz2),
        #[cfg(feature = "lz4")]
        "lz4" => Ok(DBCompressionType::Lz4),
        #[cfg(feature = "lz4")]
        "lz4hc" => Ok(DBCompressionType::Lz4hc),
        #[cfg(feature = "snappy")]
        "snappy" => Ok(DBCompressionType::Snappy),
        #[cfg(feature = "zlib")]
        "zlib" => Ok(DBCompressionType::Zlib),
        #[cfg(feature = "zstd")]
        "zstd" => Ok(DBCompressionType::Zstd),
        "none" => Ok(DBCompressionType::None),
        opt => Err(anyhow!(
            "invalid compression option: {opt}, valid options: {}",
            valid_options.join(",")
        )),
    }
}
/// Converts string to a log level `RocksDB` variant.
///
/// Recognized values (case-insensitive): `debug`, `info`, `warn`, `error`,
/// `fatal`, `header`.
fn log_level_from_str(s: &str) -> anyhow::Result<LogLevel> {
    match s.to_lowercase().as_str() {
        "debug" => Ok(LogLevel::Debug),
        // Previously missing: without this arm `LogLevel::Info` could not be
        // selected through the configuration at all.
        "info" => Ok(LogLevel::Info),
        "warn" => Ok(LogLevel::Warn),
        "error" => Ok(LogLevel::Error),
        "fatal" => Ok(LogLevel::Fatal),
        "header" => Ok(LogLevel::Header),
        _ => Err(anyhow!("invalid log level option")),
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use rocksdb::DBCompactionStyle;
    #[test]
    fn compaction_style_from_str_test() {
        // Inputs exercise case-insensitivity, the "none" case, and rejection.
        let test_cases = vec![
            ("Level", Ok(Some(DBCompactionStyle::Level))),
            ("UNIVERSAL", Ok(Some(DBCompactionStyle::Universal))),
            ("fifo", Ok(Some(DBCompactionStyle::Fifo))),
            ("none", Ok(None)),
            ("cthulhu", Err(anyhow!("some error message"))),
        ];
        for (input, expected) in test_cases {
            let actual = compaction_style_from_str(input);
            if let Ok(compaction_style) = actual {
                assert_eq!(expected.unwrap(), compaction_style);
            } else {
                // Only the kind of outcome is compared, not the error message.
                assert!(expected.is_err());
            }
        }
    }
    #[test]
    fn compression_style_from_str_test() {
        let test_cases = vec![
            #[cfg(feature = "bzip2")]
            ("bz2", Ok(DBCompressionType::Bz2)),
            #[cfg(feature = "lz4")]
            ("lz4", Ok(DBCompressionType::Lz4)),
            #[cfg(feature = "lz4")]
            ("lz4HC", Ok(DBCompressionType::Lz4hc)),
            #[cfg(feature = "snappy")]
            ("SNAPPY", Ok(DBCompressionType::Snappy)),
            #[cfg(feature = "zlib")]
            ("zlib", Ok(DBCompressionType::Zlib)),
            #[cfg(feature = "zstd")]
            ("ZSTD", Ok(DBCompressionType::Zstd)),
            ("none", Ok(DBCompressionType::None)),
            ("cthulhu", Err(anyhow!("some error message"))),
        ];
        for (input, expected) in test_cases {
            let actual = compression_type_from_str(input);
            if let Ok(compression_type) = actual {
                assert_eq!(expected.unwrap(), compression_type);
                // Also verify that RocksDB can actually open a database with
                // this compression type (i.e. the codec was compiled in).
                let dir = tempfile::tempdir().unwrap();
                let mut opt = rocksdb::Options::default();
                opt.create_if_missing(true);
                opt.set_compression_type(compression_type);
                rocksdb::DB::open(&opt, dir.path()).unwrap();
            } else {
                assert!(expected.is_err());
            }
        }
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rocksdb/src/rocks/error.rs | fendermint/rocksdb/src/rocks/error.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use thiserror::Error;
/// Database error
///
/// Either an error surfaced by the underlying RocksDB instance, or a
/// string-based catch-all for failures produced by this crate itself.
#[derive(Debug, Error)]
pub enum Error {
    // Propagated verbatim from the `rocksdb` crate; `#[from]` lets `?` convert.
    #[error(transparent)]
    Database(#[from] rocksdb::Error),
    // Free-form message for conditions not covered by `rocksdb::Error`.
    #[error("{0}")]
    Other(String),
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/rocksdb/src/rocks/mod.rs | fendermint/rocksdb/src/rocks/mod.rs | // Copyright 2022-2024 Protocol Labs
// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use rocksdb::{
ColumnFamilyDescriptor, ErrorKind, OptimisticTransactionDB, Options, WriteBatchWithTransaction,
};
use std::{path::Path, sync::Arc};
mod config;
mod error;
pub use config::RocksDbConfig;
pub use error::Error;
#[derive(Clone)]
pub struct RocksDb {
    // Shared handle to the optimistic-transaction database; cloning `RocksDb`
    // is cheap (only bumps the `Arc` refcount).
    pub db: Arc<OptimisticTransactionDB>,
    // Options the DB was opened with; kept so new column families can be
    // created with the same settings (see `new_cf_handle`).
    options: Options,
}
/// `RocksDb` is used as the KV store. Unlike the implementation in Forest
/// which is using the `DB` type, this one is using `OptimisticTransactionDB`
/// so that we can make use of transactions that can be rolled back.
///
/// Usage:
/// ```no_run
/// use fendermint_rocksdb::{RocksDb, RocksDbConfig};
///
/// let mut db = RocksDb::open("test_db", &RocksDbConfig::default()).unwrap();
/// ```
impl RocksDb {
    /// Open existing column families.
    pub fn open<P>(path: P, config: &RocksDbConfig) -> Result<Self, Error>
    where
        P: AsRef<Path>,
    {
        // Delegate with an empty "create these" list: only pre-existing
        // column families are opened, none are created.
        let cfs: Vec<String> = Vec::new();
        Self::open_cf(path, config, cfs.iter())
    }
    /// Open existing column families and potentially create new ones, using the same config.
    pub fn open_cf<P, I, N>(path: P, config: &RocksDbConfig, cfs: I) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: Iterator<Item = N>,
        N: AsRef<str>,
    {
        let db_opts: rocksdb::Options = config.into();
        // RocksDB requires all existing column families to be listed at open
        // time, so discover them first and open each with the same options.
        let ex_cfs = Self::list_cf(&path, config)?;
        let ex_cfs = ex_cfs
            .into_iter()
            .map(|cf| ColumnFamilyDescriptor::new(cf, db_opts.clone()));
        let db = OptimisticTransactionDB::open_cf_descriptors(&db_opts, path, ex_cfs)?;
        let db = Self {
            db: Arc::new(db),
            options: db_opts,
        };
        // Create any requested column families that don't exist yet.
        for cf in cfs {
            if !db.has_cf_handle(cf.as_ref()) {
                db.new_cf_handle(cf.as_ref())?;
            }
        }
        Ok(db)
    }
    /// List existing column families in a database.
    ///
    /// These need to be passed to `open_cf` when we are reopening the database.
    /// If the database doesn't exist, the method returns an empty list.
    fn list_cf<P>(path: P, config: &RocksDbConfig) -> Result<Vec<String>, Error>
    where
        P: AsRef<Path>,
    {
        let db_opts: rocksdb::Options = config.into();
        match OptimisticTransactionDB::<rocksdb::MultiThreaded>::list_cf(&db_opts, path) {
            Ok(cfs) => Ok(cfs),
            // An IOError here is taken to mean the database doesn't exist yet.
            Err(e) if e.kind() == ErrorKind::IOError => Ok(Vec::new()),
            Err(e) => Err(Error::Database(e)),
        }
    }
    /// RocksDB statistics string, if statistics were enabled in the options.
    pub fn get_statistics(&self) -> Option<String> {
        self.options.get_statistics()
    }
    /// Read the value stored under `key`; `None` if the key is absent.
    pub fn read<K>(&self, key: K) -> Result<Option<Vec<u8>>, Error>
    where
        K: AsRef<[u8]>,
    {
        self.db.get(key).map_err(Error::from)
    }
    /// Store `value` under `key`, overwriting any previous value.
    pub fn write<K, V>(&self, key: K, value: V) -> Result<(), Error>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        Ok(self.db.put(key, value)?)
    }
    /// Delete the value stored under `key`.
    pub fn delete<K>(&self, key: K) -> Result<(), Error>
    where
        K: AsRef<[u8]>,
    {
        Ok(self.db.delete(key)?)
    }
    /// Check whether `key` is present without copying the value out.
    pub fn exists<K>(&self, key: K) -> Result<bool, Error>
    where
        K: AsRef<[u8]>,
    {
        // `get_pinned` avoids allocating a Vec just to test for existence.
        self.db
            .get_pinned(key)
            .map(|v| v.is_some())
            .map_err(Error::from)
    }
    /// Write all key/value pairs in a single batch.
    ///
    /// NOTE: uses `write_without_wal`, so the batch bypasses the write-ahead
    /// log; such writes may be lost if the process crashes before a flush.
    pub fn bulk_write<K, V>(&self, values: &[(K, V)]) -> Result<(), Error>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let mut batch = WriteBatchWithTransaction::<true>::default();
        for (k, v) in values {
            batch.put(k, v);
        }
        Ok(self.db.write_without_wal(batch)?)
    }
    /// Flush memtables to disk.
    // NOTE(review): this stringifies the error into `Error::Other` rather
    // than preserving it as `Error::Database` like the other methods —
    // confirm whether that difference is intentional.
    pub fn flush(&self) -> Result<(), Error> {
        self.db.flush().map_err(|e| Error::Other(e.to_string()))
    }
    /// Check if a column family exists
    pub fn has_cf_handle(&self, name: &str) -> bool {
        self.db.cf_handle(name).is_some()
    }
    /// Create a new column family, using the same options the database was
    /// opened with.
    ///
    /// Returns error if it already exists.
    pub fn new_cf_handle<'a>(&self, name: &'a str) -> Result<&'a str, Error> {
        if self.has_cf_handle(name) {
            return Err(Error::Other(format!(
                "column family '{name}' already exists"
            )));
        }
        self.db.create_cf(name, &self.options)?;
        Ok(name)
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/crypto/src/lib.rs | fendermint/crypto/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use base64::engine::GeneralPurpose;
use base64::engine::{DecodePaddingMode, GeneralPurposeConfig};
use base64::{alphabet, Engine};
use rand::Rng;
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
pub use libsecp256k1::{PublicKey, RecoveryId, Signature};
/// A [`GeneralPurpose`] engine using the [`alphabet::STANDARD`] base64 alphabet,
/// writing padding bytes when encoding but requiring no padding when decoding.
const B64_ENGINE: GeneralPurpose = GeneralPurpose::new(
    &alphabet::STANDARD,
    GeneralPurposeConfig::new()
        .with_encode_padding(true)
        // `Indifferent`: accept input with or without trailing `=` padding.
        .with_decode_padding_mode(DecodePaddingMode::Indifferent),
);
/// Encode bytes in a format that the Genesis deserializer can handle.
pub fn to_b64(bz: &[u8]) -> String {
    B64_ENGINE.encode(bz)
}
/// Decode bytes from Base64 (padding optional; see [`B64_ENGINE`]).
pub fn from_b64(b64: &str) -> anyhow::Result<Vec<u8>> {
    Ok(B64_ENGINE.decode(b64)?)
}
/// Normalize the wrapped public key's affine coordinates,
/// which is to ensure the results look the same after a serialization roundtrip.
pub fn normalize_public_key(pk: PublicKey) -> PublicKey {
    let mut aff: libsecp256k1::curve::Affine = pk.into();
    aff.x.normalize();
    aff.y.normalize();
    // `pk` was already a valid public key, so converting the normalized
    // point back is expected to succeed.
    PublicKey::try_from(aff).unwrap()
}
/// Wrapper around a [libsecp256k1::SecretKey] that implements [Zeroize].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SecretKey(libsecp256k1::SecretKey);
impl SecretKey {
    /// Sign a 32-byte message digest, returning the signature and recovery ID.
    pub fn sign(&self, bz: &[u8; 32]) -> (libsecp256k1::Signature, libsecp256k1::RecoveryId) {
        libsecp256k1::sign(&libsecp256k1::Message::parse(bz), &self.0)
    }
    /// Generate a fresh random secret key from the given RNG.
    pub fn random<R: Rng>(rng: &mut R) -> Self {
        Self(libsecp256k1::SecretKey::random(rng))
    }
    /// Derive the corresponding public key.
    pub fn public_key(&self) -> PublicKey {
        PublicKey::from_secret_key(&self.0)
    }
    /// Serialize the raw key bytes; the returned buffer zeroizes itself on drop.
    pub fn serialize(&self) -> Zeroizing<[u8; libsecp256k1::util::SECRET_KEY_SIZE]> {
        Zeroizing::new(self.0.serialize())
    }
}
impl Zeroize for SecretKey {
    // Overwrite the key material in place: swap in a default key, then clear
    // the scalar limbs of the old one so the secret doesn't linger in memory.
    fn zeroize(&mut self) {
        let mut sk = libsecp256k1::SecretKey::default();
        std::mem::swap(&mut self.0, &mut sk);
        let mut sk: libsecp256k1::curve::Scalar = sk.into();
        sk.0.zeroize();
    }
}
// Zeroize on drop so the secret is wiped even when the key simply goes out
// of scope.
impl Drop for SecretKey {
    fn drop(&mut self) {
        self.zeroize()
    }
}
// Marker trait advertising the drop-time zeroization implemented above.
impl ZeroizeOnDrop for SecretKey {}
impl TryFrom<Vec<u8>> for SecretKey {
    type Error = libsecp256k1::Error;
    /// Parse a secret key from raw bytes, zeroizing the input buffer on
    /// success so the caller's copy of the secret is wiped.
    // NOTE(review): if parsing fails, `?` returns early and `value` is NOT
    // zeroized — confirm whether that is acceptable.
    fn try_from(mut value: Vec<u8>) -> Result<Self, Self::Error> {
        let sk = libsecp256k1::SecretKey::parse_slice(&value)?;
        value.zeroize();
        Ok(Self(sk))
    }
}
impl From<libsecp256k1::SecretKey> for SecretKey {
    fn from(value: libsecp256k1::SecretKey) -> Self {
        Self(value)
    }
}
impl From<&SecretKey> for PublicKey {
    // Convenience conversion: derive the public key without consuming the secret.
    fn from(value: &SecretKey) -> Self {
        value.public_key()
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/abci/src/lib.rs | fendermint/abci/src/lib.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
mod application;
pub use application::{AbciResult, Application, ApplicationService};
pub mod util;
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/abci/src/application.rs | fendermint/abci/src/application.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
use async_trait::async_trait;
use futures::future::FutureExt;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tendermint::abci::{request, response, Request, Response};
use tower::Service;
use tower_abci::BoxError;
use crate::util::take_until_max_size;
/// Allow returning a result from the methods, so the [`Application`]
/// implementation doesn't have to be full of `.expect("failed...")`
/// or `.unwrap()` calls. It is still good practice to use for example
/// `anyhow::Context` to provide better error feedback.
///
/// If an error is returned, the [`tower_abci::Service`] will handle
/// it by crashing at the moment.
pub type AbciResult<T> = std::result::Result<T, BoxError>;
/// Asynchronous equivalent of [tendermint_abci::Application].
///
/// See the [spec](https://github.com/tendermint/tendermint/blob/v0.37.0-rc2/spec/abci) for the expected behaviour.
///
/// Every method has a default (echo back, accept, or default response), so
/// implementors only need to override what they care about.
#[allow(unused_variables)]
#[async_trait]
pub trait Application {
    /// Echo back the same message as provided in the request.
    async fn echo(&self, request: request::Echo) -> AbciResult<response::Echo> {
        Ok(response::Echo {
            message: request.message,
        })
    }
    /// Provide information about the ABCI application.
    async fn info(&self, request: request::Info) -> AbciResult<response::Info> {
        Ok(Default::default())
    }
    /// Called once upon genesis.
    async fn init_chain(&self, request: request::InitChain) -> AbciResult<response::InitChain> {
        Ok(Default::default())
    }
    /// Query the application for data at the current or past height.
    async fn query(&self, request: request::Query) -> AbciResult<response::Query> {
        Ok(Default::default())
    }
    /// Check the given transaction before putting it into the local mempool.
    async fn check_tx(&self, request: request::CheckTx) -> AbciResult<response::CheckTx> {
        Ok(Default::default())
    }
    /// Opportunity for the application to modify the proposed transactions.
    ///
    /// The application must copy the transactions it wants to propose into the response and respect the size restrictions.
    ///
    /// See the [spec](https://github.com/tendermint/tendermint/tree/v0.37.0-rc2/spec/abci#prepareproposal).
    async fn prepare_proposal(
        &self,
        request: request::PrepareProposal,
    ) -> AbciResult<response::PrepareProposal> {
        // Default: keep the proposed transactions in order, up to the byte limit.
        let txs = take_until_max_size(request.txs, request.max_tx_bytes.try_into().unwrap());
        Ok(response::PrepareProposal { txs })
    }
    /// Opportunity for the application to inspect the proposal before voting on it.
    ///
    /// The application should accept the proposal unless there's something wrong with it.
    ///
    /// See the [spec](https://github.com/tendermint/tendermint/tree/v0.37.0-rc2/spec/abci#processproposal).
    async fn process_proposal(
        &self,
        request: request::ProcessProposal,
    ) -> AbciResult<response::ProcessProposal> {
        Ok(response::ProcessProposal::Accept)
    }
    /// Produce a vote extension for the current height; empty by default.
    async fn extend_vote(&self, request: request::ExtendVote) -> AbciResult<response::ExtendVote> {
        Ok(response::ExtendVote {
            vote_extension: Default::default(),
        })
    }
    /// Verify another validator's vote extension. The default accepts only
    /// empty extensions, matching the default `extend_vote`.
    async fn verify_vote_extension(
        &self,
        request: request::VerifyVoteExtension,
    ) -> AbciResult<response::VerifyVoteExtension> {
        if request.vote_extension.is_empty() {
            Ok(response::VerifyVoteExtension::Accept)
        } else {
            Ok(response::VerifyVoteExtension::Reject)
        }
    }
    /// Execute the decided block; the default returns empty events/results
    /// and a default app hash.
    async fn finalize_block(
        &self,
        request: request::FinalizeBlock,
    ) -> AbciResult<response::FinalizeBlock> {
        Ok(response::FinalizeBlock {
            events: Default::default(),
            tx_results: Default::default(),
            validator_updates: Default::default(),
            consensus_param_updates: Default::default(),
            app_hash: Default::default(),
        })
    }
    /// Commit the current state at the current height.
    async fn commit(&self) -> AbciResult<response::Commit> {
        Ok(Default::default())
    }
    /// Used during state sync to discover available snapshots on peers.
    async fn list_snapshots(&self) -> AbciResult<response::ListSnapshots> {
        Ok(Default::default())
    }
    /// Called when bootstrapping the node using state sync.
    async fn offer_snapshot(
        &self,
        request: request::OfferSnapshot,
    ) -> AbciResult<response::OfferSnapshot> {
        Ok(Default::default())
    }
    /// Used during state sync to retrieve chunks of snapshots from peers.
    async fn load_snapshot_chunk(
        &self,
        request: request::LoadSnapshotChunk,
    ) -> AbciResult<response::LoadSnapshotChunk> {
        Ok(Default::default())
    }
    /// Apply the given snapshot chunk to the application's state.
    async fn apply_snapshot_chunk(
        &self,
        request: request::ApplySnapshotChunk,
    ) -> AbciResult<response::ApplySnapshotChunk> {
        Ok(Default::default())
    }
}
/// Wrapper to adapt an `Application` to a `tower::Service`.
// The inner application must be cheaply cloneable: `call` clones it on every
// request so it can be moved into the boxed response future.
pub struct ApplicationService<A: Application + Sync + Send + Clone + 'static>(pub A);
impl<A> Service<Request> for ApplicationService<A>
where
    A: Application + Sync + Send + Clone + 'static,
{
    type Response = Response;
    type Error = BoxError;
    type Future = Pin<Box<dyn Future<Output = Result<Response, BoxError>> + Send + 'static>>;
    /// At this level the application is always ready to receive requests.
    /// Throttling is handled in the layers added on top of it.
    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }
    /// Dispatch an ABCI request to the matching `Application` method and
    /// return the response as a boxed future, logging any error on the way.
    fn call(&mut self, req: Request) -> Self::Future {
        // Must make sure this is a cheap clone, required so the app can be moved into the async boxed future.
        // See https://tokio.rs/blog/2021-05-14-inventing-the-service-trait
        // The alternative is to perform the operation synchronously right here,
        // but if we use the `tower_abci::buffer4::Worker` that means nothing else
        // get processed during that time.
        let app = self.0.clone();
        // Another trick to avoid any subtle bugs is the mem::replace.
        // See https://github.com/tower-rs/tower/issues/547
        let app: A = std::mem::replace(&mut self.0, app);
        // Because this is async, make sure the `Consensus` service is wrapped in a concurrency limiting Tower layer.
        let res = async move {
            let res = match req {
                Request::Echo(r) => Response::Echo(log_error(app.echo(r).await)?),
                Request::Info(r) => Response::Info(log_error(app.info(r).await)?),
                Request::InitChain(r) => Response::InitChain(log_error(app.init_chain(r).await)?),
                Request::Query(r) => Response::Query(log_error(app.query(r).await)?),
                Request::CheckTx(r) => Response::CheckTx(log_error(app.check_tx(r).await)?),
                Request::PrepareProposal(r) => {
                    Response::PrepareProposal(log_error(app.prepare_proposal(r).await)?)
                }
                Request::ProcessProposal(r) => {
                    Response::ProcessProposal(log_error(app.process_proposal(r).await)?)
                }
                Request::FinalizeBlock(r) => {
                    Response::FinalizeBlock(log_error(app.finalize_block(r).await)?)
                }
                Request::ExtendVote(r) => {
                    Response::ExtendVote(log_error(app.extend_vote(r).await)?)
                }
                Request::VerifyVoteExtension(r) => {
                    Response::VerifyVoteExtension(log_error(app.verify_vote_extension(r).await)?)
                }
                Request::Commit => Response::Commit(log_error(app.commit().await)?),
                Request::ListSnapshots => {
                    Response::ListSnapshots(log_error(app.list_snapshots().await)?)
                }
                Request::OfferSnapshot(r) => {
                    Response::OfferSnapshot(log_error(app.offer_snapshot(r).await)?)
                }
                Request::LoadSnapshotChunk(r) => {
                    Response::LoadSnapshotChunk(log_error(app.load_snapshot_chunk(r).await)?)
                }
                Request::ApplySnapshotChunk(r) => {
                    Response::ApplySnapshotChunk(log_error(app.apply_snapshot_chunk(r).await)?)
                }
                // Flush is a protocol-level message; reaching here is a wiring bug.
                Request::Flush => panic!("Flush should be handled by the Server!"),
            };
            Ok(res)
        };
        res.boxed()
    }
}
/// Emit an ERROR-level trace for a failed ABCI call, then pass the result
/// through unchanged either way.
fn log_error<T>(res: AbciResult<T>) -> AbciResult<T> {
    match res {
        Err(e) => {
            tracing::error!("failed to execute ABCI request: {e:#}");
            Err(e)
        }
        ok => ok,
    }
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/abci/src/util.rs | fendermint/abci/src/util.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
/// Take the first transactions until the first one that would exceed the maximum limit.
///
/// The function does not skip or reorder transactions even if a later one would stay within the limit.
pub fn take_until_max_size<T: AsRef<[u8]>>(txs: Vec<T>, max_tx_bytes: usize) -> Vec<T> {
    let mut used: usize = 0;
    txs.into_iter()
        .take_while(|tx| {
            // Saturating add guards against (theoretical) usize overflow.
            let total = used.saturating_add(tx.as_ref().len());
            if total > max_tx_bytes {
                false
            } else {
                used = total;
                true
            }
        })
        .collect()
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
ChainSafe/Delorean-Protocol | https://github.com/ChainSafe/Delorean-Protocol/blob/7f0f8b8a48f44486434bae881ba85785f6f7cf64/fendermint/abci/examples/kvstore.rs | fendermint/abci/examples/kvstore.rs | // Copyright 2022-2024 Protocol Labs
// SPDX-License-Identifier: Apache-2.0, MIT
//! Example ABCI application, an in-memory key-value store.
use async_stm::{atomically, TVar};
use async_trait::async_trait;
use fendermint_abci::{
util::take_until_max_size, AbciResult as Result, Application, ApplicationService,
};
use structopt::StructOpt;
use tendermint::abci::{request, response, Event, EventAttributeIndexExt};
use tower::ServiceBuilder;
use tower_abci::{v038::split, v038::Server};
use tracing::{info, Level};
// For the sake of example, show the relationship between buffering, concurrency and block size.
const MAX_TXNS: usize = 100;
/// In-memory, hashmap-backed key-value store ABCI application.
///
/// Using STM just to see if it works. It's obviously an overkill here.
#[derive(Clone)]
struct KVStore {
    // The key-value data; `im::HashMap` is a persistent map, so STM
    // transactions can cheaply snapshot and replace it.
    store: TVar<im::HashMap<String, String>>,
    // Last committed block height.
    height: TVar<u32>,
    // "Hash" of the committed state (see `commit`: just the store length).
    app_hash: TVar<[u8; 8]>,
}
impl KVStore {
    /// Create an empty store at height 0 with a zeroed app hash.
    pub fn new() -> Self {
        Self {
            store: TVar::new(im::HashMap::new()),
            height: TVar::new(Default::default()),
            app_hash: TVar::new(Default::default()),
        }
    }
}
#[async_trait]
impl Application for KVStore {
    /// Report the application identity plus the last committed height/hash.
    async fn info(&self, _request: request::Info) -> Result<response::Info> {
        let (height, app_hash) = atomically(|| {
            let height = self.height.read_clone()?.into();
            let app_hash = self.app_hash.read()?.to_vec().try_into().unwrap();
            Ok((height, app_hash))
        })
        .await;
        Ok(response::Info {
            data: "kvstore-example".to_string(),
            version: "0.1.0".to_string(),
            app_version: 1,
            last_block_height: height,
            last_block_app_hash: app_hash,
        })
    }
    /// Look up the value stored under the key carried in the request data.
    async fn query(&self, request: request::Query) -> Result<response::Query> {
        let key = String::from_utf8(request.data.to_vec()).unwrap();
        let (value, log) = atomically(|| match self.store.read()?.get(&key) {
            Some(v) => Ok((v.clone(), "exists".to_string())),
            None => Ok(("".to_string(), "does not exist".to_string())),
        })
        .await;
        Ok(response::Query {
            log,
            key: key.into_bytes().into(),
            value: value.into_bytes().into(),
            ..Default::default()
        })
    }
    async fn prepare_proposal(
        &self,
        request: request::PrepareProposal,
    ) -> Result<response::PrepareProposal> {
        let mut txs = take_until_max_size(request.txs, request.max_tx_bytes.try_into().unwrap());
        // Enforce the transaction limit so that we don't have a problem with buffering.
        txs.truncate(MAX_TXNS);
        Ok(response::PrepareProposal { txs })
    }
    /// Reject proposals exceeding the limit enforced in `prepare_proposal`.
    async fn process_proposal(
        &self,
        request: request::ProcessProposal,
    ) -> Result<response::ProcessProposal> {
        if request.txs.len() > MAX_TXNS {
            Ok(response::ProcessProposal::Reject)
        } else {
            Ok(response::ProcessProposal::Accept)
        }
    }
    /// Apply each `key=value` transaction to the store, emitting one event
    /// per transaction. A transaction without `=` is stored as `key=key`.
    async fn finalize_block(
        &self,
        request: request::FinalizeBlock,
    ) -> Result<response::FinalizeBlock> {
        let mut events = vec![];
        for tx in request.txs.iter() {
            let tx = String::from_utf8(tx.to_vec()).unwrap();
            let (key, value) = match tx.split('=').collect::<Vec<_>>() {
                k if k.len() == 1 => (k[0], k[0]),
                kv => (kv[0], kv[1]),
            };
            atomically(|| {
                self.store.update(|mut store| {
                    store.insert(key.into(), value.into());
                    store
                })
            })
            .await;
            info!(?key, ?value, "update");
            events.push(Event::new(
                "app",
                vec![
                    ("key", key).index(),
                    ("index_key", "index is working").index(),
                    ("noindex_key", "index is working").no_index(),
                ],
            ));
        }
        Ok(response::FinalizeBlock {
            // Was `events: events` — shorthand per clippy::redundant_field_names.
            events,
            tx_results: vec![Default::default(); request.txs.len()],
            validator_updates: vec![],
            consensus_param_updates: None,
            app_hash: Default::default(),
        })
    }
    /// Commit the state: the app hash is the store length, height increments.
    async fn commit(&self) -> Result<response::Commit> {
        let (retain_height, app_hash) = atomically(|| {
            // As in the other kvstore examples, just use store.len() as the "hash"
            let app_hash = (self.store.read()?.len() as u64).to_be_bytes();
            self.app_hash.replace(app_hash)?;
            let retain_height = self.height.modify(|h| (h + 1, h))?;
            Ok((retain_height.into(), app_hash.to_vec().try_into().unwrap()))
        })
        .await;
        info!(?retain_height, "commit");
        Ok(response::Commit {
            data: app_hash,
            retain_height,
        })
    }
}
/// Command-line options for the example server.
#[derive(Debug, StructOpt)]
struct Opt {
    /// Bind the TCP server to this host.
    #[structopt(short, long, default_value = "127.0.0.1")]
    host: String,
    /// Bind the TCP server to this port.
    #[structopt(short, long, default_value = "26658")]
    port: u16,
    /// Increase output logging verbosity to DEBUG level.
    #[structopt(short, long)]
    verbose: bool,
}
impl Opt {
    /// Tracing level derived from the `--verbose` flag.
    pub fn log_level(&self) -> Level {
        if self.verbose {
            Level::DEBUG
        } else {
            Level::INFO
        }
    }
}
// Entry point: parse CLI options, set up tracing, and serve the example
// ABCI application over TCP until the process is terminated.
#[tokio::main]
async fn main() {
    let opt = Opt::from_args();
    tracing_subscriber::fmt()
        .with_max_level(opt.log_level())
        .init();
    // Construct our ABCI application.
    let service = ApplicationService(KVStore::new());
    // Split it into components.
    let (consensus, mempool, snapshot, info) = split::service(service, 1);
    // Hand those components to the ABCI server. This is where tower layers could be added.
    let server = Server::builder()
        .consensus(
            // Because message handling is asynchronous, we must limit the concurrency of `consensus` to 1,
            // otherwise transactions can be executed in an arbitrary order. `buffer` is required to avoid
            // deadlocks in the connection handler; in ABCI++ (pre 2.0) we need to allow for all potential
            // messages in the block, plus the surrounding begin/end/commit methods to be pipelined. The
            // message limit is enforced in proposal preparation and processing.
            ServiceBuilder::new()
                .buffer(MAX_TXNS + 3)
                .concurrency_limit(1)
                .service(consensus),
        )
        .snapshot(snapshot)
        .mempool(mempool)
        .info(info)
        .finish()
        .unwrap();
    // Run the ABCI server.
    server
        .listen_tcp(format!("{}:{}", opt.host, opt.port))
        .await
        .unwrap();
}
| rust | Apache-2.0 | 7f0f8b8a48f44486434bae881ba85785f6f7cf64 | 2026-01-04T20:23:10.651123Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/validation/src/lib.rs | validation/src/lib.rs | #![allow(suspicious_double_ref_op)]
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
use std::path::PathBuf;
use std::{collections::BTreeMap, fmt::Display, process::ExitCode};
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
use comfy_table::{modifiers::UTF8_SOLID_INNER_BORDERS, presets::UTF8_FULL, Row, Table};
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
use extism::Plugin;
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
use extism_convert::Protobuf;
use modsurfer_convert::from_api;
use modsurfer_proto_v1::api::Module as ApiModule;
use anyhow::Result;
use human_bytes::human_bytes;
use parse_size::parse_size;
use serde::{Deserialize, Serialize};
use serde_with::skip_serializing_none;
mod diff;
pub use diff::Diff;
/// Top-level shape of a validation checkfile document.
#[derive(Debug, Deserialize, Default, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Validation {
    pub validate: Check,
}
/// The set of checks to run against a module. Every field is optional;
/// absent fields are simply not validated.
#[skip_serializing_none]
#[derive(Debug, Deserialize, Default, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Check {
    // Optional location of the module the checkfile applies to.
    pub url: Option<String>,
    pub allow_wasi: Option<bool>,
    pub imports: Option<Imports>,
    pub exports: Option<Exports>,
    pub size: Option<Size>,
    pub complexity: Option<Complexity>,
}
/// The output of the "Cyclomatic Complexity" algorithm run on a graph analysis of the WebAssembly
/// code inside the provided module. The risk is purely related to computational resource usage,
/// not code security or any other interpretation of risk.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub enum RiskLevel {
    #[serde(rename = "low")]
    Low,
    #[serde(rename = "medium")]
    Medium,
    #[serde(rename = "high")]
    High,
}
impl RiskLevel {
    // Upper complexity-score bound for each risk bucket. Defaults are
    // 2500 / 50000 / u32::MAX, overridable via the MODSURFER_RISK_* env
    // vars; a non-numeric override panics via `expect`.
    fn max(&self) -> u32 {
        match self {
            RiskLevel::Low => std::env::var("MODSURFER_RISK_LOW")
                .unwrap_or(2500.to_string())
                .parse::<u32>()
                .expect("valid low risk level setting"),
            RiskLevel::Medium => std::env::var("MODSURFER_RISK_MEDIUM")
                .unwrap_or(50000.to_string())
                .parse::<u32>()
                .expect("valid medium risk level setting"),
            RiskLevel::High => std::env::var("MODSURFER_RISK_HIGH")
                .unwrap_or(u32::MAX.to_string())
                .parse::<u32>()
                .expect("valid high risk level setting"),
        }
    }
}
/// Bucket a raw complexity score into a [`RiskLevel`] using the
/// (possibly env-var-overridden) per-level maximums.
impl From<u32> for RiskLevel {
    fn from(value: u32) -> Self {
        match value {
            v if v <= RiskLevel::Low.max() => RiskLevel::Low,
            v if v <= RiskLevel::Medium.max() => RiskLevel::Medium,
            _ => RiskLevel::High,
        }
    }
}
impl Display for RiskLevel {
    /// Render the lowercase label used in reports and serialized output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            RiskLevel::Low => "low",
            RiskLevel::Medium => "medium",
            RiskLevel::High => "high",
        };
        f.write_str(label)
    }
}
/// Complexity constraints; only `max_risk` is currently honored (see `kind`).
#[skip_serializing_none]
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(deny_unknown_fields)]
pub struct Complexity {
    pub max_risk: Option<RiskLevel>,
    pub max_score: Option<u32>,
}
/// Discriminated form of a complexity check, produced by [`Complexity::kind`].
#[allow(unused)]
pub enum ComplexityKind {
    MaxRisk(RiskLevel),
    MaxScore(u32),
}
impl Complexity {
    /// Resolve which complexity check is configured.
    ///
    /// Errors when nothing is set, when only `max_score` is set, or when
    /// both are set — only `max_risk` on its own is currently supported.
    fn kind(&self) -> Result<ComplexityKind> {
        match (self.max_risk.clone(), self.max_score) {
            (None, None) => anyhow::bail!("No complexity check found."),
            (None, Some(_score)) => {
                anyhow::bail!("Only `complexity.max_risk` is currently supported.")
            }
            (Some(risk), None) => Ok(ComplexityKind::MaxRisk(risk)),
            (Some(_), Some(_)) => {
                anyhow::bail!("Only `complexity.max_risk` is currently supported.")
            }
        }
    }
}
/// A namespace entry in the checkfile: either a bare name, or a name plus
/// the functions expected within that namespace (untagged in serde, so both
/// spellings deserialize from the same field).
#[skip_serializing_none]
#[derive(Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
#[serde(untagged)]
pub enum NamespaceItem {
    Name(String),
    Item {
        name: String,
        #[serde(default)]
        functions: Vec<FunctionItem>,
    },
}
impl NamespaceItem {
    /// Namespace name, regardless of which variant this is.
    fn name(&self) -> &String {
        match self {
            NamespaceItem::Name(name) => name,
            NamespaceItem::Item { name, .. } => name,
        }
    }
    /// Functions listed for this namespace (empty for the bare-name form).
    fn functions(&self) -> &[FunctionItem] {
        match self {
            NamespaceItem::Name(_) => &[],
            NamespaceItem::Item { functions, .. } => functions,
        }
    }
}
/// An import entry in the checkfile: either a bare function name, or a
/// detailed form that can also pin the namespace and signature.
#[skip_serializing_none]
#[derive(Debug, Deserialize, Serialize, PartialEq)]
#[serde(untagged)]
#[serde(deny_unknown_fields)]
pub enum ImportItem {
    Name(String),
    Item {
        namespace: Option<String>,
        name: String,
        params: Option<Vec<modsurfer_module::ValType>>,
        results: Option<Vec<modsurfer_module::ValType>>,
    },
}
impl ImportItem {
    // Accessors below return `None` for the bare-name form, meaning the
    // corresponding property is not constrained by the checkfile.
    fn name(&self) -> &String {
        match self {
            ImportItem::Name(name) => name,
            ImportItem::Item { name, .. } => name,
        }
    }
    fn namespace(&self) -> Option<&str> {
        match self {
            ImportItem::Name(_) => None,
            ImportItem::Item { namespace, .. } => namespace.as_deref(),
        }
    }
    fn results(&self) -> Option<&[modsurfer_module::ValType]> {
        match self {
            ImportItem::Name(_) => None,
            ImportItem::Item { results, .. } => results.as_deref(),
        }
    }
    fn params(&self) -> Option<&[modsurfer_module::ValType]> {
        match self {
            ImportItem::Name(_) => None,
            ImportItem::Item { params, .. } => params.as_deref(),
        }
    }
}
/// An export/function entry in the checkfile: either a bare name, or a
/// detailed form that can also pin the signature and code hash.
#[skip_serializing_none]
#[derive(Debug, Deserialize, Serialize)]
#[serde(untagged)]
#[serde(deny_unknown_fields)]
pub enum FunctionItem {
    Name(String),
    Item {
        name: String,
        params: Option<Vec<modsurfer_module::ValType>>,
        results: Option<Vec<modsurfer_module::ValType>>,
        hash: Option<String>,
    },
}
impl FunctionItem {
    // Accessors below return `None` for the bare-name form, meaning the
    // corresponding property is not constrained by the checkfile.
    fn name(&self) -> &String {
        match self {
            FunctionItem::Name(name) => name,
            FunctionItem::Item { name, .. } => name,
        }
    }
    fn hash(&self) -> Option<&str> {
        match self {
            FunctionItem::Name(_name) => None,
            FunctionItem::Item { hash, .. } => hash.as_deref(),
        }
    }
    fn results(&self) -> Option<&[modsurfer_module::ValType]> {
        match self {
            FunctionItem::Name(_) => None,
            FunctionItem::Item { results, .. } => results.as_deref(),
        }
    }
    fn params(&self) -> Option<&[modsurfer_module::ValType]> {
        match self {
            FunctionItem::Name(_) => None,
            FunctionItem::Item { params, .. } => params.as_deref(),
        }
    }
}
/// Include/exclude lists of namespaces for import checks.
#[skip_serializing_none]
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(deny_unknown_fields)]
pub struct Namespace {
    pub include: Option<Vec<NamespaceItem>>,
    pub exclude: Option<Vec<NamespaceItem>>,
}
/// Constraints on a module's imports, optionally scoped by namespace.
#[skip_serializing_none]
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(deny_unknown_fields)]
pub struct Imports {
    pub include: Option<Vec<ImportItem>>,
    pub exclude: Option<Vec<ImportItem>>,
    pub namespace: Option<Namespace>,
}
/// Constraints on a module's exports; `max` caps the export count.
#[skip_serializing_none]
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(deny_unknown_fields)]
pub struct Exports {
    pub include: Option<Vec<FunctionItem>>,
    pub exclude: Option<Vec<FunctionItem>>,
    pub max: Option<u32>,
}
/// Binary size constraint; `max` is a human-readable size string
/// (e.g. "4MB") — presumably parsed with `parse_size`, which is imported
/// at the top of this file; confirm at the use site.
#[skip_serializing_none]
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(deny_unknown_fields)]
pub struct Size {
    pub max: Option<String>,
}
/// Category of a validation failure.
// NOTE(review): `AbiCompatibilty` is missing an 'i' (Compatibility), but the
// variant name is part of the serialized/public surface, so it is left as-is.
#[derive(Debug, Deserialize, Serialize)]
pub enum Classification {
    AbiCompatibilty,
    ResourceLimit,
    Security,
}
impl Display for Classification {
    // Human-readable label used in the failure report table.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Classification::AbiCompatibilty => f.write_str("ABI Compatibility")?,
            Classification::ResourceLimit => f.write_str("Resource Limit")?,
            Classification::Security => f.write_str("Security")?,
        }
        Ok(())
    }
}
/// Details of a single failed check.
#[derive(Debug, Deserialize, Serialize)]
pub struct FailureDetail {
    // What the module actually had.
    pub actual: String,
    // What the checkfile required.
    pub expected: String,
    // Severity value; clamped to 10 when displayed (see `Display for Report`).
    pub severity: usize,
    pub classification: Classification,
}
/// Outcome of running all checks against a module.
#[derive(Debug, Deserialize, Serialize)]
pub struct Report {
    /// k/v pair of the dot-separated path to validation field and expectation info
    pub fails: BTreeMap<String, FailureDetail>,
}
impl Report {
    /// Map the report onto a process exit code: success when no check
    /// failed, failure otherwise.
    pub fn as_exit_code(&self) -> ExitCode {
        if self.has_failures() {
            ExitCode::FAILURE
        } else {
            ExitCode::SUCCESS
        }
    }
    /// True when at least one validation check failed.
    pub fn has_failures(&self) -> bool {
        !self.fails.is_empty()
    }
}
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
impl Display for Report {
    // Render the failures as a bordered table with one row per failed check;
    // renders nothing when the report is clean.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.fails.is_empty() {
            return Ok(());
        }
        let mut table = Table::new();
        table.load_preset(UTF8_FULL);
        table.apply_modifier(UTF8_SOLID_INNER_BORDERS);
        table.set_header(vec![
            "Status",
            "Property",
            "Expected",
            "Actual",
            "Classification",
            "Severity",
        ]);
        self.fails.iter().for_each(|fail| {
            // Severity renders as a bar of '|' characters, capped at 10.
            const SEVERITY_MAX: usize = 10;
            let severity = if fail.1.severity <= SEVERITY_MAX {
                fail.1.severity
            } else {
                SEVERITY_MAX
            };
            table.add_row(Row::from(vec![
                "FAIL",
                fail.0.as_str(),
                fail.1.expected.as_str(),
                fail.1.actual.as_str(),
                fail.1.classification.to_string().as_str(),
                "|".repeat(severity).as_str(),
            ]));
        });
        f.write_str(table.to_string().as_str())
    }
}
impl Report {
    /// Create an empty report with no recorded failures.
    fn new() -> Self {
        Self {
            fails: Default::default(),
        }
    }

    /// Record a validation failure under `name` when `valid` is false;
    /// a passing check (`valid == true`) leaves the report untouched.
    fn validate_fn(
        &mut self,
        name: &str,
        expected: String,
        actual: String,
        valid: bool,
        severity: usize,
        classification: Classification,
    ) {
        if !valid {
            self.fails.insert(
                name.to_string(),
                FailureDetail {
                    actual,
                    expected,
                    severity,
                    classification,
                },
            );
        }
    }

    /// Compare an expected function hash against the actual hash, if any.
    /// A missing actual hash is always recorded as a failure (shown as "<NONE>").
    fn validate_fn_hash(&mut self, name: &str, expected: String, actual: Option<String>) {
        // Match by value instead of the previous `if let Some(..) = actual.clone()`,
        // which needlessly cloned the Option and then called `unwrap_or_else` on a
        // value statically known to be `None` in the else branch.
        match actual {
            Some(actual) => {
                let test = expected == actual;
                self.validate_fn(
                    name,
                    expected,
                    actual,
                    test,
                    7,
                    Classification::AbiCompatibilty,
                );
            }
            None => {
                self.fails.insert(
                    name.to_string(),
                    FailureDetail {
                        actual: String::from("<NONE>"),
                        expected,
                        severity: 7,
                        classification: Classification::AbiCompatibilty,
                    },
                );
            }
        }
    }

    /// Validate a function's parameter and result types against optional
    /// expectations; a `None` expectation means "don't care" and is skipped.
    fn validate_fn_type(
        &mut self,
        name: &str,
        actual: &modsurfer_module::FunctionType,
        params: Option<&[modsurfer_module::ValType]>,
        results: Option<&[modsurfer_module::ValType]>,
    ) {
        if let Some(expected) = params {
            let test_params = actual.params == expected;
            self.validate_fn(
                &format!("{name}.params"),
                format!("{:?}", expected),
                format!("{:?}", actual.params),
                test_params,
                8,
                Classification::AbiCompatibilty,
            );
        };
        if let Some(expected) = results {
            let test_results = actual.results == expected;
            self.validate_fn(
                &format!("{name}.results"),
                format!("{:?}", expected),
                format!("{:?}", actual.results),
                test_results,
                8,
                Classification::AbiCompatibilty,
            );
        };
    }
}
/// Formats a membership flag as "included" / "excluded" for report labels.
struct Exist(bool);
impl Display for Exist {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(if self.0 { "included" } else { "excluded" })
    }
}
// Host-side namespace for wasm-parsing functionality (see `Module::parse`);
// not compiled for in-browser (wasm32-unknown) builds.
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
pub struct Module {}
// this uses Extism's "typed plugin" macro to produce a new struct `ModuleParser`, which contains
// an associated function `parse_module`. This enables us to wrap the extism::Plugin type and feel
// more like regular Rust functions vs. the using the generalized `Plugin::call` function.
// The plugin's output is decoded from protobuf into an `ApiModule`.
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
extism::typed_plugin!(ModuleParser {
    parse_module(&[u8]) -> Protobuf<ApiModule>;
});
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
impl Module {
    // NOTE: this function executes WebAssembly code as a plugin managed by Extism (https://extism.org)
    // and is distributed under the same license as the primary codebase. See LICENSE file in the
    // root of this repository.
    //
    // The source code to the WebAssembly binary is not open source.
    //
    // Importantly, this code has no side-effects, and uses no system resources. The `false`
    // parameter provided to `Plugin::new` below, ensures that the module is run without functions
    // provided by the WASI module namespace. Therefore it has no access to your running environment
    // nor any system resources such as a filesystem or network.
    //
    // The function within the WebAssembly, "parse_module", only parses bytes provided to it from
    // the host context (the `wasm`), and collects parsed information into the `Module` which is
    // returned as a protobuf-encoded struct.
    /// Parse raw wasm bytes into a `modsurfer_module::Module` by calling the
    /// bundled `parse_module` plugin and converting its protobuf output.
    pub fn parse(wasm: impl AsRef<[u8]>) -> Result<modsurfer_module::Module> {
        // instantiate the sandboxed plugin with WASI disabled (the `false` argument)
        let mut plugin: ModuleParser =
            Plugin::new(modsurfer_plugins::MODSURFER_WASM, [], false)?.try_into()?;
        let Protobuf(data) = plugin.parse_module(wasm.as_ref())?;
        // normalize an empty metadata map to `None`
        let metadata = if data.metadata.is_empty() {
            None
        } else {
            Some(data.metadata)
        };
        // a missing timestamp falls back to the protobuf default `Timestamp`
        let inserted_at: std::time::SystemTime = data
            .inserted_at
            .unwrap_or_else(|| protobuf::well_known_types::timestamp::Timestamp::new())
            .into();
        // convert the protobuf representation into the internal module type
        let module = modsurfer_module::Module {
            hash: data.hash,
            imports: from_api::imports(data.imports),
            exports: from_api::exports(data.exports),
            size: data.size as u64,
            location: data.location,
            source_language: from_api::source_language(
                data.source_language.enum_value_or_default(),
            ),
            metadata,
            inserted_at: inserted_at.into(),
            strings: data.strings,
            complexity: data.complexity,
            graph: data.graph,
            function_hashes: data.function_hashes,
        };
        Ok(module)
    }
}
/// Prefix `fn_name` with the import item's namespace ("ns::name") when one is
/// present; otherwise return the bare function name.
fn namespace_prefix(import_item: &ImportItem, fn_name: &str) -> String {
    import_item
        .namespace()
        .map(|ns| format!("{}::{}", ns, fn_name))
        .unwrap_or_else(|| fn_name.into())
}
pub fn validate(validation: Validation, module: modsurfer_module::Module) -> Result<Report> {
let mut report = Report::new();
// WASI
if let Some(allowed) = validation.validate.allow_wasi {
let actual = module
.get_import_namespaces()
.contains(&"wasi_snapshot_preview1");
report.validate_fn(
"allow_wasi",
allowed.to_string(),
actual.to_string(),
!(allowed == false && actual),
10,
Classification::AbiCompatibilty,
);
}
// Imports
if let Some(imports) = validation.validate.imports {
let actual_import_module_func_types = module
.imports
.iter()
.map(|im| {
(
(im.module_name.as_str(), im.func.name.as_str()),
&im.func.ty,
)
})
.collect::<std::collections::BTreeMap<_, _>>();
let import_func_types = actual_import_module_func_types
.iter()
.map(|((_, k), ty)| (*k, ty.clone()))
.collect::<BTreeMap<_, _>>();
let import_module_names = module.get_import_namespaces();
// expect that all actual imports parsed from the module are within a subset of the import
// functions listed in the checkfile
if let Some(include) = imports.include {
actual_import_module_func_types.iter().for_each(
|((actual_namespace, actual_func_name), actual_func_ty)| {
let actual_module_import = ImportItem::Item {
namespace: Some(actual_namespace.to_string()),
name: actual_func_name.to_string(),
params: Some(actual_func_ty.params.clone()),
results: Some(actual_func_ty.results.clone()),
};
// check that we have at minimum a match for name and namespace, use this module
// to further check the params and results
let found = include.iter().find(|checkfile_import| {
checkfile_import.name() == actual_module_import.name()
&& checkfile_import.namespace() == actual_module_import.namespace()
});
if found.is_none() {
report.validate_fn(
&format!(
"imports.include.{}",
namespace_prefix(&actual_module_import, actual_func_name)
),
Exist(false).to_string(),
Exist(true).to_string(),
false,
10,
Classification::AbiCompatibilty,
);
} else {
// if an import _is_ contained in the checkfile, also validate that the
// function type is equivalent to the expected type in the checkfile
let checkfile_import = found.expect("module import must exist");
report.validate_fn_type(
&format!(
"imports.include.{}",
namespace_prefix(&actual_module_import, actual_func_name)
),
&actual_func_ty,
checkfile_import.params(),
checkfile_import.results(),
);
}
},
);
}
if let Some(exclude) = imports.exclude {
exclude.iter().for_each(|imp| {
let name = imp.name();
let test = if let Some(ns) = imp.namespace() {
actual_import_module_func_types.contains_key(&(ns, name))
} else {
import_func_types.contains_key(&name.as_str())
};
let ty = if let Some(ns) = imp.namespace() {
actual_import_module_func_types.get(&(ns, name))
} else {
import_func_types.get(name.as_str())
};
if test {
let ty = ty.unwrap();
report.validate_fn_type(
&format!("imports.exclude.{}", namespace_prefix(&imp, name)),
*ty,
imp.params(),
imp.results(),
);
};
report.validate_fn(
&format!("imports.exclude.{}", namespace_prefix(&imp, name)),
Exist(false).to_string(),
Exist(test).to_string(),
!test,
5,
Classification::AbiCompatibilty,
);
});
}
if let Some(namespace) = imports.namespace {
if let Some(include) = namespace.include {
include.iter().for_each(|ns| {
let name = ns.name();
let functions = ns.functions();
let test = import_module_names.contains(&name.as_str());
report.validate_fn(
&format!("imports.namespace.include.{}", name),
Exist(true).to_string(),
Exist(test).to_string(),
test,
8,
Classification::AbiCompatibilty,
);
for f in functions.iter() {
let test = actual_import_module_func_types
.contains_key(&(name, f.name().as_str()));
report.validate_fn(
&format!("imports.namespace.include.{name}::{}", f.name()),
Exist(true).to_string(),
Exist(test).to_string(),
test,
8,
Classification::AbiCompatibilty,
);
if test {
let ty = actual_import_module_func_types
.get(&(name, f.name().as_str()))
.unwrap();
report.validate_fn_type(
&format!("imports.namespace.include.{name}::{}", f.name()),
*ty,
f.params(),
f.results(),
);
}
}
});
}
if let Some(exclude) = namespace.exclude {
exclude.iter().for_each(|ns| {
let name = ns.name();
let functions = ns.functions();
let test = import_module_names.contains(&name.as_str());
report.validate_fn(
&format!("imports.namespace.exclude.{}", name),
Exist(false).to_string(),
Exist(test).to_string(),
!test,
10,
Classification::AbiCompatibilty,
);
for f in functions.iter() {
let test = actual_import_module_func_types
.contains_key(&(name, f.name().as_str()));
if test {
let ty = actual_import_module_func_types
.get(&(name, f.name().as_str()))
.unwrap();
report.validate_fn_type(
&format!("imports.namespace.exclude.{name}::{}", f.name()),
*ty,
f.params(),
f.results(),
);
};
report.validate_fn(
&format!("imports.namespace.exclude.{name}::{}", f.name()),
Exist(false).to_string(),
Exist(test).to_string(),
!test,
10,
Classification::AbiCompatibilty,
);
}
});
}
}
}
// Exports
if let Some(exports) = validation.validate.exports {
let export_func_types = module
.exports
.iter()
.map(|im| (im.func.name.as_str(), &im.func.ty))
.collect::<std::collections::BTreeMap<_, _>>();
if let Some(max) = exports.max {
let num = export_func_types.len() as u32;
let overage = num.saturating_sub(max);
let max = if max == 0 { 1 } else { max };
let severity = ((overage as f32 / max as f32) * 10.0).ceil() as usize;
let test = num <= max;
report.validate_fn(
"exports.max",
format!("<= {max}"),
num.to_string(),
test,
severity,
Classification::Security,
);
}
if let Some(include) = exports.include {
include.iter().for_each(|f| {
let name = f.name();
let test = export_func_types.contains_key(name.as_str());
report.validate_fn(
&format!("exports.include.{}", name),
Exist(true).to_string(),
Exist(test).to_string(),
test,
10,
Classification::AbiCompatibilty,
);
if test {
let ty = export_func_types.get(name.as_str()).unwrap();
report.validate_fn_type(
&format!("exports.include.{}", name),
*ty,
f.params(),
f.results(),
);
}
if let Some(hash) = f.hash() {
report.validate_fn_hash(
&format!("exports.hash.{}", name),
hash.to_string(),
module.function_hashes.get(name).map(|x| x.clone()),
);
}
});
}
if let Some(exclude) = exports.exclude {
exclude.iter().for_each(|f| {
let name = f.name();
let ty = export_func_types.get(name.as_str());
let test = ty.is_some();
if test {
let ty = ty.unwrap();
report.validate_fn_type(
&format!("exports.include.{}", name),
*ty,
f.params(),
f.results(),
);
}
report.validate_fn(
&format!("exports.exclude.{}", name),
Exist(false).to_string(),
Exist(test).to_string(),
!test,
5,
Classification::AbiCompatibilty,
);
});
}
}
// Size
if let Some(size) = validation.validate.size {
if let Some(max) = size.max {
let parsed = parse_size(&max).unwrap();
let human_actual = human_bytes(module.size as f64);
let test = module.size <= parsed;
report.validate_fn(
"size.max",
format!("<= {max}"),
human_actual.to_string(),
test,
(module.size / parsed) as usize,
Classification::ResourceLimit,
);
}
}
// Complexity
if let Some(complexity) = validation.validate.complexity {
let module_complexity = module.complexity.ok_or_else(|| anyhow::anyhow!("Could not determine module complexity, please remove the complexity parameter from your checkfile."))?;
match complexity.kind()? {
ComplexityKind::MaxRisk(risk) => {
report.validate_fn(
"complexity.max_risk",
format!("<= {}", risk),
RiskLevel::from(module_complexity).to_string(),
risk.max() >= module_complexity,
(module_complexity / risk.max()) as usize,
Classification::ResourceLimit,
);
}
_ => unreachable!(),
}
}
Ok(report)
}
/// Read a wasm module and a checkfile from disk and validate the module.
///
/// If the checkfile declares a `url`, the authoritative validation schema is
/// fetched from that URL and replaces the local one before validation runs.
///
/// # Errors
/// Fails on unreadable files, unparsable YAML, a failed schema download, or
/// any error surfaced by `validate` itself.
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
pub async fn validate_module(file: &PathBuf, check: &PathBuf) -> Result<Report> {
    // read the wasm file and parse a Module from it to later validate against the check file.
    // NOTE: the Module is produced by executing plugin code, linked and called from the
    // `Module::parse` function.
    let module_data = tokio::fs::read(file).await?;
    let module = Module::parse(&module_data)?;
    let buf = tokio::fs::read(check).await?;
    let mut validation: Validation = serde_yaml::from_slice(&buf)?;
    if let Some(url) = validation.validate.url {
        // fetch remote validation file
        println!("Fetching validation schema from URL: {}", url);
        let resp = reqwest::get(&url).await?;
        if !resp.status().is_success() {
            anyhow::bail!(
                "Failed to make request for remote validation schema: {}",
                url
            );
        }
        // parse the remote schema & reassign `validation`
        // (the previous `buf.clear()` immediately before reassigning `buf`
        // was redundant and has been removed)
        let remote = resp.bytes().await?;
        validation = serde_yaml::from_slice(&remote)?;
    }
    validate(validation, module)
}
/// Produce a checkfile (`Validation`) that exactly describes `module`:
/// its WASI usage, every import and export (with types and hashes), its
/// import namespaces, a 10%-padded size limit, and its complexity risk.
pub fn generate_checkfile(module: &modsurfer_module::Module) -> Result<Validation> {
    let mut validation = Validation::default();
    let namespaces = module.get_import_namespaces();
    // allow_wasi: set only when the module actually imports WASI
    if namespaces.contains(&"wasi_snapshot_preview1") {
        validation.validate.allow_wasi = Some(true);
    }
    // imports: include every import the module declares
    let include_imports = module
        .imports
        .iter()
        .map(|imp| ImportItem::Item {
            namespace: Some(imp.module_name.clone()),
            name: imp.func.name.clone(),
            params: Some(imp.func.ty.params.clone()),
            results: Some(imp.func.ty.results.clone()),
        })
        .collect::<Vec<_>>();
    let mut imports = Imports::default();
    imports.include = Some(include_imports);
    // imports.namespace: include every namespace the module imports from
    if !namespaces.is_empty() {
        let mut namespace = Namespace::default();
        namespace.include = Some(
            namespaces
                .iter()
                .map(|name| NamespaceItem::Name(name.to_string()))
                .collect(),
        );
        imports.namespace = Some(namespace);
    }
    // exports: include every export, with its recorded function hash if known
    let include_exports = module
        .exports
        .iter()
        .map(|exp| FunctionItem::Item {
            name: exp.func.name.clone(),
            params: Some(exp.func.ty.params.clone()),
            results: Some(exp.func.ty.results.clone()),
            hash: module.function_hashes.get(&exp.func.name).cloned(),
        })
        .collect::<Vec<_>>();
    let mut exports = Exports::default();
    // exports.max: pin to the exact number of exports
    exports.max = Some(include_exports.len() as u32);
    exports.include = Some(include_exports);
    // size.max: add 10% padded size (ref: https://github.com/dylibso/modsurfer/issues/71)
    let mut size = Size::default();
    size.max = Some(human_bytes(module.size as f64 * 1.1));
    // complexity.max_risk: derive from the module's measured complexity
    let mut complexity = Complexity::default();
    complexity.max_risk = Some(RiskLevel::from(module.complexity.unwrap_or_default()));
    validation.validate.url = None;
    validation.validate.imports = Some(imports);
    validation.validate.exports = Some(exports);
    validation.validate.size = Some(size);
    validation.validate.complexity = Some(complexity);
    Ok(validation)
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/validation/src/diff.rs | validation/src/diff.rs | use std::fmt::Write;
use anyhow::Error;
use colored::Colorize;
/// A rendered textual diff between two modules' generated checkfiles;
/// serializes transparently as the underlying string.
#[derive(Debug, Clone, serde::Serialize)]
#[serde(transparent)]
pub struct Diff(String);
impl std::fmt::Display for Diff {
    /// Delegate to the inner pre-rendered diff text, preserving any
    /// formatter flags the caller supplied.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.0, f)
    }
}
impl Diff {
    /// Render a line-by-line diff between the generated checkfiles of modules
    /// `a` and `b`.
    ///
    /// `color_term` colorizes +/- lines for terminal output; `with_context`
    /// also emits unchanged lines. Returns an empty `Diff` when no lines were
    /// inserted or deleted.
    pub fn new(
        a: &modsurfer_module::Module,
        b: &modsurfer_module::Module,
        color_term: bool,
        with_context: bool,
    ) -> Result<Self, Error> {
        // compare the YAML checkfile representations, not the raw binaries
        let a_string = serde_yaml::to_string(&crate::generate_checkfile(a)?.validate)?;
        let b_string = serde_yaml::to_string(&crate::generate_checkfile(b)?.validate)?;
        let diff = similar::TextDiff::from_lines(a_string.as_str(), b_string.as_str());
        let mut output = String::new();
        // counts only insertions/deletions; equal lines never increment it
        let mut changes = 0;
        for change in diff.iter_all_changes() {
            let (sign, color) = match change.tag() {
                similar::ChangeTag::Delete => {
                    changes += 1;
                    ("- ", "red")
                }
                similar::ChangeTag::Insert => {
                    changes += 1;
                    ("+ ", "green")
                }
                // unchanged lines are only emitted when context was requested
                similar::ChangeTag::Equal if with_context => (" ", ""),
                _ => continue,
            };
            if color_term {
                write!(
                    &mut output,
                    "{}{}",
                    sign.color(color),
                    change.as_str().unwrap_or_default().color(color)
                )?;
            } else {
                write!(&mut output, "{}{}", sign, change)?;
            }
        }
        if changes == 0 {
            return Ok(Diff(String::new()));
        }
        Ok(Diff(output))
    }
}
impl From<Diff> for String {
fn from(x: Diff) -> String {
x.0
}
}
impl AsRef<str> for Diff {
    /// Borrow the rendered diff text as a string slice.
    fn as_ref(&self) -> &str {
        &self.0
    }
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/scripts/protobuf-list-modules-response/src/main.rs | scripts/protobuf-list-modules-response/src/main.rs | use std::fs;
use std::path::PathBuf;
use anyhow::Result;
use modsurfer_convert::{
api::{ListModulesResponse, Module as ProtoModule},
to_api,
};
use modsurfer_module::Module;
use modsurfer::ModuleParser;
use protobuf::Message;
#[tokio::main]
async fn main() -> Result<()> {
    let args = std::env::args().skip(1).collect::<Vec<String>>();
    // How many modules to process; a missing or non-numeric argument means
    // "all" (-1). (Indexing `args[0]` previously panicked when no argument
    // was supplied.)
    let mut take = args
        .first()
        .and_then(|s| s.parse::<i32>().ok())
        .unwrap_or(-1);
    println!("taking {} modules", take);
    // collect all the files in the wasm directory (expects these to be .wasm modules)
    let wasm_dir = PathBuf::new()
        .join(env!("CARGO_WORKSPACE_DIR"))
        .join("wasm");
    let dir = fs::read_dir(wasm_dir)?;
    let files = dir
        .filter_map(|entry| {
            let entry = entry.ok()?;
            // keep regular files only; skip subdirectories
            match entry.metadata() {
                Ok(m) if !m.is_dir() => Some(entry),
                _ => None,
            }
        })
        .collect::<Vec<_>>();
    if take < 0 {
        take = files.len() as i32;
    }
    // parse .wasm into modsurfer Modules
    let mut modules = vec![];
    for file in files.iter().take(take as usize) {
        println!("reading: {:?}", file.path());
        let m = Module::new_from_file(file.path()).await?;
        modules.push(m);
    }
    let total = modules.len() as u64;
    // give each module a 1-based ID and convert it to the protobuf Module message.
    // FIX: the previous `zip(1i64..total as i64)` used an exclusive range, which
    // silently dropped the last module; `1i64..` is bounded by the modules iterator.
    let modules = modules
        .into_iter()
        .zip(1i64..)
        .map(|(m, id)| to_api::module(m, id))
        .collect::<Vec<ProtoModule>>();
    // construct a protobuf ListModulesResponse message
    let output = ListModulesResponse {
        modules,
        total,
        ..Default::default()
    };
    // serialize the protobuf message and write to a file on disk
    let data = output.write_to_bytes()?;
    fs::write("ListModulesResponse.pb", &data)?;
    println!("wrote ListModulesResponse.pb with {} modules.", total);
    Ok(())
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/demangle/src/lib.rs | demangle/src/lib.rs | /// Demangle function name, currently supports C++ and Rust
/// Demangle function name, currently supports C++ and Rust.
///
/// The C++ (Itanium) demangler is tried first; the Rust demangler is only
/// attempted when the symbol does not even parse as a C++ mangling. If the
/// symbol parses but fails to demangle, the original name is returned.
pub fn demangle_function_name(name: impl Into<String>) -> String {
    let name = name.into();
    match cpp_demangle::Symbol::new(&name) {
        Ok(sym) => {
            if let Ok(demangled) = sym.demangle(&cpp_demangle::DemangleOptions::default()) {
                return demangled;
            }
        }
        Err(_) => {
            if let Ok(demangled) = rustc_demangle::try_demangle(&name) {
                return demangled.to_string();
            }
        }
    }
    name
}
#[cfg(all(target_arch = "wasm32", feature = "web"))]
use wasm_bindgen::prelude::*;
// JS-facing wrapper: exposes `demangle_function_name` to WebAssembly/JS hosts
// via wasm-bindgen, compiled only for wasm32 builds with the "web" feature.
#[cfg(all(target_arch = "wasm32", feature = "web"))]
#[wasm_bindgen]
pub fn demangle(name: String) -> String {
    demangle_function_name(name)
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/proto/v1/src/api.rs | proto/v1/src/api.rs | // This file is generated by rust-protobuf 3.4.0. Do not edit
// .proto file is parsed by protoc --rust-out=...
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![allow(unused_attributes)]
#![cfg_attr(rustfmt, rustfmt::skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unused_results)]
#![allow(unused_mut)]
//! Generated file from `proto/v1/api.proto`
/// Generated files are compatible only with the same version
/// of protobuf runtime.
const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_3_4_0;
/// Contained by an import or export element within a wasm binary.
// @@protoc_insertion_point(message:Function)
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Function {
// message fields
// @@protoc_insertion_point(field:Function.params)
pub params: ::std::vec::Vec<::protobuf::EnumOrUnknown<ValType>>,
// @@protoc_insertion_point(field:Function.results)
pub results: ::std::vec::Vec<::protobuf::EnumOrUnknown<ValType>>,
// @@protoc_insertion_point(field:Function.name)
pub name: ::std::string::String,
// special fields
// @@protoc_insertion_point(special_field:Function.special_fields)
pub special_fields: ::protobuf::SpecialFields,
}
impl<'a> ::std::default::Default for &'a Function {
fn default() -> &'a Function {
<Function as ::protobuf::Message>::default_instance()
}
}
impl Function {
pub fn new() -> Function {
::std::default::Default::default()
}
fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
let mut fields = ::std::vec::Vec::with_capacity(3);
let mut oneofs = ::std::vec::Vec::with_capacity(0);
fields.push(::protobuf::reflect::rt::v2::make_vec_simpler_accessor::<_, _>(
"params",
|m: &Function| { &m.params },
|m: &mut Function| { &mut m.params },
));
fields.push(::protobuf::reflect::rt::v2::make_vec_simpler_accessor::<_, _>(
"results",
|m: &Function| { &m.results },
|m: &mut Function| { &mut m.results },
));
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"name",
|m: &Function| { &m.name },
|m: &mut Function| { &mut m.name },
));
::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<Function>(
"Function",
fields,
oneofs,
)
}
}
impl ::protobuf::Message for Function {
const NAME: &'static str = "Function";
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
while let Some(tag) = is.read_raw_tag_or_eof()? {
match tag {
8 => {
self.params.push(is.read_enum_or_unknown()?);
},
10 => {
::protobuf::rt::read_repeated_packed_enum_or_unknown_into(is, &mut self.params)?
},
16 => {
self.results.push(is.read_enum_or_unknown()?);
},
18 => {
::protobuf::rt::read_repeated_packed_enum_or_unknown_into(is, &mut self.results)?
},
26 => {
self.name = is.read_string()?;
},
tag => {
::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u64 {
let mut my_size = 0;
for value in &self.params {
my_size += ::protobuf::rt::int32_size(1, value.value());
};
for value in &self.results {
my_size += ::protobuf::rt::int32_size(2, value.value());
};
if !self.name.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.name);
}
my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
self.special_fields.cached_size().set(my_size as u32);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
for v in &self.params {
os.write_enum(1, ::protobuf::EnumOrUnknown::value(v))?;
};
for v in &self.results {
os.write_enum(2, ::protobuf::EnumOrUnknown::value(v))?;
};
if !self.name.is_empty() {
os.write_string(3, &self.name)?;
}
os.write_unknown_fields(self.special_fields.unknown_fields())?;
::std::result::Result::Ok(())
}
fn special_fields(&self) -> &::protobuf::SpecialFields {
&self.special_fields
}
fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
&mut self.special_fields
}
fn new() -> Function {
Function::new()
}
fn clear(&mut self) {
self.params.clear();
self.results.clear();
self.name.clear();
self.special_fields.clear();
}
fn default_instance() -> &'static Function {
static instance: Function = Function {
params: ::std::vec::Vec::new(),
results: ::std::vec::Vec::new(),
name: ::std::string::String::new(),
special_fields: ::protobuf::SpecialFields::new(),
};
&instance
}
}
impl ::protobuf::MessageFull for Function {
fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
descriptor.get(|| file_descriptor().message_by_package_relative_name("Function").unwrap()).clone()
}
}
impl ::std::fmt::Display for Function {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Function {
type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
}
/// A function and module namespace that is defined outside of the current
/// module, and referenced & called by the current module.
// @@protoc_insertion_point(message:Import)
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Import {
// message fields
// @@protoc_insertion_point(field:Import.module_name)
pub module_name: ::std::string::String,
// @@protoc_insertion_point(field:Import.func)
pub func: ::protobuf::MessageField<Function>,
// special fields
// @@protoc_insertion_point(special_field:Import.special_fields)
pub special_fields: ::protobuf::SpecialFields,
}
impl<'a> ::std::default::Default for &'a Import {
fn default() -> &'a Import {
<Import as ::protobuf::Message>::default_instance()
}
}
impl Import {
pub fn new() -> Import {
::std::default::Default::default()
}
fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
let mut fields = ::std::vec::Vec::with_capacity(2);
let mut oneofs = ::std::vec::Vec::with_capacity(0);
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"module_name",
|m: &Import| { &m.module_name },
|m: &mut Import| { &mut m.module_name },
));
fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, Function>(
"func",
|m: &Import| { &m.func },
|m: &mut Import| { &mut m.func },
));
::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<Import>(
"Import",
fields,
oneofs,
)
}
}
impl ::protobuf::Message for Import {
const NAME: &'static str = "Import";
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
while let Some(tag) = is.read_raw_tag_or_eof()? {
match tag {
10 => {
self.module_name = is.read_string()?;
},
18 => {
::protobuf::rt::read_singular_message_into_field(is, &mut self.func)?;
},
tag => {
::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u64 {
let mut my_size = 0;
if !self.module_name.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.module_name);
}
if let Some(v) = self.func.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
self.special_fields.cached_size().set(my_size as u32);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
if !self.module_name.is_empty() {
os.write_string(1, &self.module_name)?;
}
if let Some(v) = self.func.as_ref() {
::protobuf::rt::write_message_field_with_cached_size(2, v, os)?;
}
os.write_unknown_fields(self.special_fields.unknown_fields())?;
::std::result::Result::Ok(())
}
fn special_fields(&self) -> &::protobuf::SpecialFields {
&self.special_fields
}
fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
&mut self.special_fields
}
fn new() -> Import {
Import::new()
}
fn clear(&mut self) {
self.module_name.clear();
self.func.clear();
self.special_fields.clear();
}
fn default_instance() -> &'static Import {
static instance: Import = Import {
module_name: ::std::string::String::new(),
func: ::protobuf::MessageField::none(),
special_fields: ::protobuf::SpecialFields::new(),
};
&instance
}
}
impl ::protobuf::MessageFull for Import {
fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
descriptor.get(|| file_descriptor().message_by_package_relative_name("Import").unwrap()).clone()
}
}
impl ::std::fmt::Display for Import {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Import {
type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
}
/// A function that is defined inside the current module, made available to
/// outside modules / environments.
// @@protoc_insertion_point(message:Export)
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Export {
// message fields
// @@protoc_insertion_point(field:Export.func)
pub func: ::protobuf::MessageField<Function>,
// special fields
// @@protoc_insertion_point(special_field:Export.special_fields)
pub special_fields: ::protobuf::SpecialFields,
}
impl<'a> ::std::default::Default for &'a Export {
fn default() -> &'a Export {
<Export as ::protobuf::Message>::default_instance()
}
}
impl Export {
pub fn new() -> Export {
::std::default::Default::default()
}
fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
let mut fields = ::std::vec::Vec::with_capacity(1);
let mut oneofs = ::std::vec::Vec::with_capacity(0);
fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, Function>(
"func",
|m: &Export| { &m.func },
|m: &mut Export| { &mut m.func },
));
::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<Export>(
"Export",
fields,
oneofs,
)
}
}
impl ::protobuf::Message for Export {
const NAME: &'static str = "Export";
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
while let Some(tag) = is.read_raw_tag_or_eof()? {
match tag {
10 => {
::protobuf::rt::read_singular_message_into_field(is, &mut self.func)?;
},
tag => {
::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u64 {
let mut my_size = 0;
if let Some(v) = self.func.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
self.special_fields.cached_size().set(my_size as u32);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
if let Some(v) = self.func.as_ref() {
::protobuf::rt::write_message_field_with_cached_size(1, v, os)?;
}
os.write_unknown_fields(self.special_fields.unknown_fields())?;
::std::result::Result::Ok(())
}
fn special_fields(&self) -> &::protobuf::SpecialFields {
&self.special_fields
}
fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
&mut self.special_fields
}
fn new() -> Export {
Export::new()
}
fn clear(&mut self) {
self.func.clear();
self.special_fields.clear();
}
fn default_instance() -> &'static Export {
static instance: Export = Export {
func: ::protobuf::MessageField::none(),
special_fields: ::protobuf::SpecialFields::new(),
};
&instance
}
}
impl ::protobuf::MessageFull for Export {
fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
descriptor.get(|| file_descriptor().message_by_package_relative_name("Export").unwrap()).clone()
}
}
impl ::std::fmt::Display for Export {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Export {
type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
}
/// Details about a wasm module, either extracted directly from the binary, or
/// inferred somehow.
// @@protoc_insertion_point(message:Module)
#[derive(PartialEq,Clone,Default,Debug)]
pub struct Module {
// message fields
/// ID for this module, generated by the database.
// @@protoc_insertion_point(field:Module.id)
pub id: i64,
/// sha256 hash of the modules raw bytes
// @@protoc_insertion_point(field:Module.hash)
pub hash: ::std::string::String,
/// function imports called by the module (see:
/// <https://github.com/WebAssembly/design/blob/main/Modules.md#imports)>
// @@protoc_insertion_point(field:Module.imports)
pub imports: ::std::vec::Vec<Import>,
/// function exports provided by the module (see:
/// <https://github.com/WebAssembly/design/blob/main/Modules.md#exports)>
// @@protoc_insertion_point(field:Module.exports)
pub exports: ::std::vec::Vec<Export>,
/// size in bytes of the module
// @@protoc_insertion_point(field:Module.size)
pub size: u64,
/// path or locator to the module
// @@protoc_insertion_point(field:Module.location)
pub location: ::std::string::String,
/// programming language used to produce this module
// @@protoc_insertion_point(field:Module.source_language)
pub source_language: ::protobuf::EnumOrUnknown<SourceLanguage>,
/// arbitrary metadata provided by the operator of this module
// @@protoc_insertion_point(field:Module.metadata)
pub metadata: ::std::collections::HashMap<::std::string::String, ::std::string::String>,
/// timestamp when this module was loaded and stored
// @@protoc_insertion_point(field:Module.inserted_at)
pub inserted_at: ::protobuf::MessageField<::protobuf::well_known_types::timestamp::Timestamp>,
/// the interned strings stored in the wasm binary (panic/abort messages, etc.)
// @@protoc_insertion_point(field:Module.strings)
pub strings: ::std::vec::Vec<::std::string::String>,
/// the cyclomatic complexity
/// (<https://en.wikipedia.org/wiki/Cyclomatic_complexity>) of the instructions
// @@protoc_insertion_point(field:Module.complexity)
pub complexity: ::std::option::Option<u32>,
/// the serialized graph in json format
// @@protoc_insertion_point(field:Module.graph)
pub graph: ::std::option::Option<::std::vec::Vec<u8>>,
/// function hashes
// @@protoc_insertion_point(field:Module.function_hashes)
pub function_hashes: ::std::collections::HashMap<::std::string::String, ::std::string::String>,
// special fields
// @@protoc_insertion_point(special_field:Module.special_fields)
pub special_fields: ::protobuf::SpecialFields,
}
impl<'a> ::std::default::Default for &'a Module {
fn default() -> &'a Module {
<Module as ::protobuf::Message>::default_instance()
}
}
impl Module {
pub fn new() -> Module {
::std::default::Default::default()
}
fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
let mut fields = ::std::vec::Vec::with_capacity(13);
let mut oneofs = ::std::vec::Vec::with_capacity(0);
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"id",
|m: &Module| { &m.id },
|m: &mut Module| { &mut m.id },
));
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"hash",
|m: &Module| { &m.hash },
|m: &mut Module| { &mut m.hash },
));
fields.push(::protobuf::reflect::rt::v2::make_vec_simpler_accessor::<_, _>(
"imports",
|m: &Module| { &m.imports },
|m: &mut Module| { &mut m.imports },
));
fields.push(::protobuf::reflect::rt::v2::make_vec_simpler_accessor::<_, _>(
"exports",
|m: &Module| { &m.exports },
|m: &mut Module| { &mut m.exports },
));
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"size",
|m: &Module| { &m.size },
|m: &mut Module| { &mut m.size },
));
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"location",
|m: &Module| { &m.location },
|m: &mut Module| { &mut m.location },
));
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"source_language",
|m: &Module| { &m.source_language },
|m: &mut Module| { &mut m.source_language },
));
fields.push(::protobuf::reflect::rt::v2::make_map_simpler_accessor::<_, _, _>(
"metadata",
|m: &Module| { &m.metadata },
|m: &mut Module| { &mut m.metadata },
));
fields.push(::protobuf::reflect::rt::v2::make_message_field_accessor::<_, ::protobuf::well_known_types::timestamp::Timestamp>(
"inserted_at",
|m: &Module| { &m.inserted_at },
|m: &mut Module| { &mut m.inserted_at },
));
fields.push(::protobuf::reflect::rt::v2::make_vec_simpler_accessor::<_, _>(
"strings",
|m: &Module| { &m.strings },
|m: &mut Module| { &mut m.strings },
));
fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
"complexity",
|m: &Module| { &m.complexity },
|m: &mut Module| { &mut m.complexity },
));
fields.push(::protobuf::reflect::rt::v2::make_option_accessor::<_, _>(
"graph",
|m: &Module| { &m.graph },
|m: &mut Module| { &mut m.graph },
));
fields.push(::protobuf::reflect::rt::v2::make_map_simpler_accessor::<_, _, _>(
"function_hashes",
|m: &Module| { &m.function_hashes },
|m: &mut Module| { &mut m.function_hashes },
));
::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<Module>(
"Module",
fields,
oneofs,
)
}
}
impl ::protobuf::Message for Module {
const NAME: &'static str = "Module";
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::Result<()> {
while let Some(tag) = is.read_raw_tag_or_eof()? {
match tag {
8 => {
self.id = is.read_int64()?;
},
26 => {
self.hash = is.read_string()?;
},
34 => {
self.imports.push(is.read_message()?);
},
42 => {
self.exports.push(is.read_message()?);
},
48 => {
self.size = is.read_uint64()?;
},
58 => {
self.location = is.read_string()?;
},
64 => {
self.source_language = is.read_enum_or_unknown()?;
},
74 => {
let len = is.read_raw_varint32()?;
let old_limit = is.push_limit(len as u64)?;
let mut key = ::std::default::Default::default();
let mut value = ::std::default::Default::default();
while let Some(tag) = is.read_raw_tag_or_eof()? {
match tag {
10 => key = is.read_string()?,
18 => value = is.read_string()?,
_ => ::protobuf::rt::skip_field_for_tag(tag, is)?,
};
}
is.pop_limit(old_limit);
self.metadata.insert(key, value);
},
82 => {
::protobuf::rt::read_singular_message_into_field(is, &mut self.inserted_at)?;
},
90 => {
self.strings.push(is.read_string()?);
},
104 => {
self.complexity = ::std::option::Option::Some(is.read_uint32()?);
},
114 => {
self.graph = ::std::option::Option::Some(is.read_bytes()?);
},
122 => {
let len = is.read_raw_varint32()?;
let old_limit = is.push_limit(len as u64)?;
let mut key = ::std::default::Default::default();
let mut value = ::std::default::Default::default();
while let Some(tag) = is.read_raw_tag_or_eof()? {
match tag {
10 => key = is.read_string()?,
18 => value = is.read_string()?,
_ => ::protobuf::rt::skip_field_for_tag(tag, is)?,
};
}
is.pop_limit(old_limit);
self.function_hashes.insert(key, value);
},
tag => {
::protobuf::rt::read_unknown_or_skip_group(tag, is, self.special_fields.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u64 {
let mut my_size = 0;
if self.id != 0 {
my_size += ::protobuf::rt::int64_size(1, self.id);
}
if !self.hash.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.hash);
}
for value in &self.imports {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
};
for value in &self.exports {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
};
if self.size != 0 {
my_size += ::protobuf::rt::uint64_size(6, self.size);
}
if !self.location.is_empty() {
my_size += ::protobuf::rt::string_size(7, &self.location);
}
if self.source_language != ::protobuf::EnumOrUnknown::new(SourceLanguage::Unknown) {
my_size += ::protobuf::rt::int32_size(8, self.source_language.value());
}
for (k, v) in &self.metadata {
let mut entry_size = 0;
entry_size += ::protobuf::rt::string_size(1, &k);
entry_size += ::protobuf::rt::string_size(2, &v);
my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(entry_size) + entry_size
};
if let Some(v) = self.inserted_at.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(len) + len;
}
for value in &self.strings {
my_size += ::protobuf::rt::string_size(11, &value);
};
if let Some(v) = self.complexity {
my_size += ::protobuf::rt::uint32_size(13, v);
}
if let Some(v) = self.graph.as_ref() {
my_size += ::protobuf::rt::bytes_size(14, &v);
}
for (k, v) in &self.function_hashes {
let mut entry_size = 0;
entry_size += ::protobuf::rt::string_size(1, &k);
entry_size += ::protobuf::rt::string_size(2, &v);
my_size += 1 + ::protobuf::rt::compute_raw_varint64_size(entry_size) + entry_size
};
my_size += ::protobuf::rt::unknown_fields_size(self.special_fields.unknown_fields());
self.special_fields.cached_size().set(my_size as u32);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::Result<()> {
if self.id != 0 {
os.write_int64(1, self.id)?;
}
if !self.hash.is_empty() {
os.write_string(3, &self.hash)?;
}
for v in &self.imports {
::protobuf::rt::write_message_field_with_cached_size(4, v, os)?;
};
for v in &self.exports {
::protobuf::rt::write_message_field_with_cached_size(5, v, os)?;
};
if self.size != 0 {
os.write_uint64(6, self.size)?;
}
if !self.location.is_empty() {
os.write_string(7, &self.location)?;
}
if self.source_language != ::protobuf::EnumOrUnknown::new(SourceLanguage::Unknown) {
os.write_enum(8, ::protobuf::EnumOrUnknown::value(&self.source_language))?;
}
for (k, v) in &self.metadata {
let mut entry_size = 0;
entry_size += ::protobuf::rt::string_size(1, &k);
entry_size += ::protobuf::rt::string_size(2, &v);
os.write_raw_varint32(74)?; // Tag.
os.write_raw_varint32(entry_size as u32)?;
os.write_string(1, &k)?;
os.write_string(2, &v)?;
};
if let Some(v) = self.inserted_at.as_ref() {
::protobuf::rt::write_message_field_with_cached_size(10, v, os)?;
}
for v in &self.strings {
os.write_string(11, &v)?;
};
if let Some(v) = self.complexity {
os.write_uint32(13, v)?;
}
if let Some(v) = self.graph.as_ref() {
os.write_bytes(14, v)?;
}
for (k, v) in &self.function_hashes {
let mut entry_size = 0;
entry_size += ::protobuf::rt::string_size(1, &k);
entry_size += ::protobuf::rt::string_size(2, &v);
os.write_raw_varint32(122)?; // Tag.
os.write_raw_varint32(entry_size as u32)?;
os.write_string(1, &k)?;
os.write_string(2, &v)?;
};
os.write_unknown_fields(self.special_fields.unknown_fields())?;
::std::result::Result::Ok(())
}
fn special_fields(&self) -> &::protobuf::SpecialFields {
&self.special_fields
}
fn mut_special_fields(&mut self) -> &mut ::protobuf::SpecialFields {
&mut self.special_fields
}
fn new() -> Module {
Module::new()
}
fn clear(&mut self) {
self.id = 0;
self.hash.clear();
self.imports.clear();
self.exports.clear();
self.size = 0;
self.location.clear();
self.source_language = ::protobuf::EnumOrUnknown::new(SourceLanguage::Unknown);
self.metadata.clear();
self.inserted_at.clear();
self.strings.clear();
self.complexity = ::std::option::Option::None;
self.graph = ::std::option::Option::None;
self.function_hashes.clear();
self.special_fields.clear();
}
fn default_instance() -> &'static Module {
static instance: ::protobuf::rt::Lazy<Module> = ::protobuf::rt::Lazy::new();
instance.get(Module::new)
}
}
impl ::protobuf::MessageFull for Module {
fn descriptor() -> ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::Lazy::new();
descriptor.get(|| file_descriptor().message_by_package_relative_name("Module").unwrap()).clone()
}
}
impl ::std::fmt::Display for Module {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Module {
type RuntimeType = ::protobuf::reflect::rt::RuntimeTypeMessage<Self>;
}
/// Details about a wasm module graph
// @@protoc_insertion_point(message:ModuleGraph)
#[derive(PartialEq,Clone,Default,Debug)]
pub struct ModuleGraph {
// message fields
/// ID for this module, generated by the database.
// @@protoc_insertion_point(field:ModuleGraph.id)
pub id: i64,
/// the serialized graph in json format
// @@protoc_insertion_point(field:ModuleGraph.json_bytes)
pub json_bytes: ::std::vec::Vec<u8>,
// special fields
// @@protoc_insertion_point(special_field:ModuleGraph.special_fields)
pub special_fields: ::protobuf::SpecialFields,
}
impl<'a> ::std::default::Default for &'a ModuleGraph {
fn default() -> &'a ModuleGraph {
<ModuleGraph as ::protobuf::Message>::default_instance()
}
}
impl ModuleGraph {
pub fn new() -> ModuleGraph {
::std::default::Default::default()
}
fn generated_message_descriptor_data() -> ::protobuf::reflect::GeneratedMessageDescriptorData {
let mut fields = ::std::vec::Vec::with_capacity(2);
let mut oneofs = ::std::vec::Vec::with_capacity(0);
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"id",
|m: &ModuleGraph| { &m.id },
|m: &mut ModuleGraph| { &mut m.id },
));
fields.push(::protobuf::reflect::rt::v2::make_simpler_field_accessor::<_, _>(
"json_bytes",
|m: &ModuleGraph| { &m.json_bytes },
|m: &mut ModuleGraph| { &mut m.json_bytes },
));
::protobuf::reflect::GeneratedMessageDescriptorData::new_2::<ModuleGraph>(
"ModuleGraph",
fields,
oneofs,
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | true |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/proto/v1/src/mod.rs | proto/v1/src/mod.rs | // @generated
pub mod api;
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/api/src/mock_client.rs | api/src/mock_client.rs | use std::{collections::HashMap, sync::Mutex};
use anyhow::Result;
use async_trait::async_trait;
use lazy_static::lazy_static;
#[cfg(not(feature = "mock-empty"))]
use modsurfer_convert::api::ListModulesResponse;
use modsurfer_module::{Export, Import, Module, SourceLanguage};
use url::Url;
use crate::{ApiClient, List, Persisted, SortDirection, SortField};
#[cfg(not(feature = "mock-empty"))]
lazy_static! {
static ref MODULES: &'static [u8] = include_bytes!("../ListModulesResponse.pb");
static ref PB_DATA: ListModulesResponse =
protobuf::Message::parse_from_bytes(&MODULES).unwrap();
static ref MOCK_CLIENT_DATA: Mutex<Vec<Persisted<Module>>> = Mutex::new(
PB_DATA
.modules
.clone()
.into_iter()
.map(Into::into)
.collect::<Vec<Persisted<Module>>>()
);
}
#[cfg(feature = "mock-empty")]
lazy_static! {
static ref MOCK_CLIENT_DATA: Mutex<Vec<Persisted<Module>>> = Mutex::new(vec![]);
}
#[derive(Clone, Default)]
pub struct Client;
impl Client {
pub fn modules(&self) -> Vec<Persisted<Module>> {
MOCK_CLIENT_DATA.lock().unwrap().to_vec()
}
pub fn module(&self, module_id: i64) -> Option<Persisted<Module>> {
MOCK_CLIENT_DATA
.lock()
.unwrap()
.iter()
.find(|m| m.get_id() == module_id)
.map(|m| m.clone())
}
}
#[async_trait(?Send)]
impl ApiClient for Client {
fn new(_base_url: &str) -> Result<Self>
where
Self: Sized,
{
Ok(Self)
}
async fn get_module(&self, module_id: i64) -> Result<Persisted<Module>> {
match self.module(module_id) {
Some(p) => Ok(p),
None => Err(anyhow::anyhow!("no module found")), // TODO: improve errors
}
}
async fn list_modules(&self, offset: u32, limit: u32) -> Result<List<Persisted<Module>>> {
let all = self.modules();
let total = all.len() as u32;
let modules = all
.into_iter()
.skip(offset as usize)
.take(limit as usize)
.collect();
let list = List::new(modules, total, offset, limit);
Ok(list)
}
async fn create_module(
&self,
_wasm: impl AsRef<[u8]> + Send,
metadata: Option<HashMap<String, String>>,
_location: Option<Url>,
) -> Result<(i64, String)> {
let mut module = Module::default();
module.metadata = metadata;
let id = (MOCK_CLIENT_DATA.lock().unwrap().len() + 1) as i64;
let hash = module.hash.clone();
MOCK_CLIENT_DATA
.lock()
.unwrap()
.push(Persisted::from_module(id, module));
Ok((id, hash))
}
async fn search_modules(
&self,
module_id: Option<i64>,
hash: Option<String>,
function_name: Option<String>,
module_name: Option<String>,
_imports: Option<Vec<Import>>,
_exports: Option<Vec<Export>>,
_min_size: Option<u64>,
_max_size: Option<u64>,
_location: Option<url::Url>,
source_language: Option<String>,
_metadata: Option<HashMap<String, String>>,
_inserted_before: Option<chrono::DateTime<chrono::Utc>>,
_inserted_after: Option<chrono::DateTime<chrono::Utc>>,
strings: Option<Vec<String>>,
offset: u32,
limit: u32,
_sort_field: Option<SortField>,
_sort_direction: Option<SortDirection>,
) -> Result<List<Persisted<Module>>> {
let modules = MOCK_CLIENT_DATA.lock().unwrap();
if let Some(module_id) = module_id {
if let Some(module) = modules.iter().find(|p| p.get_id() == module_id) {
return Ok(List::new(vec![module.clone()], 1, offset, limit));
}
}
if let Some(hash) = hash {
if let Some(module) = modules.iter().find(|p| p.get_inner().hash == hash) {
return Ok(List::new(vec![module.clone()], 1, offset, limit));
}
}
let mut filtered = modules.clone();
if let Some(function_name) = function_name {
filtered = filtered
.into_iter()
.filter(|p| {
p.get_inner()
.imports
.iter()
.any(|i| i.func.name == function_name)
|| p.get_inner()
.exports
.iter()
.any(|i| i.func.name == function_name)
})
.collect();
}
if let Some(module_name) = module_name {
filtered = filtered
.into_iter()
.filter(|p| {
p.get_inner()
.imports
.iter()
.any(|i| i.module_name == module_name)
})
.collect();
}
if let Some(source_language) = source_language {
let lang: SourceLanguage = source_language.into();
filtered = filtered
.into_iter()
.filter(|p| p.get_inner().source_language == lang)
.collect();
}
if let Some(strings) = strings {
filtered = filtered
.into_iter()
.filter(|p| {
strings
.iter()
.any(|s| p.get_inner().strings.iter().any(|mod_s| mod_s.contains(s)))
})
.collect();
}
let total = filtered.len() as u32;
Ok(List::new(filtered, total, offset, limit))
}
async fn diff_modules(
&self,
_module1: i64,
_module2: i64,
_color_terminal: bool,
_with_context: bool,
) -> Result<String> {
anyhow::bail!("Diff operation unimplemented.")
}
async fn validate_module(
&self,
_wasm: impl AsRef<[u8]> + Send,
_checkfile: impl AsRef<[u8]> + Send,
) -> Result<Report> {
anyhow::bail!("Validate operation unimplemented.")
}
async fn get_module_graph(&self, _module_id: i64) -> Result<Vec<u8>> {
anyhow::bail!("ModuleGraph operation unimplemented.")
}
async fn call_plugin(
&self,
_identifier: String,
_function_name: String,
_function_input: Vec<u8>,
) -> Result<()> {
anyhow::bail!("CallPlugin operation unimplemented.")
}
async fn install_plugin(&self, _identifier: String, _wasm: Vec<u8>) -> Result<()> {
anyhow::bail!("InstallPlugin operation unimplemented.")
}
async fn uninstall_plugin(&self, _identifier: String) -> Result<()> {
anyhow::bail!("UninstallPlugin operation unimplemented.")
}
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/api/src/lib.rs | api/src/lib.rs | use std::collections::HashMap;
#[cfg(not(feature = "mock"))]
mod client;
mod interop;
mod sort;
#[cfg(feature = "mock")]
mod mock_client;
#[cfg(feature = "mock")]
pub use mock_client::Client;
#[cfg(not(feature = "mock"))]
pub use client::Client;
pub use interop::{List, Persisted};
pub use sort::{SortDirection, SortField};
pub use anyhow::Result;
use async_trait::async_trait;
use modsurfer_convert::Audit;
use modsurfer_module::{Export, Import, Module};
use modsurfer_validation::Report;
/// A trait to describe the functionality of Modsurfer's internal API client. This is used across
/// the CLI and GUI application. As such, the code must compile to `wasm32-unknown-unknown` target.
#[async_trait(?Send)]
pub trait ApiClient {
fn new(base_url: &str) -> Result<Self>
where
Self: Sized;
async fn get_module(&self, module_id: i64) -> Result<Persisted<Module>>;
async fn list_modules(&self, offset: u32, limit: u32) -> Result<List<Persisted<Module>>>;
async fn create_module(
&self,
wasm: impl AsRef<[u8]> + Send,
metadata: Option<HashMap<String, String>>,
location: Option<url::Url>,
) -> Result<(i64, String)>;
async fn search_modules(
&self,
module_id: Option<i64>,
hash: Option<String>,
function_name: Option<String>,
module_name: Option<String>,
imports: Option<Vec<Import>>,
exports: Option<Vec<Export>>,
min_size: Option<u64>,
max_size: Option<u64>,
location: Option<url::Url>,
source_language: Option<String>,
metadata: Option<HashMap<String, String>>,
inserted_before: Option<chrono::DateTime<chrono::Utc>>,
inserted_after: Option<chrono::DateTime<chrono::Utc>>,
strings: Option<Vec<String>>,
offset: u32,
limit: u32,
sort_field: Option<SortField>,
sort_direction: Option<SortDirection>,
) -> Result<List<Persisted<Module>>>;
async fn delete_modules(&self, _module_ids: Vec<i64>) -> Result<HashMap<i64, String>> {
anyhow::bail!("Delete operation unimplemented.")
}
async fn audit_modules(&self, _audit: Audit) -> Result<HashMap<i64, Report>> {
anyhow::bail!("Audit operation unimplemented.")
}
async fn diff_modules(
&self,
module1: i64,
module2: i64,
color_terminal: bool,
with_context: bool,
) -> Result<String>;
async fn validate_module(
&self,
_wasm: impl AsRef<[u8]> + Send,
_checkfile: impl AsRef<[u8]> + Send,
) -> Result<Report> {
anyhow::bail!("Validate operation unimplemented.")
}
async fn get_module_graph(&self, _module_id: i64) -> Result<Vec<u8>> {
anyhow::bail!("ModuleGraph operation unimplemented.")
}
async fn call_plugin(
&self,
_identifier: String,
_function_name: String,
_function_input: Vec<u8>,
) -> Result<Vec<u8>> {
anyhow::bail!("CallPlugin operation unimplemented.")
}
async fn install_plugin(
&self,
_identifier: String,
_name: Option<String>,
_location: String,
_wasm: Vec<u8>,
) -> Result<()> {
anyhow::bail!("InstallPlugin operation unimplemented.")
}
async fn uninstall_plugin(&self, _identifier: String) -> Result<()> {
anyhow::bail!("UninstallPlugin operation unimplemented.")
}
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/api/src/sort.rs | api/src/sort.rs | use modsurfer_convert::api::{Direction, Field};
#[derive(PartialEq, Clone, Debug)]
pub enum SortDirection {
Asc,
Desc,
}
impl SortDirection {
pub fn default() -> SortDirection {
SortDirection::Desc
}
pub fn from_str(f: &str) -> Option<SortDirection> {
match f.to_lowercase().as_str() {
"asc" => Some(SortDirection::Asc),
"desc" => Some(SortDirection::Desc),
_ => None,
}
}
pub fn to_proto(self) -> Direction {
match self {
SortDirection::Asc => Direction::Asc,
SortDirection::Desc => Direction::Desc,
}
}
}
#[derive(PartialEq, Clone, Debug)]
pub enum SortField {
Size,
Name,
CreatedAt,
Language,
ImportsCount,
ExportsCount,
Sha256,
Complexity,
}
impl SortField {
pub fn from_str(f: &str) -> Option<SortField> {
match f.to_lowercase().as_str() {
"size" => Some(SortField::Size),
"name" => Some(SortField::Name),
"created_at" => Some(SortField::CreatedAt),
"language" => Some(SortField::Language),
"imports_count" => Some(SortField::ImportsCount),
"exports_count" => Some(SortField::ExportsCount),
"sha256" => Some(SortField::Sha256),
"complexity" => Some(SortField::Complexity),
_ => None,
}
}
pub fn to_proto(self) -> Field {
match self {
SortField::Size => Field::Size,
SortField::Name => Field::Name,
SortField::CreatedAt => Field::CreatedAt,
SortField::Language => Field::Language,
SortField::ImportsCount => Field::ImportsCount,
SortField::ExportsCount => Field::ExportsCount,
SortField::Sha256 => Field::Sha256,
SortField::Complexity => Field::Complexity,
}
}
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/api/src/client.rs | api/src/client.rs | use std::collections::HashMap;
use anyhow::Result;
use async_trait::async_trait;
use modsurfer_convert::{
api::{self, Sort},
to_api, Audit,
};
use modsurfer_module::{Export, Import, Module};
use modsurfer_validation::Report;
use protobuf::{self, EnumOrUnknown, Message, MessageField, SpecialFields};
use reqwest;
use url::Url;
use super::{ApiClient, List, Persisted, SortDirection, SortField};
enum ModserverCommand {
CreateModule(api::CreateModuleRequest),
GetModule(api::GetModuleRequest),
ListModules(api::ListModulesRequest),
SearchModules(api::SearchModulesRequest),
DeleteModules(api::DeleteModulesRequest),
AuditModules(api::AuditModulesRequest),
DiffModules(api::DiffRequest),
ValidateModule(api::ValidateModuleRequest),
GetModuleGraph(api::GetModuleGraphRequest),
CallPlugin(api::CallPluginRequest),
InstallPlugin(api::InstallPluginRequest),
UninstallPlugin(api::UninstallPluginRequest),
}
/// The API Client implementation.
#[derive(Clone)]
pub struct Client {
inner: reqwest::Client,
base_url: String,
}
#[async_trait(?Send)]
impl ApiClient for Client {
/// Construct an API Client using the `base_url`, which should be the server host address and
/// port needed to communicate with a Modsurfer backend. Many backends default to http://localhost:1739.
fn new(base_url: &str) -> Result<Self> {
let inner = reqwest::ClientBuilder::new()
.build()
.map_err(|e| anyhow::anyhow!("{}", e))?;
Ok(Client {
inner,
base_url: base_url.to_string(),
})
}
/// Find a module by its ID.
async fn get_module(&self, module_id: i64) -> Result<Persisted<Module>> {
let req = api::GetModuleRequest {
module_id,
..Default::default()
};
let res: api::GetModuleResponse = self.send(ModserverCommand::GetModule(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "get module request failed"));
}
if res.module.is_some() {
Ok(res.module.unwrap().into())
} else {
Err(anyhow::anyhow!("No module found."))
}
}
/// List all modules stored in the database. Provide an offset and limit to control the pagination
/// and size of the result set returned.
async fn list_modules(&self, offset: u32, limit: u32) -> Result<List<Persisted<Module>>> {
let mut pagination: api::Pagination = Default::default();
pagination.limit = limit;
pagination.offset = offset;
let mut req = api::ListModulesRequest::new();
req.pagination = MessageField::some(pagination);
let res: api::ListModulesResponse = self.send(ModserverCommand::ListModules(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "list module request failed"));
}
let modules = res.modules.into_iter().map(Into::into).collect();
Ok(List::new(modules, res.total as u32, offset, limit))
}
/// Create a new module entry in Modsurfer. If no `location` is set, the module will be named
/// by its SHA-256 hash + some timestamp in milliseconds. A `location` must be a valid URL, and
/// can use arbitrary schemes such as `file://<PATH>`, `s3://<BUCKET>/<PATH>`, etc. Use the
/// `location` to indicate the module's current or eventual storage identifier.
async fn create_module(
&self,
wasm: impl AsRef<[u8]> + Send,
metadata: Option<HashMap<String, String>>,
location: std::option::Option<Url>,
) -> Result<(i64, String)> {
let req = api::CreateModuleRequest {
wasm: wasm.as_ref().to_vec(),
metadata: metadata.unwrap_or_default(),
location: location.map(Into::into),
..Default::default()
};
let res: api::CreateModuleResponse = self.send(ModserverCommand::CreateModule(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "create module request failed"));
}
Ok((res.module_id, res.hash))
}
/// Search for modules based on input parameters. The query will combine these inputs using
/// `AND` conditions.
async fn search_modules(
&self,
module_id: Option<i64>,
hash: Option<String>,
function_name: Option<String>,
module_name: Option<String>,
imports: Option<Vec<Import>>,
exports: Option<Vec<Export>>,
min_size: Option<u64>,
max_size: Option<u64>,
location: Option<url::Url>,
source_language: Option<String>,
metadata: Option<HashMap<String, String>>,
inserted_before: Option<chrono::DateTime<chrono::Utc>>,
inserted_after: Option<chrono::DateTime<chrono::Utc>>,
strings: Option<Vec<String>>,
offset: u32,
limit: u32,
sort_field: Option<SortField>,
sort_direction: Option<SortDirection>,
) -> Result<List<Persisted<Module>>> {
let mut pagination: api::Pagination = Default::default();
pagination.limit = limit;
pagination.offset = offset;
let location = if let Some(u) = location {
Some(u.to_string())
} else {
None
};
let inserted_before = if let Some(t) = inserted_before {
protobuf::MessageField::some(protobuf::well_known_types::timestamp::Timestamp {
seconds: t.timestamp(),
nanos: t.timestamp_subsec_nanos() as i32,
special_fields: protobuf::SpecialFields::new(),
})
} else {
protobuf::MessageField::none()
};
let inserted_after = if let Some(t) = inserted_after {
protobuf::MessageField::some(protobuf::well_known_types::timestamp::Timestamp {
seconds: t.timestamp(),
nanos: t.timestamp_subsec_nanos() as i32,
special_fields: protobuf::SpecialFields::new(),
})
} else {
protobuf::MessageField::none()
};
let sort = match sort_field {
Some(f) => MessageField::some(Sort {
direction: EnumOrUnknown::new(
sort_direction
.unwrap_or(SortDirection::default())
.to_proto(),
),
field: EnumOrUnknown::new(f.to_proto()),
special_fields: SpecialFields::default(),
}),
_ => MessageField::none(),
};
let req = api::SearchModulesRequest {
id: module_id,
hash,
function_name,
module_name,
imports: to_api::imports(imports.unwrap_or_default()),
exports: to_api::exports(exports.unwrap_or_default()),
min_size,
max_size,
location,
sort,
source_language: source_language
.map(From::from)
.map(to_api::source_language)
.map(EnumOrUnknown::new),
metadata: metadata.unwrap_or_default(),
inserted_before,
inserted_after,
strings: strings.unwrap_or_default(),
pagination: MessageField::some(pagination),
..Default::default()
};
let res: api::SearchModulesResponse =
self.send(ModserverCommand::SearchModules(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "search modules request failed"));
}
let modules = res.modules.into_iter().map(Into::into).collect();
Ok(List::new(
modules,
res.total as u32,
res.pagination.offset,
res.pagination.limit,
))
}
/// Delete a module from the database. This is a non-reversable operation.
async fn delete_modules(&self, module_ids: Vec<i64>) -> Result<HashMap<i64, String>> {
let req = api::DeleteModulesRequest {
module_ids,
..Default::default()
};
let res: api::DeleteModulesResponse =
self.send(ModserverCommand::DeleteModules(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "delete modules request failed"));
}
Ok(res.module_id_hash)
}
/// Audit the modules based on a provided checkfile and expected outcome.
async fn audit_modules(
&self,
audit: Audit,
) -> Result<HashMap<i64, modsurfer_validation::Report>> {
let mut pagination: api::Pagination = Default::default();
pagination.limit = audit.page.limit;
pagination.offset = audit.page.offset;
let req = api::AuditModulesRequest {
outcome: EnumOrUnknown::new(api::AuditOutcome::from(audit.outcome)),
pagination: MessageField::some(pagination),
checkfile: audit.checkfile,
..Default::default()
};
let res: api::AuditModulesResponse = self.send(ModserverCommand::AuditModules(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "audit modules request failed"));
}
let mut id_reports: HashMap<i64, modsurfer_validation::Report> = Default::default();
res.invalid_module_report
.iter()
.for_each(|(id, json_report)| {
if let Ok(report) = serde_json::from_slice(json_report) {
let _ = id_reports.insert(*id, report);
} else {
log::error!("failed to decode validation report for module {}", id);
}
});
Ok(id_reports)
}
async fn diff_modules(
&self,
module1: i64,
module2: i64,
color_terminal: bool,
with_context: bool,
) -> Result<String> {
let req = api::DiffRequest {
module1,
module2,
color_terminal,
with_context,
..Default::default()
};
let res: api::DiffResponse = self.send(ModserverCommand::DiffModules(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "diff request failed"));
}
Ok(res.diff)
}
async fn validate_module(
&self,
wasm: impl AsRef<[u8]> + Send,
checkfile: impl AsRef<[u8]> + Send,
) -> Result<Report> {
let input = api::validate_module_request::Module_input::Module(wasm.as_ref().to_vec());
let req = api::ValidateModuleRequest {
checkfile: checkfile.as_ref().to_vec(),
module_input: Some(input),
..Default::default()
};
let res: api::ValidateModuleResponse =
self.send(ModserverCommand::ValidateModule(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "validate request failed"));
}
Ok(serde_json::from_slice(&res.invalid_module_report)?)
}
/// Find a module graph by its ID.
async fn get_module_graph(&self, module_id: i64) -> Result<Vec<u8>> {
let req = api::GetModuleGraphRequest {
module_id,
..Default::default()
};
let res: api::GetModuleGraphResponse =
self.send(ModserverCommand::GetModuleGraph(req)).await?;
if res.error.is_some() {
return Err(api_error(
res.error,
format!(
"get module graph request failed for module_id {}",
module_id
)
.as_str(),
));
}
if res.module_graph.is_some() {
Ok(res.module_graph.unwrap().json_bytes)
} else {
Err(anyhow::anyhow!(
"No module graph found for module id {}.",
module_id
))
}
}
/// Call a Modsurfer plugin. This feature is only available in enterprise Modsurfer.
async fn call_plugin(
&self,
identifier: String,
function_name: String,
input: Vec<u8>,
) -> Result<Vec<u8>, anyhow::Error> {
let req = api::CallPluginRequest {
identifier: identifier.clone(),
function_name,
input,
..Default::default()
};
let res: api::CallPluginResponse = self.send(ModserverCommand::CallPlugin(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "CallPlugin request failed"));
}
Ok(res.output)
}
/// Install a Modsurfer plugin. This feature is only available in enterprise Modsurfer.
async fn install_plugin(
&self,
identifier: String,
name: Option<String>,
location: String,
wasm: Vec<u8>,
) -> Result<(), anyhow::Error> {
let req = api::InstallPluginRequest {
identifier,
name,
location,
wasm: wasm.clone(),
..Default::default()
};
let res: api::InstallPluginResponse =
self.send(ModserverCommand::InstallPlugin(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "InstallPlugin request failed"));
}
Ok(())
}
/// Uninstall a Modsurfer plugin. This feature is only available in enterprise Modsurfer.
async fn uninstall_plugin(&self, identifier: String) -> Result<(), anyhow::Error> {
let req = api::UninstallPluginRequest {
identifier,
..Default::default()
};
let res: api::UninstallPluginResponse =
self.send(ModserverCommand::UninstallPlugin(req)).await?;
if res.error.is_some() {
return Err(api_error(res.error, "UninstallPlugin request failed"));
}
Ok(())
}
}
impl Client {
    /// Serialize `cmd` into its protobuf request, dispatch it to the matching
    /// HTTP method + route, and decode the response body as protobuf message `T`.
    ///
    /// Every command follows the same wire pattern (build request -> send ->
    /// read bytes -> parse), so the per-command `match` only selects the HTTP
    /// method, route, and payload; the transport tail is shared instead of
    /// being duplicated across twelve identical arms.
    async fn send<T: protobuf::Message>(&self, cmd: ModserverCommand) -> Result<T> {
        let request = match cmd {
            ModserverCommand::CreateModule(req) => self
                .inner
                .put(&self.make_endpoint("/api/v1/module"))
                .body(req.write_to_bytes()?),
            ModserverCommand::GetModule(req) => self
                .inner
                .post(&self.make_endpoint("/api/v1/module"))
                .body(req.write_to_bytes()?),
            ModserverCommand::ListModules(req) => self
                .inner
                .post(&self.make_endpoint("/api/v1/modules"))
                .body(req.write_to_bytes()?),
            ModserverCommand::SearchModules(req) => self
                .inner
                .post(&self.make_endpoint("/api/v1/search"))
                .body(req.write_to_bytes()?),
            ModserverCommand::DeleteModules(req) => self
                .inner
                .delete(&self.make_endpoint("/api/v1/modules"))
                .body(req.write_to_bytes()?),
            ModserverCommand::AuditModules(req) => self
                .inner
                .post(&self.make_endpoint("/api/v1/audit"))
                .body(req.write_to_bytes()?),
            ModserverCommand::DiffModules(req) => self
                .inner
                .post(&self.make_endpoint("/api/v1/diff"))
                .body(req.write_to_bytes()?),
            ModserverCommand::ValidateModule(req) => self
                .inner
                .post(&self.make_endpoint("/api/v1/validate"))
                .body(req.write_to_bytes()?),
            ModserverCommand::GetModuleGraph(req) => self
                .inner
                .post(&self.make_endpoint("/api/v1/module_graph"))
                .body(req.write_to_bytes()?),
            ModserverCommand::CallPlugin(req) => self
                .inner
                .post(&self.make_endpoint("/api/v1/plugin"))
                .body(req.write_to_bytes()?),
            ModserverCommand::InstallPlugin(req) => self
                .inner
                .put(&self.make_endpoint("/api/v1/plugin"))
                .body(req.write_to_bytes()?),
            ModserverCommand::UninstallPlugin(req) => self
                .inner
                .delete(&self.make_endpoint("/api/v1/plugin"))
                .body(req.write_to_bytes()?),
        };
        // Shared transport tail: send, collect the body, parse as protobuf.
        let data = request.send().await?.bytes().await?;
        let val = protobuf::Message::parse_from_bytes(&data)?;
        Ok(val)
    }

    /// Join `route` onto the configured base URL, avoiding a doubled slash
    /// when the base URL carries a trailing `/`.
    fn make_endpoint(&self, route: &str) -> String {
        format!("{}{}", self.base_url.trim_end_matches('/'), route)
    }
}
/// Convert a protobuf-encoded server error into an `anyhow::Error`, prefixing
/// it with `msg` and appending the server-side error code in brackets.
fn api_error(
    error: protobuf::MessageField<modsurfer_convert::api::Error>,
    msg: &str,
) -> anyhow::Error {
    let detail = error.get_or_default();
    anyhow::anyhow!("{}: {} [{}]", msg, detail.message, detail.code)
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/api/src/interop.rs | api/src/interop.rs | use modsurfer_convert::{api, from_api};
use modsurfer_module::Module;
/// A type returned from the API acting as a collection of results, with some additional data about
/// how the results were constructed (including the offset and limit from the caller).
#[derive(Debug, Default, PartialEq)]
pub struct List<T> {
    // The page of results returned by the API call.
    inner: Vec<T>,
    // Total count reported by the server for the query (not just this page).
    total: u32,
    // Pagination offset the caller requested.
    offset: u32,
    // Maximum number of records per page the caller requested.
    limit: u32,
}
impl<T> List<T> {
    /// Construct a new List from a container of objects as well as some information about how the
    /// the inner container was created.
    pub fn new(inner: Vec<T>, total: u32, offset: u32, limit: u32) -> Self {
        Self {
            inner,
            total,
            offset,
            limit,
        }
    }

    /// Convert the inner container of objects into a `Vec<&T>`, obtaining references to the objects.
    pub fn vec(&self) -> Vec<&T> {
        // `iter().collect()` suffices; the original `.map(|t| t)` was a no-op.
        self.inner.iter().collect()
    }

    /// Separate the items from within the `List` to use independently.
    ///
    /// Returns `(items, offset, limit, total)` — note the order differs from
    /// the field declaration order.
    pub fn split(&self) -> (Vec<&T>, u32, u32, u32) {
        (self.vec(), self.offset, self.limit, self.total)
    }
}
/// A helper type, returned from some API operations which contains the database-assigned ID of a
/// persisted object.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct Persisted<T> {
    // The stored object itself.
    inner: T,
    // The ID the database assigned to `inner` when it was persisted.
    source_id: i64,
}
impl<T> Persisted<T> {
    /// The database-assigned ID of the persisted object.
    pub fn get_id(&self) -> i64 {
        self.source_id
    }

    /// Borrow the persisted object itself, independent of its ID.
    pub fn get_inner(&self) -> &T {
        &self.inner
    }

    /// Consume the wrapper and return the persisted object, discarding the ID.
    pub fn into_inner(self) -> T {
        self.inner
    }
}
// Allow a `Persisted<T>` to be used anywhere a `&T` is accepted via `AsRef`.
impl<T> AsRef<T> for Persisted<T> {
    fn as_ref(&self) -> &T {
        &self.inner
    }
}
// Convert a wire-format `api::Module` into the domain `Module`, preserving the
// database-assigned ID in the `Persisted` wrapper.
impl From<api::Module> for Persisted<Module> {
    fn from(a: api::Module) -> Self {
        // Empty metadata maps are normalized to `None`.
        let metadata = if a.metadata.is_empty() {
            None
        } else {
            Some(a.metadata)
        };
        // Native targets keep the full timestamp as a `SystemTime`; a missing
        // timestamp falls back to the default (zero) protobuf `Timestamp`.
        #[cfg(not(target_arch = "wasm32"))]
        let inserted_at: std::time::SystemTime = a
            .inserted_at
            .unwrap_or_else(|| protobuf::well_known_types::timestamp::Timestamp::new())
            .into();
        // wasm32 targets keep only whole seconds as a plain integer.
        #[cfg(target_arch = "wasm32")]
        let inserted_at: u64 = a.inserted_at.seconds as u64;
        Persisted {
            inner: Module {
                hash: a.hash,
                imports: from_api::imports(a.imports),
                exports: from_api::exports(a.exports),
                size: a.size as u64,
                location: a.location,
                source_language: from_api::source_language(
                    a.source_language.enum_value_or_default(),
                ),
                metadata,
                #[cfg(not(target_arch = "wasm32"))]
                inserted_at: inserted_at.into(),
                #[cfg(target_arch = "wasm32")]
                inserted_at: inserted_at,
                strings: a.strings,
                complexity: a.complexity,
                graph: a.graph,
                function_hashes: a.function_hashes,
            },
            source_id: a.id,
        }
    }
}
#[cfg(feature = "mock")]
impl<T> Persisted<T> {
    /// Wrap an arbitrary object with a caller-chosen ID so it can be fed to
    /// API operations which expect persisted (ID-bearing) values. Only
    /// available with the `mock` feature enabled.
    pub fn from_module(id: i64, module: T) -> Self {
        Self {
            inner: module,
            source_id: id,
        }
    }
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/plugins/src/lib.rs | plugins/src/lib.rs | pub const MODSURFER_WASM: &[u8] = include_bytes!("modsurfer.wasm");
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/cli/src/lib.rs | cli/src/lib.rs | mod cmd;
pub use cmd::exec::{Cli, Hash, Id, Limit, MetadataEntry, Offset, OutputFormat, Version};
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/cli/src/main.rs | cli/src/main.rs | use std::{env, path::PathBuf, process::ExitCode};
use anyhow::Result;
use clap::{Arg, ArgAction, Command};
use modsurfer_convert::AuditOutcome;
use url::Url;
mod cmd;
use cmd::{Cli, Hash, Id, Limit, MetadataEntry, Offset, OutputFormat, Version};
/// Environment variable that overrides the Modsurfer API base URL.
const BASE_URL_ENV: &str = "MODSURFER_BASE_URL";
/// Fallback base URL when the environment variable is not set. (`'static` is
/// implied on consts — clippy: redundant_static_lifetimes.)
const DEFAULT_BASE_URL: &str = "http://localhost:1739";
#[tokio::main]
async fn main() -> Result<ExitCode> {
    // Resolve the API base URL from MODSURFER_BASE_URL; fall back to the
    // default when unset (or when the value is not valid unicode).
    let raw = env::var_os(BASE_URL_ENV).unwrap_or_else(|| DEFAULT_BASE_URL.into());
    let base_url = Url::parse(raw.to_str().unwrap_or(DEFAULT_BASE_URL))?;

    let cmd = Command::new("modsurfer")
        .about("Modsurfer CLI is used to interact with the HTTP API or validate modules offline.")
        .version(env!("CARGO_PKG_VERSION"))
        .before_help("Copyright Dylibso, Inc. <support@dylib.so>")
        .subcommands(make_subcommands());

    Cli::new(cmd, base_url).execute().await
}
/// Attach the shared `--output-format` argument to a subcommand.
fn add_output_arg(cmd: Command) -> Command {
    let output = Arg::new("output-format")
        .value_parser(clap::value_parser!(OutputFormat))
        .long("output-format")
        .required(false)
        .help("set the output format of any command, supports `json` or `table` (default)");
    cmd.arg(output)
}
/// Build every CLI subcommand. Commands whose output supports the shared
/// `--output-format` flag get it attached; `generate`, `diff`, and `plugin`
/// are chained without it because their output is not table/JSON switchable.
fn make_subcommands() -> Vec<Command> {
    let create = clap::Command::new("create")
        .about("Create a new entry for a module.")
        .arg(
            Arg::new("path")
                .value_parser(clap::value_parser!(PathBuf))
                .long("path")
                .short('p')
                .help("a path on disk to a valid WebAssembly module"),
        )
        .arg(
            Arg::new("metadata")
                .value_parser(clap::value_parser!(MetadataEntry))
                .long("metadata")
                .short('m')
                .action(ArgAction::Append)
                .required(false)
                .help(
                    "a repeatable key=value metadata entry, to add arbitrary context to a module",
                ),
        )
        .arg(
            Arg::new("location")
                .value_parser(clap::value_parser!(url::Url))
                .long("location")
                .short('l')
                .required(false)
                .help("a valid URL to where this module should be located"),
        )
        .arg(
            Arg::new("check")
                .value_parser(clap::value_parser!(PathBuf))
                .long("check")
                .short('c')
                .required(false)
                .help("a path on disk to a YAML checkfile which declares validation requirements"),
        );
    let delete = clap::Command::new("delete")
        .about("Delete a module and its versions.")
        .arg(
            Arg::new("id")
                .value_parser(clap::value_parser!(i64))
                .long("id")
                .action(ArgAction::Append)
                .help("the numeric ID of a module entry in Modsurfer"),
        );
    let get = clap::Command::new("get")
        .about("Get a module by its ID.")
        .arg(
            Arg::new("id")
                .value_parser(clap::value_parser!(Id))
                .long("id")
                .help("the numeric ID of a module entry in Modsurfer"),
        );
    let list = clap::Command::new("list")
        .about(
            "List all modules, paginated by the `offset` and `limit` parameters or their defaults.",
        )
        .arg(
            Arg::new("offset")
                .value_parser(clap::value_parser!(Offset))
                .long("offset")
                .default_value("0")
                .help("the pagination offset by which modules are listed"),
        )
        .arg(
            Arg::new("limit")
                .value_parser(clap::value_parser!(Limit))
                .long("limit")
                .default_value("50")
                .help("the maximum number of modules in a list of results"),
        );
    let search = clap::Command::new("search")
        .about("Search for modules matching optional parameters.")
        .arg(
            Arg::new("function-name")
                .long("function-name")
                .required(false)
                // Fixed help text: the closing backtick was missing.
                .help("adds a search parameter to match on `function-name`"),
        )
        .arg(
            Arg::new("module-name")
                .long("module-name")
                .required(false)
                .help("adds a search parameter to match on `module-name`"),
        )
        .arg(
            Arg::new("source-language")
                .long("source-language")
                .required(false)
                .help("adds a search parameter to match on `source-language`"),
        )
        .arg(
            Arg::new("hash")
                .value_parser(clap::value_parser!(Hash))
                .long("hash")
                .required(false)
                .help("adds a search parameter to match on `hash`"),
        )
        .arg(
            Arg::new("text")
                .long("text")
                .required(false)
                .help("adds a search parameter to match on `strings` extracted from a module"),
        )
        .arg(
            Arg::new("offset")
                .value_parser(clap::value_parser!(Offset))
                .long("offset")
                .default_value("0")
                .help("the pagination offset by which modules are listed"),
        )
        .arg(
            Arg::new("limit")
                .value_parser(clap::value_parser!(Limit))
                .long("limit")
                .default_value("50")
                .help("the maximum number of modules in a list of results"),
        );
    let generate = clap::Command::new("generate")
        .about("Generate a starter checkfile from the given module.")
        .arg(
            Arg::new("path")
                .value_parser(clap::value_parser!(PathBuf))
                .long("path")
                .short('p')
                .help("a path on disk to a valid WebAssembly module"),
        )
        .arg(
            Arg::new("output")
                .value_parser(clap::value_parser!(PathBuf))
                .long("output")
                .short('o')
                .default_value("mod.yaml")
                .help("a path on disk to write a generated YAML checkfile"),
        );
    let validate = clap::Command::new("validate")
        .about("Validate a module using a module checkfile.")
        .arg(
            Arg::new("path")
                .value_parser(clap::value_parser!(PathBuf))
                .long("path")
                .short('p')
                .help("a path on disk to a valid WebAssembly module"),
        )
        .arg(
            Arg::new("check")
                .value_parser(clap::value_parser!(PathBuf))
                .long("check")
                .short('c')
                .default_value("mod.yaml")
                .help("a path on disk to a YAML file which declares validation requirements"),
        );
    let yank = clap::Command::new("yank")
        .about("Mark a module version as yanked (unavailable).")
        .arg(
            Arg::new("id")
                .value_parser(clap::value_parser!(Id))
                .long("id")
                .help("the numeric ID of a module entry in Modsurfer"),
        )
        .arg(
            Arg::new("version")
                .value_parser(clap::value_parser!(Version))
                .long("version")
                .help("the version of a module entry in Modsurfer (if no version exists, this command has no effect)",
        ));
    let audit = clap::Command::new("audit")
        .about("Return a list of modules which violate requirements in the provided checkfile.")
        .arg(
            Arg::new("outcome")
                .value_parser(clap::value_parser!(AuditOutcome))
                .long("outcome")
                .default_value("fail")
                .help("which type of expected outcome the audit should verify ('pass' or 'fail')"),
        )
        .arg(
            Arg::new("check")
                .value_parser(clap::value_parser!(PathBuf))
                .long("check")
                .short('c')
                .default_value("mod.yaml")
                .help("a path on disk to a YAML file which declares validation requirements"),
        )
        .arg(
            Arg::new("offset")
                .value_parser(clap::value_parser!(Offset))
                .long("offset")
                .default_value("0")
                .help("the pagination offset by which modules are listed"),
        )
        .arg(
            Arg::new("limit")
                .value_parser(clap::value_parser!(Limit))
                .long("limit")
                .default_value("50")
                .help("the maximum number of modules in a list of results"),
        );
    let diff = clap::Command::new("diff")
        .about("Compare two modules")
        .arg(
            Arg::new("with-context")
                .value_parser(clap::value_parser!(bool))
                .long("with-context")
                .default_value("false")
                .action(clap::ArgAction::SetTrue)
                // Fixed help text typo: "unchnaged" -> "unchanged".
                .help("retain the surrounding unchanged lines in the diff as context"),
        )
        .arg(Arg::new("module1").help("first module ID or path to .wasm"))
        .arg(Arg::new("module2").help("second module ID or path to .wasm"));
    let call_plugin = clap::Command::new("call")
        .about("Call a Modsurfer plugin.")
        .arg(
            Arg::new("identifier")
                .long("id")
                .help("the identifier of the registered plugin"),
        )
        .arg(
            Arg::new("function")
                .long("function")
                .short('f')
                .help("the function to be called"),
        )
        .arg(
            Arg::new("input")
                .long("input")
                .short('i')
                .help("use @{path_to_file} to specify a file as input to your plugin's function. Otherwise, the value provided will used as input to your function as raw bytes"),
        )
        .arg(
            Arg::new("output")
                .value_parser(clap::value_parser!(PathBuf))
                .long("output")
                .short('o')
                .help("a location on disk to write the output. The output of the call will be written to stdout if not specified"),
        );
    // TODO: allow specification of plugin "config"
    let install_plugin =
        clap::Command::new("install")
            .about("Install a Modsurfer plugin to a given `identifier`. Any subsequent installs for a given `identifier` will overwrite the plugin at that `identifier` with the data provided on the command.")
            .arg(
                Arg::new("identifier")
                    .long("id")
                    .help("the identifier of the plugin to be installed"),
            )
            .arg(
                Arg::new("name")
                    .required(false)
                    .long("name")
                    .short('n')
                    .help("the human readable name of the plugin"),
            )
            .arg(Arg::new("wasm").long("wasm").short('w').help(
                "a path on disk or a remote URL to the wasm you'd like to install as a plugin",
            ));
    let uninstall_plugin = clap::Command::new("uninstall")
        .about("Uninstall a Modsurfer plugin.")
        .arg(
            Arg::new("identifier")
                .long("id")
                .help("the identifier of the plugin to uninstall"),
        );
    let plugin = clap::Command::new("plugin")
        .about("Manage and invoke your Modsurfer plugins")
        .subcommand(call_plugin)
        .subcommand(install_plugin)
        .subcommand(uninstall_plugin);
    // This collection of commands should be exclusive to ones whose output can be formatted based on the --output-format arg, either `table` (default) or `json`.
    // If the command does not reliably support this kind of formatting, put the command within the "chained" vec below.
    [create, delete, get, list, search, validate, yank, audit]
        .into_iter()
        .map(add_output_arg)
        .chain(vec![generate, diff, plugin])
        .collect()
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/cli/src/cmd/mod.rs | cli/src/cmd/mod.rs | pub mod api_result;
pub mod exec;
pub mod generate;
#[allow(unused_imports)]
pub use exec::*;
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/cli/src/cmd/generate.rs | cli/src/cmd/generate.rs | use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use anyhow::Result;
use serde_yaml;
use modsurfer_validation::{generate_checkfile, Module as ModuleParser};
/// Parse the wasm module at `wasm`, derive a starter checkfile from it, and
/// write that checkfile as YAML (preceded by a documentation header comment)
/// to `output`.
pub async fn checkfile_from_module(wasm: &PathBuf, output: &PathBuf) -> Result<()> {
    let bytes = tokio::fs::read(wasm).await?;
    let parsed = ModuleParser::parse(&bytes)?;
    let checkfile = generate_checkfile(&parsed)?;

    let mut out = File::create(output)?;
    writeln!(
        &mut out,
        "# For more information about other checkfile options, see the documentation at https://dev.dylib.so/docs/modsurfer/cli#checkfile"
    )?;
    serde_yaml::to_writer(&out, &checkfile)?;
    Ok(())
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/cli/src/cmd/exec.rs | cli/src/cmd/exec.rs | #![allow(unused)]
use std::io::Write;
use std::ops::Sub;
use std::path::Path;
use std::process::ExitCode;
use std::{collections::HashMap, ffi::OsString, path::PathBuf};
use anyhow::{anyhow, Result};
use human_bytes::human_bytes;
use modsurfer_api::{ApiClient, Client, Persisted};
use modsurfer_convert::{Audit, AuditOutcome, Pagination};
use modsurfer_module::{Module, SourceLanguage};
use modsurfer_validation::validate_module;
use serde::Serialize;
use url::Url;
use super::api_result::{ApiResult, ApiResults, SimpleApiResult, SimpleApiResults};
use super::generate::checkfile_from_module;
// Semantic aliases used throughout the CLI so subcommand signatures are
// self-describing. These are plain type aliases, not newtypes.
pub type Id = i64;
pub type Hash = String;
pub type ModuleName = String;
pub type FunctionName = String;
pub type TextSearch = String;
pub type Offset = u32;
pub type Limit = u32;
pub type Version = String;
pub type ModuleFile = PathBuf;
pub type CheckFile = PathBuf;
pub type MetadataEntry = String;
pub type WithContext = bool;
pub type Identifier = String;
pub type PluginName = String;
pub type OutputFile = PathBuf;
// Plugin input given on the CLI: either inline bytes, or a path to a file
// containing the bytes (the `@path` convention).
#[derive(Clone, Debug)]
pub enum BytesOrPath {
    Bytes(Vec<u8>),
    Path(PathBuf),
}
impl BytesOrPath {
    /// Parse the CLI convention: a leading `@` means "read from this path";
    /// anything else is used verbatim as raw bytes.
    fn from(s: &str) -> Self {
        // `strip_prefix` replaces the original allocation-heavy
        // `to_owned().starts_with("@")` check plus `chars().skip(1)` rebuild.
        match s.strip_prefix('@') {
            Some(path) => BytesOrPath::Path(PathBuf::from(path)),
            None => BytesOrPath::Bytes(s.as_bytes().to_vec()),
        }
    }

    /// Produce the actual input bytes: inline bytes are copied, paths are
    /// read asynchronously from disk.
    async fn resolve(&self) -> Result<Vec<u8>, anyhow::Error> {
        match self {
            BytesOrPath::Bytes(v) => Ok(v.to_vec()),
            // `tokio::fs::read` accepts the `PathBuf` directly; the original
            // `Path::new(&v)` wrapper was redundant.
            BytesOrPath::Path(v) => Ok(tokio::fs::read(v).await?),
        }
    }
}
// A wasm source that is either a local filesystem path or a remote URL.
#[derive(Clone, Debug)]
pub enum PathOrUrl {
    Path(PathBuf),
    Url(url::Url),
}
impl PathOrUrl {
    /// Classify the string: anything that parses as a URL is `Url`,
    /// everything else is treated as a local path.
    fn from(s: &str) -> Self {
        url::Url::parse(s)
            .map(PathOrUrl::Url)
            .unwrap_or_else(|_| PathOrUrl::Path(PathBuf::from(s)))
    }

    /// Fetch the bytes behind this source: local files are read from disk,
    /// URLs are downloaded over HTTP.
    async fn resolve(&self) -> Result<Vec<u8>, anyhow::Error> {
        match self {
            PathOrUrl::Path(p) => Ok(tokio::fs::read(p).await?),
            PathOrUrl::Url(u) => Ok(reqwest::get(u.as_str()).await?.bytes().await?.to_vec()),
        }
    }
}
// The assembled CLI: the clap command tree, its pre-rendered help text, and
// the API server to talk to.
#[derive(Debug)]
pub struct Cli {
    cmd: clap::Command,
    // Long help rendered once up front; printed when no subcommand is given.
    help: String,
    // Base URL of the Modsurfer API server.
    host: Url,
}
// How command results are rendered: machine-readable JSON or a human table.
#[derive(Clone, Debug)]
pub enum OutputFormat {
    Json,
    Table,
}
impl Default for OutputFormat {
fn default() -> Self {
OutputFormat::Table
}
}
impl From<String> for OutputFormat {
    /// `"json"` selects JSON output; any other value falls back to the table.
    fn from(value: String) -> Self {
        if value == "json" {
            Self::Json
        } else {
            Self::Table
        }
    }
}
impl From<OsString> for OutputFormat {
    /// Non-unicode values degrade to the empty string and therefore the
    /// default table format.
    fn from(value: OsString) -> Self {
        value.into_string().unwrap_or_default().into()
    }
}
// A module reference given on the CLI: either a database ID or a path to a
// local `.wasm` file.
#[derive(Clone, Debug)]
pub enum IdOrFilename {
    Id(Id),
    Filename(String),
}
impl IdOrFilename {
fn parse(s: impl Into<String>) -> Self {
let s = s.into();
if let Ok(x) = s.parse::<Id>() {
return IdOrFilename::Id(x);
}
IdOrFilename::Filename(s)
}
async fn fetch(&self, client: &Client) -> Result<Module, anyhow::Error> {
match self {
IdOrFilename::Id(id) => client.get_module(*id).await.map(|x| x.into_inner()),
IdOrFilename::Filename(filename) => {
let data = std::fs::read(filename)?;
modsurfer_validation::Module::parse(data)
}
}
}
}
// Fully-parsed CLI subcommand plus its arguments; borrows most argument data
// from the `clap::ArgMatches` it was extracted from.
#[derive(Debug, Default)]
pub enum Subcommand<'a> {
    // Fallback when the subcommand name is not recognized.
    #[default]
    Unknown,
    // (module path, optional checkfile, metadata, optional location, output format)
    Create(
        &'a ModuleFile,
        Option<&'a CheckFile>,
        HashMap<String, String>,
        Option<Url>,
        &'a OutputFormat,
    ),
    Delete(Vec<Id>, &'a OutputFormat),
    Get(Id, &'a OutputFormat),
    List(Offset, Limit, &'a OutputFormat),
    // (hash, module name, function name, source language, text, offset, limit, output format)
    Search(
        Option<&'a Hash>,
        Option<&'a ModuleName>,
        Option<&'a FunctionName>,
        Option<SourceLanguage>,
        Option<&'a TextSearch>,
        Offset,
        Limit,
        &'a OutputFormat,
    ),
    // (module file to inspect, checkfile path to write)
    Generate(ModuleFile, CheckFile),
    Validate(ModuleFile, CheckFile, &'a OutputFormat),
    Yank(Id, Version, &'a OutputFormat),
    Audit(CheckFile, AuditOutcome, Offset, Limit, &'a OutputFormat),
    // (first module, second module, keep unchanged context lines)
    Diff(IdOrFilename, IdOrFilename, WithContext),
    // (plugin identifier, function, input bytes-or-path, optional output file)
    CallPlugin(
        Identifier,
        FunctionName,
        BytesOrPath,
        Option<&'a OutputFile>,
    ),
    InstallPlugin(Identifier, Option<&'a PluginName>, PathOrUrl),
    UninstallPlugin(Identifier),
}
impl Cli {
    /// Build a `Cli`, pre-rendering the long help text (rendering requires a
    /// mutable command, hence `mut cmd`).
    pub fn new(mut cmd: clap::Command, host: Url) -> Self {
        let help = cmd.render_long_help().to_string();
        Self { cmd, help, host }
    }

    /// Parse argv and dispatch the chosen subcommand; with no subcommand,
    /// print the long help and exit successfully.
    pub async fn execute(&self) -> Result<ExitCode> {
        match self.cmd.clone().get_matches().subcommand() {
            Some(x) => self.run(x).await,
            _ => {
                println!("{}", self.help);
                Ok(ExitCode::SUCCESS)
            }
        }
    }

    /// Execute one parsed subcommand against the configured API host and
    /// translate its result into a process exit code.
    async fn run(&self, sub: impl Into<Subcommand<'_>>) -> Result<ExitCode> {
        match sub.into() {
            Subcommand::Unknown => unimplemented!("Unknown subcommand.\n\n{}", self.help),
            Subcommand::Create(module_path, checkfile_path, metadata, location, output_format) => {
                // Validate locally first when a checkfile was supplied; a
                // failing report aborts the upload.
                if let Some(check) = checkfile_path {
                    let report = validate_module(&module_path, check).await?;
                    if report.has_failures() {
                        println!(
                            "{}",
                            match output_format {
                                OutputFormat::Json => serde_json::to_string_pretty(&report)?,
                                OutputFormat::Table => report.to_string(),
                            }
                        );
                        return Ok(report.as_exit_code());
                    }
                }
                let wasm = tokio::fs::read(module_path).await?;
                let client = Client::new(self.host.as_str())?;
                let (id, hash) = client.create_module(wasm, Some(metadata), location).await?;
                let output = SimpleApiResults {
                    results: vec![SimpleApiResult {
                        module_id: id,
                        // `hash` is not used again, so move it (the original
                        // `hash.clone()` was redundant).
                        hash,
                    }],
                };
                println!(
                    "{}",
                    match output_format {
                        OutputFormat::Json => serde_json::to_string_pretty(&output)?,
                        OutputFormat::Table => output.to_string(),
                    }
                );
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::Delete(ids, output_format) => {
                let client = Client::new(self.host.as_str())?;
                let deleted_modules = client.delete_modules(ids).await?;
                let results = deleted_modules
                    .iter()
                    .map(|(id, hash)| SimpleApiResult {
                        module_id: *id,
                        hash: hash.clone(),
                    })
                    .collect();
                let output = SimpleApiResults { results };
                println!(
                    "{}",
                    match output_format {
                        OutputFormat::Json => serde_json::to_string_pretty(&output)?,
                        OutputFormat::Table => output.to_string(),
                    }
                );
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::Get(id, output_format) => {
                let client = Client::new(self.host.as_str())?;
                let m = client.get_module(id).await?;
                let results = vec![to_api_result(&m)];
                let output = ApiResults { results };
                println!(
                    "{}",
                    match output_format {
                        OutputFormat::Json => serde_json::to_string_pretty(&output)?,
                        OutputFormat::Table => output.to_string(),
                    }
                );
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::List(offset, limit, output_format) => {
                let client = Client::new(self.host.as_str())?;
                let list = client.list_modules(offset, limit).await?;
                let results = list.vec().into_iter().map(to_api_result).collect();
                let output = ApiResults { results };
                println!(
                    "{}",
                    match output_format {
                        OutputFormat::Json => serde_json::to_string_pretty(&output)?,
                        OutputFormat::Table => output.to_string(),
                    }
                );
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::Search(
                hash,
                mod_name,
                func_name,
                src_lang,
                text_search,
                offset,
                limit,
                output_format,
            ) => {
                let client = Client::new(self.host.as_str())?;
                // Only the CLI-exposed filters are populated; the remaining
                // search parameters are passed as `None`.
                let modules = client
                    .search_modules(
                        None,
                        hash.map(String::clone),
                        func_name.map(String::clone),
                        mod_name.map(String::clone),
                        None,
                        None,
                        None,
                        None,
                        None,
                        src_lang.map(|lang| lang.to_string()),
                        None,
                        None,
                        None,
                        text_search.map(|s| vec![s.clone()]),
                        offset,
                        limit,
                        None,
                        None,
                    )
                    .await?;
                let results = modules.vec().into_iter().map(to_api_result).collect();
                let output = ApiResults { results };
                println!(
                    "{}",
                    match output_format {
                        OutputFormat::Json => serde_json::to_string_pretty(&output)?,
                        OutputFormat::Table => output.to_string(),
                    }
                );
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::Generate(file, check) => match checkfile_from_module(&file, &check).await {
                Ok(_) => Ok(ExitCode::SUCCESS),
                Err(e) => {
                    println!("{:?}", e);
                    Ok(ExitCode::FAILURE)
                }
            },
            Subcommand::Validate(file, check, output_format) => {
                let report = validate_module(&file, &check).await?;
                match output_format {
                    OutputFormat::Json => println!("{}", serde_json::to_string_pretty(&report)?),
                    OutputFormat::Table => {
                        // Table output stays silent on success.
                        if report.has_failures() {
                            println!("{report}")
                        }
                    }
                };
                Ok(report.as_exit_code())
            }
            Subcommand::Yank(_id, _version, _output_format) => {
                println!("`yank` is not yet supported. Reach out to support@dylib.so for more information!");
                Ok(ExitCode::FAILURE)
            }
            Subcommand::Audit(check, outcome, offset, limit, output_format) => {
                let checkfile = tokio::fs::read(&check).await?;
                let page = Pagination { offset, limit };
                let audit = Audit {
                    checkfile,
                    page,
                    outcome,
                };
                let client = Client::new(self.host.as_str())?;
                let reports = client.audit_modules(audit).await?;
                match output_format {
                    OutputFormat::Json => println!("{}", serde_json::to_string_pretty(&reports)?),
                    OutputFormat::Table => {
                        let mut buf = vec![];
                        // A `for` loop (instead of `for_each`) lets `?`
                        // propagate write errors, which were previously ignored.
                        for (i, (id, report)) in reports.iter().enumerate() {
                            if i != 0 {
                                writeln!(buf)?;
                            }
                            writeln!(buf, "Report for module: {id}")?;
                            writeln!(buf, "{}", report)?;
                        }
                        print!("{}", String::from_utf8(buf)?);
                    }
                };
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::Diff(module1, module2, with_context) => {
                let client = Client::new(self.host.as_str())?;
                let module1 = module1.fetch(&client).await?;
                let module2 = module2.fetch(&client).await?;
                let diff = modsurfer_validation::Diff::new(
                    &module1,
                    &module2,
                    colored::control::SHOULD_COLORIZE.should_colorize(),
                    with_context,
                )?
                .to_string();
                print!("{}", diff);
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::CallPlugin(identifier, function, input_arg, output) => {
                let input = input_arg.resolve().await?;
                let client = Client::new(self.host.as_str())?;
                let res = client.call_plugin(identifier, function, input).await?;
                if let Some(output) = output {
                    tokio::fs::write(output, res).await?;
                } else {
                    // Propagate write failures; the result was silently
                    // dropped before.
                    std::io::stdout().write_all(&res)?;
                }
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::InstallPlugin(identifier, name, wasm) => {
                let location = match &wasm {
                    PathOrUrl::Path(v) => v.to_str().unwrap_or(""),
                    PathOrUrl::Url(v) => v.as_str(),
                }
                .to_string();
                let name = name.cloned();
                let wasm = wasm.resolve().await?;
                let client = Client::new(self.host.as_str())?;
                client
                    .install_plugin(identifier, name, location, wasm)
                    .await?;
                Ok(ExitCode::SUCCESS)
            }
            Subcommand::UninstallPlugin(identifier) => {
                let client = Client::new(self.host.as_str())?;
                client.uninstall_plugin(identifier).await?;
                Ok(ExitCode::SUCCESS)
            }
        }
    }
}
/// Flatten a persisted module into the row shape used for CLI output.
fn to_api_result(m: &Persisted<Module>) -> ApiResult {
    // Bind the inner module once instead of calling `get_inner()` per field.
    let module = m.get_inner();
    ApiResult {
        module_id: m.get_id(),
        hash: module.hash.clone(),
        file_name: module.file_name(),
        exports: module.exports.len(),
        imports: module.imports.len(),
        namespaces: module.get_import_namespaces(),
        source_language: module.source_language.clone(),
        size: human_bytes(module.size as f64),
    }
}
/// Read the shared `--output-format` argument, defaulting to the table format.
fn output_format(args: &clap::ArgMatches) -> &OutputFormat {
    // A reference to a unit variant is const-promoted to `'static`, so the
    // eager `unwrap_or` form is correct and simpler than the closure
    // (clippy: unnecessary_lazy_evaluations).
    args.get_one("output-format").unwrap_or(&OutputFormat::Table)
}
impl<'a> From<(&'a str, &'a clap::ArgMatches)> for Subcommand<'a> {
    /// Map a matched subcommand name plus its parsed arguments onto a typed
    /// `Subcommand`. Required arguments are `expect`ed because clap has
    /// already enforced their presence at parse time; any unrecognized name
    /// falls through to `Subcommand::Unknown`.
    fn from(input: (&'a str, &'a clap::ArgMatches)) -> Self {
        match input {
            ("create", args) => {
                let module_path = args
                    .get_one::<PathBuf>("path")
                    .expect("must provide a --path to the module on disk");
                let checkfile_path: Option<&PathBuf> = args.get_one("check");
                let raw_metadata = args
                    .get_many("metadata")
                    .unwrap_or_default()
                    .cloned()
                    .collect::<Vec<String>>();
                // Parse `key=value` pairs. `split_once` keeps any '=' inside
                // the value intact and lets us skip malformed entries instead
                // of panicking on a missing '=' (the previous `parts[1]`
                // indexing did panic).
                let metadata: HashMap<String, String> = raw_metadata
                    .into_iter()
                    .filter_map(|raw| {
                        raw.split_once('=')
                            .map(|(k, v)| (k.to_string(), v.to_string()))
                    })
                    .collect();
                let location: Option<&Url> = args.get_one("location");
                Subcommand::Create(
                    module_path,
                    checkfile_path,
                    metadata,
                    location.cloned(),
                    output_format(args),
                )
            }
            ("delete", args) => Subcommand::Delete(
                args.get_many("id")
                    .expect("module id(s) to delete")
                    .cloned()
                    .collect::<Vec<Id>>(),
                output_format(args),
            ),
            ("get", args) => Subcommand::Get(
                *args.get_one("id").expect("valid module ID"),
                output_format(args),
            ),
            // Pagination fallbacks: literals are free, so the eager
            // `unwrap_or` replaces the needless closures
            // (clippy: unnecessary_lazy_evaluations).
            ("list", args) => Subcommand::List(
                *args.get_one("offset").unwrap_or(&0),
                *args.get_one("limit").unwrap_or(&50),
                output_format(args),
            ),
            ("search", args) => {
                let hash: Option<&Hash> = args.get_one("hash");
                let mod_name: Option<&ModuleName> = args.get_one("module-name");
                let func_name: Option<&FunctionName> = args.get_one("function-name");
                let src_lang: Option<SourceLanguage> = args
                    .get_one("source-language")
                    .map(|s: &String| s.clone().into());
                let text_search: Option<&TextSearch> = args.get_one("text");
                let offset: Offset = *args
                    .get_one("offset")
                    .expect("offset should have default value");
                let limit: Limit = *args
                    .get_one("limit")
                    .expect("limit should have default value");
                Subcommand::Search(
                    hash,
                    mod_name,
                    func_name,
                    src_lang,
                    text_search,
                    offset,
                    limit,
                    output_format(args),
                )
            }
            ("generate", args) => Subcommand::Generate(
                args.get_one::<PathBuf>("path")
                    .expect("valid module path")
                    .clone(),
                args.get_one::<PathBuf>("output")
                    .expect("valid checkfile output path")
                    .clone(),
            ),
            ("validate", args) => Subcommand::Validate(
                args.get_one::<PathBuf>("path")
                    .expect("valid module path")
                    .clone(),
                args.get_one::<PathBuf>("check")
                    .expect("valid checkfile path")
                    .clone(),
                output_format(args),
            ),
            ("yank", args) => Subcommand::Yank(
                *args.get_one::<Id>("id").expect("id is required"),
                args.get_one::<Version>("version")
                    .expect("version is required")
                    .clone(),
                output_format(args),
            ),
            ("audit", args) => {
                let offset: Offset = *args
                    .get_one("offset")
                    .expect("offset should have default value");
                let limit: Limit = *args
                    .get_one("limit")
                    .expect("limit should have default value");
                Subcommand::Audit(
                    args.get_one::<PathBuf>("check")
                        .expect("valid checkfile path")
                        .clone(),
                    args.get_one::<AuditOutcome>("outcome")
                        .expect("requires valid outcome ('pass' or 'fail')")
                        .clone(),
                    offset,
                    limit,
                    output_format(args),
                )
            }
            ("diff", args) => {
                let module1 = args.get_one::<String>("module1").expect("id is required");
                let module2 = args.get_one::<String>("module2").expect("id is required");
                let with_context = *args
                    .get_one::<WithContext>("with-context")
                    .unwrap_or(&false);
                Subcommand::Diff(
                    IdOrFilename::parse(module1),
                    IdOrFilename::parse(module2),
                    with_context,
                )
            }
            ("plugin", args) => match args.subcommand() {
                Some(("call", args)) => {
                    let identifier = args
                        .get_one::<Identifier>("identifier")
                        .expect("identifier is required");
                    let function_name = args
                        .get_one::<FunctionName>("function")
                        .expect("function is required");
                    let input = args.get_one::<String>("input").expect("input is required");
                    let output = args.get_one::<OutputFile>("output");
                    Subcommand::CallPlugin(
                        identifier.to_string(),
                        function_name.to_string(),
                        BytesOrPath::from(input),
                        output,
                    )
                }
                Some(("install", args)) => {
                    let identifier = args
                        .get_one::<Identifier>("identifier")
                        .expect("identifier is required");
                    let name = args.get_one::<PluginName>("name");
                    let wasm = args.get_one::<String>("wasm").expect("wasm is required");
                    Subcommand::InstallPlugin(identifier.to_string(), name, PathOrUrl::from(wasm))
                }
                Some(("uninstall", args)) => {
                    let identifier = args
                        .get_one::<Identifier>("identifier")
                        .expect("identifier is required");
                    Subcommand::UninstallPlugin(identifier.to_string())
                }
                // Unknown or missing plugin subcommand.
                _ => Subcommand::Unknown,
            },
            _ => Subcommand::Unknown,
        }
    }
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/cli/src/cmd/api_result.rs | cli/src/cmd/api_result.rs | use std::fmt::Display;
use comfy_table::{modifiers::UTF8_SOLID_INNER_BORDERS, presets::UTF8_FULL, Row, Table};
use modsurfer_module::SourceLanguage;
use serde::{ser::SerializeStruct, Serialize};
/// Collection wrapper for full module rows returned by the API.
#[derive(Serialize)]
pub struct ApiResults<'a> {
    pub results: Vec<ApiResult<'a>>,
}
/// One module row as rendered by the CLI (table or serialized output).
/// `Serialize` is implemented manually for this type elsewhere in the file.
pub struct ApiResult<'a> {
    // Database ID of the module record.
    pub module_id: i64,
    // sha256 hash of the module's raw bytes.
    pub hash: String,
    // File-name component of the module's location.
    pub file_name: String,
    // Number of exported functions.
    pub exports: usize,
    // Number of imported functions.
    pub imports: usize,
    // Namespaces the module imports from, borrowed from the module data.
    pub namespaces: Vec<&'a str>,
    // Language detected from the wasm binary's metadata.
    pub source_language: SourceLanguage,
    // Pre-formatted human-readable size string.
    pub size: String,
}
/// Collection wrapper for abbreviated (id + hash) module rows.
#[derive(Serialize)]
pub struct SimpleApiResults {
    pub results: Vec<SimpleApiResult>,
}
/// Abbreviated module row: just the record ID and content hash.
/// `Serialize` is implemented manually for this type elsewhere in the file.
pub struct SimpleApiResult {
    // Database ID of the module record.
    pub module_id: i64,
    // sha256 hash of the module's raw bytes.
    pub hash: String,
}
impl<'a> Serialize for ApiResult<'a> {
    /// Hand-written `Serialize` so `module_id` is emitted as a string —
    /// NOTE(review): presumably to protect 64-bit IDs from numeric precision
    /// loss in JSON/JavaScript consumers; confirm intent.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // Length hint (8) matches the number of serialize_field calls below.
        let mut state = serializer.serialize_struct("ApiResult", 8)?;
        state.serialize_field("module_id", &self.module_id.to_string())?;
        state.serialize_field("hash", &self.hash)?;
        state.serialize_field("file_name", &self.file_name)?;
        state.serialize_field("exports", &self.exports)?;
        state.serialize_field("imports", &self.imports)?;
        state.serialize_field("namespaces", &self.namespaces)?;
        state.serialize_field("source_language", &self.source_language)?;
        state.serialize_field("size", &self.size)?;
        state.end()
    }
}
impl Serialize for SimpleApiResult {
    /// Hand-written `Serialize` so `module_id` is emitted as a string
    /// (mirrors the manual `ApiResult` serialization).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // The length hint must equal the number of serialize_field calls:
        // this struct writes 2 fields, not 8 (the previous value was a
        // copy-paste from ApiResult and can break length-strict serializers).
        let mut state = serializer.serialize_struct("SimpleApiResult", 2)?;
        state.serialize_field("module_id", &self.module_id.to_string())?;
        state.serialize_field("hash", &self.hash)?;
        state.end()
    }
}
impl Display for SimpleApiResults {
    /// Render the id/hash pairs as a two-column UTF-8 table.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut table = Table::new();
        table.load_preset(UTF8_FULL);
        table.apply_modifier(UTF8_SOLID_INNER_BORDERS);
        table.set_header(vec!["ID", "Hash"]);
        for result in &self.results {
            let cells = vec![result.module_id.to_string(), result.hash.clone()];
            table.add_row(Row::from(cells));
        }
        f.write_str(&table.to_string())
    }
}
impl Display for ApiResults<'_> {
    /// Render the full module rows as a UTF-8 table for terminal output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut table = Table::new();
        table.load_preset(UTF8_FULL);
        table.apply_modifier(UTF8_SOLID_INNER_BORDERS);
        table.set_header(vec![
            "ID",
            "Hash",
            "Filename",
            "# Exports",
            "# Imports",
            "Namespaces",
            "Source",
            "Size",
        ]);
        if self.results.is_empty() {
            // No rows: emit just the header table.
            return f.write_str(table.to_string().as_str());
        }
        self.results.iter().for_each(|m| {
            table.add_row(Row::from(vec![
                m.module_id.to_string(),
                // Abbreviate the hash to its first 6 bytes for display; fall
                // back to the whole string when it is shorter — the previous
                // `m.hash[0..6]` slice panicked on short hashes.
                m.hash.get(..6).unwrap_or(&m.hash).to_string(),
                m.file_name.clone(),
                m.exports.to_string(),
                m.imports.to_string(),
                m.namespaces.join(", "),
                m.source_language.to_string(),
                m.size.clone(),
            ]));
        });
        f.write_str(table.to_string().as_str())
    }
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/module/src/lib.rs | module/src/lib.rs | pub use anyhow::Error;
mod function;
mod module;
mod source_language;
pub use function::{Function, FunctionType, ValType};
pub use module::{Export, Import, Module};
pub use source_language::SourceLanguage;
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/module/src/module.rs | module/src/module.rs | use std::collections::{HashMap, HashSet};
use crate::{Function, SourceLanguage};
use serde;
use url;
/// A function import: the namespace it comes from plus the function's
/// name and signature.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Import {
    // Namespace (the wasm import "module" field) the function comes from.
    pub module_name: String,
    // Name and type of the imported function.
    pub func: Function,
}
/// A function export provided by the module.
#[derive(Debug, Default, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct Export {
    // Name and type of the exported function.
    pub func: Function,
}
/// A description of a wasm module extracted from the binary, encapsulating
/// its imports, exports, size, provenance, and derived analysis data.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct Module {
    /// sha256 hash of the modules raw bytes
    pub hash: String,
    /// function imports called by the module (see: <https://github.com/WebAssembly/design/blob/main/Modules.md#imports>)
    pub imports: Vec<Import>,
    /// function exports provided by the module (see: <https://github.com/WebAssembly/design/blob/main/Modules.md#exports>)
    pub exports: Vec<Export>,
    /// size in bytes of the module
    pub size: u64,
    /// path or locator to the module
    pub location: String,
    /// programming language used to produce this module
    pub source_language: SourceLanguage,
    /// arbitrary metadata provided by the operator of this module
    pub metadata: Option<HashMap<String, String>>,
    /// timestamp when this module was loaded and stored
    // NOTE(review): on wasm32 a plain integer replaces the chrono type —
    // presumably because chrono's wall clock is not usable there; confirm.
    #[cfg(not(target_arch = "wasm32"))]
    pub inserted_at: chrono::DateTime<chrono::Utc>,
    #[cfg(target_arch = "wasm32")]
    pub inserted_at: u64,
    /// the interned strings stored in the wasm binary (panic/abort messages, etc.)
    pub strings: Vec<String>,
    /// cyclomatic complexity of the module
    pub complexity: Option<u32>,
    /// the graph in Dot format
    pub graph: Option<Vec<u8>>,
    /// function hashes
    // NOTE(review): presumably keyed by function name, value = hash of the
    // function body — verify against the producer of this map.
    pub function_hashes: HashMap<String, String>,
}
impl Module {
    // TODO: also add memory imports and other items of interest
    /// Return the deduplicated namespaces this module imports functions from.
    /// Order is unspecified (the set is backed by a `HashSet`).
    pub fn get_import_namespaces(&self) -> Vec<&str> {
        // `map` + `collect` into a HashSet expresses the dedup directly,
        // replacing the manual `fold`/`insert` loop.
        self.imports
            .iter()
            .map(|import| import.module_name.as_str())
            .collect::<HashSet<_>>()
            .into_iter()
            .collect()
    }
}
impl Default for Module {
    /// Hand-written rather than derived because `inserted_at` defaults to
    /// the current time on native targets, which `#[derive(Default)]`
    /// cannot express.
    fn default() -> Self {
        Module {
            hash: String::new(),
            imports: vec![],
            exports: vec![],
            size: 0,
            location: String::new(),
            source_language: SourceLanguage::Unknown,
            metadata: None,
            // Native targets: record the wall-clock time of creation.
            #[cfg(not(target_arch = "wasm32"))]
            inserted_at: chrono::Utc::now(),
            // wasm targets store a plain integer timestamp; default to 0.
            #[cfg(target_arch = "wasm32")]
            inserted_at: 0,
            strings: vec![],
            complexity: None,
            graph: None,
            function_hashes: HashMap::new(),
        }
    }
}
impl Module {
    /// The file-name component of the module's location URL, or an empty
    /// string when the path has no file name (or it is not valid UTF-8).
    ///
    /// # Panics
    /// Panics (via [`Module::location_url`]) if `location` is not a valid URL.
    pub fn file_name(&self) -> String {
        std::path::Path::new(self.location_url().path())
            .file_name()
            .unwrap_or_default()
            .to_str()
            .unwrap_or_default()
            .to_owned()
    }
    /// Parse `location` into a [`url::Url`].
    ///
    /// # Panics
    /// Panics if `location` is not a valid URL.
    pub fn location_url(&self) -> url::Url {
        url::Url::parse(self.location.as_str()).expect("Invalid location")
    }
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/module/src/function.rs | module/src/function.rs | #[derive(Debug, Default, Clone, Hash, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
/// A named wasm function together with its type signature.
pub struct Function {
    // Function name as recorded in the wasm binary.
    pub name: String,
    // Parameter/result signature.
    pub ty: FunctionType,
}
/// A wasm function signature: parameter types and result types.
#[derive(Debug, Default, Clone, Hash, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct FunctionType {
    // Parameter types, in declaration order.
    pub params: Vec<ValType>,
    // Result types, in declaration order.
    pub results: Vec<ValType>,
}
/// Represents the types of values in a WebAssembly module.
///
/// A serializable/hashable mirror of `wasmparser::ValType`; see the `From`
/// conversions below for the mapping in both directions.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub enum ValType {
    /// The value type is i32.
    I32,
    /// The value type is i64.
    I64,
    /// The value type is f32.
    F32,
    /// The value type is f64.
    F64,
    /// The value type is v128.
    V128,
    /// The value type is a function reference.
    FuncRef,
    /// The value type is an extern reference.
    ExternRef,
}
impl ValType {
    /// Returns whether this value type is a "reference type".
    ///
    /// Only reference types are allowed in tables, for example, and with some
    /// instructions. Current reference types include `funcref` and `externref`.
    pub fn is_reference_type(&self) -> bool {
        matches!(self, ValType::FuncRef | ValType::ExternRef)
    }
}
impl From<wasmparser::ValType> for ValType {
    /// Convert a parsed wasmparser value type into this crate's
    /// serializable mirror.
    ///
    /// # Panics
    /// Panics on reference types other than `funcref`/`externref`.
    fn from(v: wasmparser::ValType) -> Self {
        use wasmparser::ValType as V;
        match v {
            V::I32 => ValType::I32,
            V::I64 => ValType::I64,
            V::F32 => ValType::F32,
            V::F64 => ValType::F64,
            V::V128 => ValType::V128,
            V::Ref(wasmparser::RefType::FUNCREF) => ValType::FuncRef,
            // Fix: EXTERNREF previously mapped to ValType::FuncRef, which
            // misclassified extern references and broke round-tripping with
            // the inverse `From<ValType> for wasmparser::ValType` impl.
            V::Ref(wasmparser::RefType::EXTERNREF) => ValType::ExternRef,
            V::Ref(r) => panic!("Unknown ref type: {:?}", r),
        }
    }
}
impl From<ValType> for wasmparser::ValType {
    /// Convert back into wasmparser's representation; total over all
    /// variants of this crate's `ValType`.
    fn from(v: ValType) -> Self {
        use wasmparser::ValType as V;
        match v {
            ValType::I32 => V::I32,
            ValType::I64 => V::I64,
            ValType::F32 => V::F32,
            ValType::F64 => V::F64,
            ValType::V128 => V::V128,
            ValType::FuncRef => V::FUNCREF,
            ValType::ExternRef => V::EXTERNREF,
        }
    }
}
impl<'a> From<&'a wasmparser::FuncType> for FunctionType {
    /// Build this crate's owned `FunctionType` from a parsed wasmparser
    /// signature.
    fn from(ft: &'a wasmparser::FuncType) -> Self {
        Self {
            // `wasmparser::ValType` is `Copy`; `.copied()` states that
            // directly instead of the `.cloned()` spelling.
            params: ft.params().iter().copied().map(ValType::from).collect(),
            results: ft.results().iter().copied().map(ValType::from).collect(),
        }
    }
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
dylibso/modsurfer | https://github.com/dylibso/modsurfer/blob/71ae2ca8e06917b33281bd0b75843ee60edccba8/module/src/source_language.rs | module/src/source_language.rs | use std::ffi::OsString;
/// Detected from the `producers` section in the wasm binary, or from other implicit values within
/// the wasm binary.
/// See more: <https://github.com/WebAssembly/tool-conventions/blob/main/ProducersSection.md>
// non_exhaustive: new languages can be added without a breaking change.
#[non_exhaustive]
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum SourceLanguage {
    // Fallback used when the language cannot be determined (see the
    // `From<String>` impl's catch-all arm).
    Unknown,
    Rust,
    Go,
    C,
    Cpp,
    AssemblyScript,
    Swift,
    JavaScript,
    Haskell,
    Zig,
}
impl From<&str> for SourceLanguage {
    /// Map a producers-section language name to a known variant; anything
    /// unrecognized becomes `Unknown`. Borrowed input avoids forcing
    /// callers to allocate a `String` just to convert.
    fn from(s: &str) -> Self {
        match s {
            "Rust" => SourceLanguage::Rust,
            "Go" => SourceLanguage::Go,
            "C" => SourceLanguage::C,
            "C++" => SourceLanguage::Cpp,
            "AssemblyScript" => SourceLanguage::AssemblyScript,
            "Swift" => SourceLanguage::Swift,
            "JavaScript" => SourceLanguage::JavaScript,
            "Haskell" => SourceLanguage::Haskell,
            "Zig" => SourceLanguage::Zig,
            _ => SourceLanguage::Unknown,
        }
    }
}
impl From<String> for SourceLanguage {
    /// Owned-string convenience; delegates to the `&str` implementation
    /// (kept for backward compatibility with existing callers).
    fn from(s: String) -> Self {
        s.as_str().into()
    }
}
impl From<OsString> for SourceLanguage {
fn from(value: OsString) -> Self {
let s = value.into_string().unwrap_or_default();
s.into()
}
}
impl std::fmt::Display for SourceLanguage {
    /// Human-readable language name, matching the strings accepted by the
    /// `From<String>` conversion.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            SourceLanguage::Unknown => "Unknown",
            SourceLanguage::Rust => "Rust",
            SourceLanguage::Go => "Go",
            SourceLanguage::C => "C",
            SourceLanguage::Cpp => "C++",
            SourceLanguage::AssemblyScript => "AssemblyScript",
            SourceLanguage::Swift => "Swift",
            SourceLanguage::JavaScript => "JavaScript",
            SourceLanguage::Haskell => "Haskell",
            SourceLanguage::Zig => "Zig",
        })
    }
}
impl Default for SourceLanguage {
    // `Unknown` is the natural default for an undetected language.
    fn default() -> Self {
        SourceLanguage::Unknown
    }
}
| rust | Apache-2.0 | 71ae2ca8e06917b33281bd0b75843ee60edccba8 | 2026-01-04T20:22:33.912580Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.