repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/hashing_account.rs | crates/stages/stages/src/stages/hashing_account.rs | use alloy_primitives::{keccak256, B256};
use itertools::Itertools;
use reth_config::config::{EtlConfig, HashingConfig};
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
tables,
transaction::{DbTx, DbTxMut},
RawKey, RawTable, RawValue,
};
use reth_etl::Collector;
use reth_primitives_traits::Account;
use reth_provider::{AccountExtReader, DBProvider, HashingWriter, StatsReader};
use reth_stages_api::{
AccountHashingCheckpoint, EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint,
StageError, StageId, UnwindInput, UnwindOutput,
};
use reth_storage_errors::provider::ProviderResult;
use std::{
fmt::Debug,
ops::{Range, RangeInclusive},
sync::mpsc::{self, Receiver},
};
use tracing::*;
/// Maximum number of channels that can exist in memory.
///
/// Bounds how many per-chunk result receivers accumulate before their contents
/// are flushed into the ETL collector.
const MAXIMUM_CHANNELS: usize = 10_000;
/// Maximum number of accounts to hash per rayon worker job.
const WORKER_CHUNK_SIZE: usize = 100;
/// Account hashing stage hashes plain account.
/// This is preparation before generating intermediate hashes and calculating Merkle tree root.
#[derive(Clone, Debug)]
pub struct AccountHashingStage {
    /// The threshold (in number of blocks) for switching between incremental
    /// hashing and full storage hashing.
    pub clean_threshold: u64,
    /// The maximum number of accounts to process before committing during unwind.
    pub commit_threshold: u64,
    /// ETL configuration used to buffer hashed entries before writing them to the database.
    pub etl_config: EtlConfig,
}
impl AccountHashingStage {
    /// Create new instance of [`AccountHashingStage`] from the hashing and ETL configurations.
    pub const fn new(config: HashingConfig, etl_config: EtlConfig) -> Self {
        Self {
            etl_config,
            clean_threshold: config.clean_threshold,
            commit_threshold: config.commit_threshold,
        }
    }
}
#[cfg(any(test, feature = "test-utils"))]
impl AccountHashingStage {
    /// Initializes the `PlainAccountState` table with `num_accounts` having some random state
    /// at the target block, with `txs_range` transactions in each block.
    ///
    /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the
    /// account state in the `AccountChangeSets` table.
    pub fn seed<Tx: DbTx + DbTxMut + 'static, N: reth_provider::providers::ProviderNodeTypes>(
        provider: &reth_provider::DatabaseProvider<Tx, N>,
        opts: SeedOpts,
    ) -> Result<Vec<(alloy_primitives::Address, Account)>, StageError>
    where
        N::Primitives: reth_primitives_traits::FullNodePrimitives<
            Block = reth_ethereum_primitives::Block,
            BlockHeader = reth_primitives_traits::Header,
        >,
    {
        use alloy_primitives::U256;
        use reth_db_api::models::AccountBeforeTx;
        use reth_provider::{StaticFileProviderFactory, StaticFileWriter};
        use reth_testing_utils::{
            generators,
            generators::{random_block_range, random_eoa_accounts, BlockRangeParams},
        };

        let mut rng = generators::rng();

        // Generate and persist a chain of random blocks over the requested range.
        let blocks = random_block_range(
            &mut rng,
            opts.blocks.clone(),
            BlockRangeParams { parent: Some(B256::ZERO), tx_count: opts.txs, ..Default::default() },
        );

        for block in blocks {
            provider.insert_historical_block(block.try_recover().unwrap()).unwrap();
        }

        // Headers go into static files; commit the writer so they are visible to readers.
        provider
            .static_file_provider()
            .latest_writer(reth_static_file_types::StaticFileSegment::Headers)
            .unwrap()
            .commit()
            .unwrap();

        let mut accounts = random_eoa_accounts(&mut rng, opts.accounts);
        {
            // Account State generator
            let mut account_cursor =
                provider.tx_ref().cursor_write::<tables::PlainAccountState>()?;
            // `append` requires keys in ascending order.
            accounts.sort_by(|a, b| a.0.cmp(&b.0));
            for (addr, acc) in &accounts {
                account_cursor.append(*addr, acc)?;
            }

            // Pair each block number with one account and record a synthetic "previous"
            // state (nonce - 1, balance - 1) in the changeset table.
            let mut acc_changeset_cursor =
                provider.tx_ref().cursor_write::<tables::AccountChangeSets>()?;
            for (t, (addr, acc)) in opts.blocks.zip(&accounts) {
                let Account { nonce, balance, .. } = acc;
                let prev_acc = Account {
                    nonce: nonce - 1,
                    balance: balance - U256::from(1),
                    bytecode_hash: None,
                };
                let acc_before_tx = AccountBeforeTx { address: *addr, info: Some(prev_acc) };
                acc_changeset_cursor.append(t, &acc_before_tx)?;
            }
        }

        Ok(accounts)
    }
}
impl Default for AccountHashingStage {
    /// Defaults: full re-hash past 500k blocks, unwind in 100k-account batches.
    fn default() -> Self {
        let etl_config = EtlConfig::default();
        Self { etl_config, clean_threshold: 500_000, commit_threshold: 100_000 }
    }
}
impl<Provider> Stage<Provider> for AccountHashingStage
where
    Provider: DBProvider<Tx: DbTxMut> + HashingWriter + AccountExtReader + StatsReader,
{
    /// Return the id of the stage
    fn id(&self) -> StageId {
        StageId::AccountHashing
    }

    /// Execute the stage.
    fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
        // Nothing to do if the checkpoint already reached the target.
        if input.target_reached() {
            return Ok(ExecOutput::done(input.checkpoint()))
        }

        let (from_block, to_block) = input.next_block_range().into_inner();

        // If there are more blocks than the clean threshold it is faster to wipe the table and
        // re-hash the whole plain state; otherwise aggregate the changesets and hash only the
        // touched accounts. Also, if we start from genesis, we need to hash from scratch, as
        // genesis accounts are not in the changesets.
        if to_block - from_block > self.clean_threshold || from_block == 1 {
            let tx = provider.tx_ref();
            // clear table, load all accounts and hash it
            tx.clear::<tables::HashedAccounts>()?;
            let mut accounts_cursor = tx.cursor_read::<RawTable<tables::PlainAccountState>>()?;
            let mut collector =
                Collector::new(self.etl_config.file_size, self.etl_config.dir.clone());
            let mut channels = Vec::with_capacity(MAXIMUM_CHANNELS);

            // channels used to return result of account hashing
            for chunk in &accounts_cursor.walk(None)?.chunks(WORKER_CHUNK_SIZE) {
                // An _unordered_ channel to receive results from a rayon job
                let (tx, rx) = mpsc::channel();
                channels.push(rx);

                let chunk = chunk.collect::<Result<Vec<_>, _>>()?;
                // Spawn the hashing task onto the global rayon pool
                rayon::spawn(move || {
                    for (address, account) in chunk {
                        let address = address.key().unwrap();
                        let _ = tx.send((RawKey::new(keccak256(address)), account));
                    }
                });

                // Flush to ETL when channels length reaches MAXIMUM_CHANNELS
                // (`collect` clears `channels`, so the counter effectively restarts).
                if !channels.is_empty() && channels.len().is_multiple_of(MAXIMUM_CHANNELS) {
                    collect(&mut channels, &mut collector)?;
                }
            }

            // Drain any remaining channels into the collector.
            collect(&mut channels, &mut collector)?;

            let mut hashed_account_cursor =
                tx.cursor_write::<RawTable<tables::HashedAccounts>>()?;

            // Insert ETL output (already sorted by hashed key) into the hashed table,
            // logging progress roughly every 10%.
            let total_hashes = collector.len();
            let interval = (total_hashes / 10).max(1);
            for (index, item) in collector.iter()?.enumerate() {
                if index > 0 && index.is_multiple_of(interval) {
                    info!(
                        target: "sync::stages::hashing_account",
                        progress = %format!("{:.2}%", (index as f64 / total_hashes as f64) * 100.0),
                        "Inserting hashes"
                    );
                }

                let (key, value) = item?;
                hashed_account_cursor
                    .append(RawKey::<B256>::from_vec(key), &RawValue::<Account>::from_vec(value))?;
            }
        } else {
            // Aggregate all transition changesets and make a list of accounts that have been
            // changed.
            let lists = provider.changed_accounts_with_range(from_block..=to_block)?;
            // Iterate over plain state and get newest value.
            // Assumption we are okay to make is that plainstate represent
            // `previous_stage_progress` state.
            let accounts = provider.basic_accounts(lists)?;
            // Insert and hash accounts to hashing table
            provider.insert_account_for_hashing(accounts)?;
        }

        // We finished the hashing stage, no future iterations is expected for the same block range,
        // so no checkpoint is needed.
        let checkpoint = StageCheckpoint::new(input.target())
            .with_account_hashing_stage_checkpoint(AccountHashingCheckpoint {
                progress: stage_checkpoint_progress(provider)?,
                ..Default::default()
            });

        Ok(ExecOutput { checkpoint, done: true })
    }

    /// Unwind the stage.
    fn unwind(
        &mut self,
        provider: &Provider,
        input: UnwindInput,
    ) -> Result<UnwindOutput, StageError> {
        let (range, unwind_progress, _) =
            input.unwind_block_range_with_threshold(self.commit_threshold);

        // Aggregate all transition changesets and make a list of accounts that have been changed.
        provider.unwind_account_hashing_range(range)?;

        let mut stage_checkpoint =
            input.checkpoint.account_hashing_stage_checkpoint().unwrap_or_default();

        stage_checkpoint.progress = stage_checkpoint_progress(provider)?;

        Ok(UnwindOutput {
            checkpoint: StageCheckpoint::new(unwind_progress)
                .with_account_hashing_stage_checkpoint(stage_checkpoint),
        })
    }
}
/// Drains every pending receiver and inserts the hashed `(key, account)` pairs into the
/// ETL [`Collector`], leaving `channels` empty afterwards.
fn collect(
    channels: &mut Vec<Receiver<(RawKey<B256>, RawValue<Account>)>>,
    collector: &mut Collector<RawKey<B256>, RawValue<Account>>,
) -> Result<(), StageError> {
    // `recv` keeps yielding until the corresponding rayon job drops its sender;
    // draining also removes each receiver once it has been exhausted.
    for rx in channels.drain(..) {
        while let Ok((hashed_key, value)) = rx.recv() {
            collector.insert(hashed_key, value)?;
        }
    }
    info!(target: "sync::stages::hashing_account", "Hashed {} entries", collector.len());
    Ok(())
}
// TODO: Rewrite this
/// `SeedOpts` provides configuration parameters for calling `AccountHashingStage::seed`
/// in unit tests or benchmarks to generate an initial database state for running the
/// stage.
///
/// In order to check the "full hashing" mode of the stage you want to generate more
/// transitions than `AccountHashingStage.clean_threshold`. This requires:
/// 1. Creating enough blocks so there's enough transactions to generate the required transition
///    keys in the `BlockTransitionIndex` (which depends on the `TxTransitionIndex` internally)
/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually take the
///    2nd codepath
#[derive(Clone, Debug)]
pub struct SeedOpts {
    /// The range of blocks to be generated
    pub blocks: RangeInclusive<u64>,
    /// The number of accounts to be generated
    pub accounts: usize,
    /// The range of transactions to be generated per block.
    pub txs: Range<u8>,
}
/// Builds the stage's entities checkpoint: hashed entries processed so far out of the
/// total number of plain-state accounts.
fn stage_checkpoint_progress(provider: &impl StatsReader) -> ProviderResult<EntitiesCheckpoint> {
    let processed = provider.count_entries::<tables::HashedAccounts>()? as u64;
    let total = provider.count_entries::<tables::PlainAccountState>()? as u64;
    Ok(EntitiesCheckpoint { processed, total })
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::{
        stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError,
        UnwindStageTestRunner,
    };
    use alloy_primitives::U256;
    use assert_matches::assert_matches;
    use reth_primitives_traits::Account;
    use reth_provider::providers::StaticFileWriter;
    use reth_stages_api::StageUnitCheckpoint;
    use test_utils::*;

    stage_test_suite_ext!(AccountHashingTestRunner, account_hashing);

    /// Forces the "clean" (full re-hash) codepath by setting the threshold to 1 and
    /// verifies that every plain-state account ends up in the hashed table.
    #[tokio::test]
    async fn execute_clean_account_hashing() {
        let (previous_stage, stage_progress) = (20, 10);
        // Set up the runner
        let mut runner = AccountHashingTestRunner::default();
        runner.set_clean_threshold(1);
        let input = ExecInput {
            target: Some(previous_stage),
            checkpoint: Some(StageCheckpoint::new(stage_progress)),
        };

        runner.seed_execution(input).expect("failed to seed execution");
        let rx = runner.execute(input);
        let result = rx.await.unwrap();

        assert_matches!(
            result,
            Ok(ExecOutput {
                checkpoint: StageCheckpoint {
                    block_number,
                    stage_checkpoint: Some(StageUnitCheckpoint::Account(AccountHashingCheckpoint {
                        progress: EntitiesCheckpoint {
                            processed,
                            total,
                        },
                        ..
                    })),
                },
                done: true,
            }) if block_number == previous_stage &&
                processed == total &&
                total == runner.db.table::<tables::PlainAccountState>().unwrap().len() as u64
        );

        // Validate the stage execution
        assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation");
    }

    mod test_utils {
        use super::*;
        use crate::test_utils::TestStageDB;
        use alloy_primitives::Address;
        use reth_provider::DatabaseProviderFactory;

        /// Harness wiring [`AccountHashingStage`] into the shared stage test suite.
        pub(crate) struct AccountHashingTestRunner {
            pub(crate) db: TestStageDB,
            commit_threshold: u64,
            clean_threshold: u64,
            etl_config: EtlConfig,
        }

        impl AccountHashingTestRunner {
            pub(crate) fn set_clean_threshold(&mut self, threshold: u64) {
                self.clean_threshold = threshold;
            }

            // Kept for parity with other stage runners; currently unused.
            #[expect(dead_code)]
            pub(crate) fn set_commit_threshold(&mut self, threshold: u64) {
                self.commit_threshold = threshold;
            }

            /// Iterates over `PlainAccount` table and checks that the accounts match the ones
            /// in the `HashedAccounts` table
            pub(crate) fn check_hashed_accounts(&self) -> Result<(), TestRunnerError> {
                self.db.query(|tx| {
                    let mut acc_cursor = tx.cursor_read::<tables::PlainAccountState>()?;
                    let mut hashed_acc_cursor = tx.cursor_read::<tables::HashedAccounts>()?;

                    while let Some((address, account)) = acc_cursor.next()? {
                        let hashed_addr = keccak256(address);
                        if let Some((_, acc)) = hashed_acc_cursor.seek_exact(hashed_addr)? {
                            assert_eq!(acc, account)
                        }
                    }
                    Ok(())
                })?;

                Ok(())
            }

            /// Same as `check_hashed_accounts`, only that checks with the old account state,
            /// namely, the same account with nonce - 1 and balance - 1.
            pub(crate) fn check_old_hashed_accounts(&self) -> Result<(), TestRunnerError> {
                self.db.query(|tx| {
                    let mut acc_cursor = tx.cursor_read::<tables::PlainAccountState>()?;
                    let mut hashed_acc_cursor = tx.cursor_read::<tables::HashedAccounts>()?;

                    while let Some((address, account)) = acc_cursor.next()? {
                        let Account { nonce, balance, .. } = account;
                        // Mirror of the "previous" state written by `seed`.
                        let old_acc = Account {
                            nonce: nonce - 1,
                            balance: balance - U256::from(1),
                            bytecode_hash: None,
                        };
                        let hashed_addr = keccak256(address);
                        if let Some((_, acc)) = hashed_acc_cursor.seek_exact(hashed_addr)? {
                            assert_eq!(acc, old_acc)
                        }
                    }
                    Ok(())
                })?;

                Ok(())
            }
        }

        impl Default for AccountHashingTestRunner {
            fn default() -> Self {
                Self {
                    db: TestStageDB::default(),
                    commit_threshold: 1000,
                    clean_threshold: 1000,
                    etl_config: EtlConfig::default(),
                }
            }
        }

        impl StageTestRunner for AccountHashingTestRunner {
            type S = AccountHashingStage;

            fn db(&self) -> &TestStageDB {
                &self.db
            }

            fn stage(&self) -> Self::S {
                Self::S {
                    commit_threshold: self.commit_threshold,
                    clean_threshold: self.clean_threshold,
                    etl_config: self.etl_config.clone(),
                }
            }
        }

        impl ExecuteStageTestRunner for AccountHashingTestRunner {
            type Seed = Vec<(Address, Account)>;

            fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
                let provider = self.db.factory.database_provider_rw()?;
                let res = Ok(AccountHashingStage::seed(
                    &provider,
                    SeedOpts { blocks: 1..=input.target(), accounts: 10, txs: 0..3 },
                )
                .unwrap());
                provider.commit().expect("failed to commit");
                res
            }

            fn validate_execution(
                &self,
                input: ExecInput,
                output: Option<ExecOutput>,
            ) -> Result<(), TestRunnerError> {
                if let Some(output) = output {
                    let start_block = input.next_block();
                    let end_block = output.checkpoint.block_number;
                    // Nothing was executed; nothing to validate.
                    if start_block > end_block {
                        return Ok(())
                    }
                }
                self.check_hashed_accounts()
            }
        }

        impl UnwindStageTestRunner for AccountHashingTestRunner {
            fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
                self.check_old_hashed_accounts()
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/utils.rs | crates/stages/stages/src/stages/utils.rs | //! Utils for `stages`.
use alloy_primitives::{BlockNumber, TxNumber};
use reth_config::config::EtlConfig;
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
models::sharded_key::NUM_OF_INDICES_IN_SHARD,
table::{Decompress, Table},
transaction::{DbTx, DbTxMut},
BlockNumberList, DatabaseError,
};
use reth_etl::Collector;
use reth_provider::{
providers::StaticFileProvider, BlockReader, DBProvider, ProviderError,
StaticFileProviderFactory,
};
use reth_stages_api::StageError;
use reth_static_file_types::StaticFileSegment;
use std::{collections::HashMap, hash::Hash, ops::RangeBounds};
use tracing::info;
/// Number of blocks before pushing indices from cache to [`Collector`]
const DEFAULT_CACHE_THRESHOLD: u64 = 100_000;
/// Collects all history (`H`) indices for a range of changesets (`CS`) and stores them in a
/// [`Collector`].
///
/// ## Process
/// The function utilizes a `HashMap` cache with a structure of `PartialKey` (`P`) (Address or
/// Address.StorageKey) to `BlockNumberList`. When the cache exceeds its capacity, its contents are
/// moved to a [`Collector`]. Here, each entry's key is a concatenation of `PartialKey` and the
/// highest block number in its list.
///
/// ## Example
/// 1. Initial Cache State: `{ Address1: [1,2,3], ... }`
/// 2. Cache is flushed to the `Collector`.
/// 3. Updated Cache State: `{ Address1: [100,300], ... }`
/// 4. Cache is flushed again.
///
/// As a result, the `Collector` will contain entries such as `(Address1.3, [1,2,3])` and
/// `(Address1.300, [100,300])`. The entries may be stored across one or more files.
pub(crate) fn collect_history_indices<Provider, CS, H, P>(
    provider: &Provider,
    range: impl RangeBounds<CS::Key>,
    sharded_key_factory: impl Fn(P, BlockNumber) -> H::Key,
    partial_key_factory: impl Fn((CS::Key, CS::Value)) -> (u64, P),
    etl_config: &EtlConfig,
) -> Result<Collector<H::Key, H::Value>, StageError>
where
    Provider: DBProvider,
    CS: Table,
    H: Table<Value = BlockNumberList>,
    P: Copy + Eq + Hash,
{
    let mut changeset_cursor = provider.tx_ref().cursor_read::<CS>()?;

    let mut collector = Collector::new(etl_config.file_size, etl_config.dir.clone());
    let mut cache: HashMap<P, Vec<u64>> = HashMap::default();

    // Moves the cache contents into the collector, keying each entry by its partial key
    // combined with the highest block number of its (already sorted) list.
    let mut collect = |cache: &HashMap<P, Vec<u64>>| {
        for (key, indices) in cache {
            let last = indices.last().expect("qed");
            collector.insert(
                sharded_key_factory(*key, *last),
                BlockNumberList::new_pre_sorted(indices.iter().copied()),
            )?;
        }
        Ok::<(), StageError>(())
    };

    // observability
    let total_changesets = provider.tx_ref().entries::<CS>()?;
    let interval = (total_changesets / 1000).max(1);

    let mut flush_counter = 0;
    // Sentinel value so the very first changeset entry registers as a new block.
    let mut current_block_number = u64::MAX;
    for (idx, entry) in changeset_cursor.walk_range(range)?.enumerate() {
        let (block_number, key) = partial_key_factory(entry?);
        cache.entry(key).or_default().push(block_number);

        if idx > 0 && idx.is_multiple_of(interval) && total_changesets > 1000 {
            info!(target: "sync::stages::index_history", progress = %format!("{:.4}%", (idx as f64 / total_changesets as f64) * 100.0), "Collecting indices");
        }

        // Make sure we only flush the cache every DEFAULT_CACHE_THRESHOLD blocks.
        if current_block_number != block_number {
            current_block_number = block_number;
            flush_counter += 1;
            if flush_counter > DEFAULT_CACHE_THRESHOLD {
                collect(&cache)?;
                cache.clear();
                flush_counter = 0;
            }
        }
    }
    // Flush whatever remains in the cache.
    collect(&cache)?;

    Ok(collector)
}
/// Given a [`Collector`] created by [`collect_history_indices`] it iterates all entries, loading
/// the indices into the database in shards.
///
/// ## Process
/// Iterates over elements, grouping indices by their partial keys (e.g., `Address` or
/// `Address.StorageKey`). It flushes indices to disk when reaching a shard's max length
/// (`NUM_OF_INDICES_IN_SHARD`) or when the partial key changes, ensuring the last previous partial
/// key shard is stored.
pub(crate) fn load_history_indices<Provider, H, P>(
    provider: &Provider,
    mut collector: Collector<H::Key, H::Value>,
    append_only: bool,
    sharded_key_factory: impl Clone + Fn(P, u64) -> <H as Table>::Key,
    decode_key: impl Fn(Vec<u8>) -> Result<<H as Table>::Key, DatabaseError>,
    get_partial: impl Fn(<H as Table>::Key) -> P,
) -> Result<(), StageError>
where
    Provider: DBProvider<Tx: DbTxMut>,
    H: Table<Value = BlockNumberList>,
    P: Copy + Default + Eq,
{
    let mut write_cursor = provider.tx_ref().cursor_write::<H>()?;
    let mut current_partial = P::default();
    let mut current_list = Vec::<u64>::new();

    // observability
    let total_entries = collector.len();
    let interval = (total_entries / 10).max(1);

    for (index, element) in collector.iter()?.enumerate() {
        let (k, v) = element?;
        let sharded_key = decode_key(k)?;
        let new_list = BlockNumberList::decompress_owned(v)?;

        if index > 0 && index.is_multiple_of(interval) && total_entries > 10 {
            info!(target: "sync::stages::index_history", progress = %format!("{:.2}%", (index as f64 / total_entries as f64) * 100.0), "Writing indices");
        }

        // AccountsHistory: `Address`.
        // StorageHistory: `Address.StorageKey`.
        let partial_key = get_partial(sharded_key);

        if current_partial != partial_key {
            // We have reached the end of this subset of keys so
            // we need to flush its last indice shard.
            load_indices(
                &mut write_cursor,
                current_partial,
                &mut current_list,
                &sharded_key_factory,
                append_only,
                LoadMode::Flush,
            )?;

            current_partial = partial_key;
            current_list.clear();

            // If it's not the first sync, there might an existing shard already, so we need to
            // merge it with the one coming from the collector
            if !append_only {
                if let Some((_, last_database_shard)) =
                    write_cursor.seek_exact(sharded_key_factory(current_partial, u64::MAX))?
                {
                    current_list.extend(last_database_shard.iter());
                }
            }
        }

        current_list.extend(new_list.iter());
        // Write out any full shards, keeping the trailing partial one in `current_list`.
        load_indices(
            &mut write_cursor,
            current_partial,
            &mut current_list,
            &sharded_key_factory,
            append_only,
            LoadMode::KeepLast,
        )?;
    }

    // There will be one remaining shard that needs to be flushed to DB.
    load_indices(
        &mut write_cursor,
        current_partial,
        &mut current_list,
        &sharded_key_factory,
        append_only,
        LoadMode::Flush,
    )?;

    Ok(())
}
/// Shard and insert the indices list according to [`LoadMode`] and its length.
///
/// Does nothing unless the list overflows one shard (`NUM_OF_INDICES_IN_SHARD`) or the
/// caller requests a full flush.
pub(crate) fn load_indices<H, C, P>(
    cursor: &mut C,
    partial_key: P,
    list: &mut Vec<BlockNumber>,
    sharded_key_factory: &impl Fn(P, BlockNumber) -> <H as Table>::Key,
    append_only: bool,
    mode: LoadMode,
) -> Result<(), StageError>
where
    C: DbCursorRO<H> + DbCursorRW<H>,
    H: Table<Value = BlockNumberList>,
    P: Copy,
{
    if list.len() > NUM_OF_INDICES_IN_SHARD || mode.is_flush() {
        let chunks = list
            .chunks(NUM_OF_INDICES_IN_SHARD)
            .map(|chunks| chunks.to_vec())
            .collect::<Vec<Vec<u64>>>();

        let mut iter = chunks.into_iter().peekable();
        while let Some(chunk) = iter.next() {
            let mut highest = *chunk.last().expect("at least one index");

            if !mode.is_flush() && iter.peek().is_none() {
                // KeepLast: hand the trailing (possibly partial) shard back to the caller.
                *list = chunk;
            } else {
                if iter.peek().is_none() {
                    // The final shard of a key is stored under the `u64::MAX` sentinel.
                    highest = u64::MAX;
                }
                let key = sharded_key_factory(partial_key, highest);
                let value = BlockNumberList::new_pre_sorted(chunk);

                if append_only {
                    cursor.append(key, &value)?;
                } else {
                    cursor.upsert(key, &value)?;
                }
            }
        }
    }

    Ok(())
}
/// Mode on how to load index shards into the database.
pub(crate) enum LoadMode {
    /// Keep the last shard in memory and don't flush it to the database.
    KeepLast,
    /// Flush all shards into the database.
    Flush,
}

impl LoadMode {
    /// Whether every shard, including the trailing one, should be written out.
    const fn is_flush(&self) -> bool {
        match self {
            Self::Flush => true,
            Self::KeepLast => false,
        }
    }
}
/// Called when database is ahead of static files. Attempts to find the first block we are missing
/// transactions for.
pub(crate) fn missing_static_data_error<Provider>(
    last_tx_num: TxNumber,
    static_file_provider: &StaticFileProvider<Provider::Primitives>,
    provider: &Provider,
    segment: StaticFileSegment,
) -> Result<StageError, ProviderError>
where
    Provider: BlockReader + StaticFileProviderFactory,
{
    let mut last_block =
        static_file_provider.get_highest_static_file_block(segment).unwrap_or_default();

    // To be extra safe, we make sure that the last tx num matches the last block from its indices.
    // If not, get it.
    loop {
        if let Some(indices) = provider.block_body_indices(last_block)? {
            if indices.last_tx_num() <= last_tx_num {
                break
            }
        }
        // Don't underflow below the genesis block.
        if last_block == 0 {
            break
        }
        last_block -= 1;
    }

    // The first block past `last_block` is the one whose data is missing.
    let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default());

    Ok(StageError::MissingStaticFileData {
        block: Box::new(missing_block.block_with_parent()),
        segment,
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/bodies.rs | crates/stages/stages/src/stages/bodies.rs | use super::missing_static_data_error;
use futures_util::TryStreamExt;
use reth_db_api::{
cursor::DbCursorRO,
tables,
transaction::{DbTx, DbTxMut},
};
use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse};
use reth_provider::{
providers::StaticFileWriter, BlockReader, BlockWriter, DBProvider, ProviderError,
StaticFileProviderFactory, StatsReader, StorageLocation,
};
use reth_stages_api::{
EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId,
UnwindInput, UnwindOutput,
};
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::ProviderResult;
use std::{
cmp::Ordering,
task::{ready, Context, Poll},
};
use tracing::*;
/// The body stage downloads block bodies.
///
/// The body stage downloads block bodies for all block headers stored locally in storage.
///
/// # Empty blocks
///
/// Blocks with an ommers hash corresponding to no ommers *and* a transaction root corresponding to
/// no transactions will not have a block body downloaded for them, since it would be meaningless to
/// do so.
///
/// This also means that if there is no body for the block in storage (assuming the
/// block number <= the synced block of this stage), then the block can be considered empty.
///
/// # Tables
///
/// The bodies are processed and data is inserted into these tables:
///
/// - [`BlockOmmers`][reth_db_api::tables::BlockOmmers]
/// - [`BlockBodies`][reth_db_api::tables::BlockBodyIndices]
/// - [`Transactions`][reth_db_api::tables::Transactions]
/// - [`TransactionBlocks`][reth_db_api::tables::TransactionBlocks]
///
/// # Genesis
///
/// This stage expects that the genesis has been inserted into the appropriate tables:
///
/// - The header tables (see [`HeaderStage`][crate::stages::HeaderStage])
/// - The [`BlockOmmers`][reth_db_api::tables::BlockOmmers] table
/// - The [`BlockBodies`][reth_db_api::tables::BlockBodyIndices] table
/// - The [`Transactions`][reth_db_api::tables::Transactions] table
#[derive(Debug)]
pub struct BodyStage<D: BodyDownloader> {
    /// The body downloader.
    downloader: D,
    /// Block response buffer: filled by `poll_execute_ready`, consumed by `execute`.
    buffer: Option<Vec<BlockResponse<D::Block>>>,
}
impl<D: BodyDownloader> BodyStage<D> {
    /// Create new bodies stage that pulls block bodies from the given downloader.
    pub const fn new(downloader: D) -> Self {
        Self { buffer: None, downloader }
    }
}
/// Ensures that static files and database are in sync.
///
/// Compares the next transaction number expected by the database against the one expected
/// by the static files and reconciles any mismatch; `unwind_block` is the target of an
/// in-progress unwind, if any.
pub(crate) fn ensure_consistency<Provider>(
    provider: &Provider,
    unwind_block: Option<u64>,
) -> Result<(), StageError>
where
    Provider: DBProvider<Tx: DbTxMut> + BlockReader + StaticFileProviderFactory,
{
    // Get id for the next tx_num of zero if there are no transactions.
    let next_tx_num = provider
        .tx_ref()
        .cursor_read::<tables::TransactionBlocks>()?
        .last()?
        .map(|(id, _)| id + 1)
        .unwrap_or_default();

    let static_file_provider = provider.static_file_provider();

    // Make sure Transactions static file is at the same height. If it's further, this
    // input execution was interrupted previously and we need to unwind the static file.
    let next_static_file_tx_num = static_file_provider
        .get_highest_static_file_tx(StaticFileSegment::Transactions)
        .map(|id| id + 1)
        .unwrap_or_default();

    match next_static_file_tx_num.cmp(&next_tx_num) {
        // If static files are ahead, we are currently unwinding the stage or we didn't reach
        // the database commit in a previous stage run. So, our only solution is to unwind the
        // static files and proceed from the database expected height.
        Ordering::Greater => {
            let highest_db_block = provider.tx_ref().entries::<tables::BlockBodyIndices>()? as u64;
            let mut static_file_producer =
                static_file_provider.latest_writer(StaticFileSegment::Transactions)?;
            static_file_producer
                .prune_transactions(next_static_file_tx_num - next_tx_num, highest_db_block)?;
            // Since this is a database <-> static file inconsistency, we commit the change
            // straight away.
            static_file_producer.commit()?;
        }
        // If static files are behind, then there was some corruption or loss of files. This
        // error will trigger an unwind, that will bring the database to the same height as the
        // static files.
        Ordering::Less => {
            // If we are already in the process of unwind, this might be fine because we will
            // fix the inconsistency right away.
            if let Some(unwind_to) = unwind_block {
                let next_tx_num_after_unwind = provider
                    .block_body_indices(unwind_to)?
                    .map(|b| b.next_tx_num())
                    .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?;

                // This means we need a deeper unwind.
                if next_tx_num_after_unwind > next_static_file_tx_num {
                    return Err(missing_static_data_error(
                        next_static_file_tx_num.saturating_sub(1),
                        &static_file_provider,
                        provider,
                        StaticFileSegment::Transactions,
                    )?)
                }
            } else {
                return Err(missing_static_data_error(
                    next_static_file_tx_num.saturating_sub(1),
                    &static_file_provider,
                    provider,
                    StaticFileSegment::Transactions,
                )?)
            }
        }
        Ordering::Equal => {}
    }

    Ok(())
}
impl<Provider, D> Stage<Provider> for BodyStage<D>
where
    Provider: DBProvider<Tx: DbTxMut>
        + StaticFileProviderFactory
        + StatsReader
        + BlockReader
        + BlockWriter<Block = D::Block>,
    D: BodyDownloader,
{
    /// Return the id of the stage
    fn id(&self) -> StageId {
        StageId::Bodies
    }

    /// Polls the downloader for the next batch of bodies, buffering it for `execute`.
    fn poll_execute_ready(
        &mut self,
        cx: &mut Context<'_>,
        input: ExecInput,
    ) -> Poll<Result<(), StageError>> {
        if input.target_reached() || self.buffer.is_some() {
            return Poll::Ready(Ok(()))
        }

        // Update the header range on the downloader
        self.downloader.set_download_range(input.next_block_range())?;

        // Poll next downloader item.
        let maybe_next_result = ready!(self.downloader.try_poll_next_unpin(cx));

        // Task downloader can return `None` only if the response relaying channel was closed. This
        // is a fatal error to prevent the pipeline from running forever.
        let response = match maybe_next_result {
            Some(Ok(downloaded)) => {
                self.buffer = Some(downloaded);
                Ok(())
            }
            Some(Err(err)) => Err(err.into()),
            None => Err(StageError::ChannelClosed),
        };

        Poll::Ready(response)
    }

    /// Download block bodies from the last checkpoint for this stage up until the latest synced
    /// header, limited by the stage's batch size.
    fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
        if input.target_reached() {
            return Ok(ExecOutput::done(input.checkpoint()))
        }
        let (from_block, to_block) = input.next_block_range().into_inner();

        // Reconcile database and static files before writing anything new.
        ensure_consistency(provider, None)?;

        debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, "Commencing sync");

        // `poll_execute_ready` must have filled the buffer before `execute` is called.
        let buffer = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?;
        trace!(target: "sync::stages::bodies", bodies_len = buffer.len(), "Writing blocks");
        let highest_block = buffer.last().map(|r| r.block_number()).unwrap_or(from_block);

        // Write bodies to database.
        provider.append_block_bodies(
            buffer
                .into_iter()
                .map(|response| (response.block_number(), response.into_body()))
                .collect(),
            // We are writing transactions directly to static files.
            StorageLocation::StaticFiles,
        )?;

        // The stage is "done" if:
        // - We got fewer blocks than our target
        // - We reached our target and the target was not limited by the batch size of the stage
        let done = highest_block == to_block;
        Ok(ExecOutput {
            checkpoint: StageCheckpoint::new(highest_block)
                .with_entities_stage_checkpoint(stage_checkpoint(provider)?),
            done,
        })
    }

    /// Unwind the stage.
    fn unwind(
        &mut self,
        provider: &Provider,
        input: UnwindInput,
    ) -> Result<UnwindOutput, StageError> {
        // Discard any buffered-but-unwritten download batch.
        self.buffer.take();

        ensure_consistency(provider, Some(input.unwind_to))?;
        provider.remove_bodies_above(input.unwind_to, StorageLocation::Both)?;

        Ok(UnwindOutput {
            checkpoint: StageCheckpoint::new(input.unwind_to)
                .with_entities_stage_checkpoint(stage_checkpoint(provider)?),
        })
    }
}
// TODO(alexey): ideally, we want to measure Bodies stage progress in bytes, but it's hard to know
// beforehand how many bytes we need to download. So the good solution would be to measure the
// progress in gas as a proxy to size. Execution stage uses a similar approach.
fn stage_checkpoint<Provider>(provider: &Provider) -> ProviderResult<EntitiesCheckpoint>
where
    Provider: StatsReader + StaticFileProviderFactory,
{
    let processed = provider.count_entries::<tables::BlockBodyIndices>()? as u64;
    // Count only static files entries for the total. Counting database entries as well could
    // double-count, and the static files are guaranteed to contain everything the database has
    // because the `StaticFileProducer` runs before the pipeline starts.
    let total = provider.static_file_provider().count_entries::<tables::Headers>()? as u64;
    Ok(EntitiesCheckpoint { processed, total })
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner,
};
use assert_matches::assert_matches;
use reth_provider::StaticFileProviderFactory;
use reth_stages_api::StageUnitCheckpoint;
use test_utils::*;
stage_test_suite_ext!(BodyTestRunner, body);
/// Checks that the stage downloads at most `batch_size` blocks.
#[tokio::test]
async fn partial_body_download() {
let (stage_progress, previous_stage) = (1, 200);
// Set up test runner
let mut runner = BodyTestRunner::default();
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
runner.seed_execution(input).expect("failed to seed execution");
// Set the batch size (max we sync per stage execution) to less than the number of blocks
// the previous stage synced (10 vs 20)
let batch_size = 10;
runner.set_batch_size(batch_size);
// Run the stage
let rx = runner.execute(input);
// Check that we only synced around `batch_size` blocks even though the number of blocks
// synced by the previous stage is higher
let output = rx.await.unwrap();
runner.db().factory.static_file_provider().commit().unwrap();
assert_matches!(
output,
Ok(ExecOutput { checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed, // 1 seeded block body + batch size
total // seeded headers
}))
}, done: false }) if block_number < 200 &&
processed == batch_size + 1 && total == previous_stage + 1
);
assert!(runner.validate_execution(input, output.ok()).is_ok(), "execution validation");
}
/// Same as [`partial_body_download`] except the `batch_size` is not hit.
#[tokio::test]
async fn full_body_download() {
let (stage_progress, previous_stage) = (1, 20);
// Set up test runner
let mut runner = BodyTestRunner::default();
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
runner.seed_execution(input).expect("failed to seed execution");
// Set the batch size to more than what the previous stage synced (40 vs 20)
runner.set_batch_size(40);
// Run the stage
let rx = runner.execute(input);
// Check that we synced all blocks successfully, even though our `batch_size` allows us to
// sync more (if there were more headers)
let output = rx.await.unwrap();
runner.db().factory.static_file_provider().commit().unwrap();
assert_matches!(
output,
Ok(ExecOutput {
checkpoint: StageCheckpoint {
block_number: 20,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed,
total
}))
},
done: true
}) if processed + 1 == total && total == previous_stage + 1
);
assert!(runner.validate_execution(input, output.ok()).is_ok(), "execution validation");
}
/// Same as [`full_body_download`] except we have made progress before
#[tokio::test]
async fn sync_from_previous_progress() {
let (stage_progress, previous_stage) = (1, 21);
// Set up test runner
let mut runner = BodyTestRunner::default();
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
runner.seed_execution(input).expect("failed to seed execution");
let batch_size = 10;
runner.set_batch_size(batch_size);
// Run the stage
let rx = runner.execute(input);
// Check that we synced at least 10 blocks
let first_run = rx.await.unwrap();
runner.db().factory.static_file_provider().commit().unwrap();
assert_matches!(
first_run,
Ok(ExecOutput { checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed,
total
}))
}, done: false }) if block_number >= 10 &&
processed - 1 == batch_size && total == previous_stage + 1
);
let first_run_checkpoint = first_run.unwrap().checkpoint;
// Execute again on top of the previous run
let input =
ExecInput { target: Some(previous_stage), checkpoint: Some(first_run_checkpoint) };
let rx = runner.execute(input);
// Check that we synced more blocks
let output = rx.await.unwrap();
runner.db().factory.static_file_provider().commit().unwrap();
assert_matches!(
output,
Ok(ExecOutput { checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed,
total
}))
}, done: true }) if block_number > first_run_checkpoint.block_number &&
processed + 1 == total && total == previous_stage + 1
);
assert_matches!(
runner.validate_execution(input, output.ok()),
Ok(_),
"execution validation"
);
}
/// Checks that the stage unwinds correctly, even if a transaction in a block is missing.
#[tokio::test]
async fn unwind_missing_tx() {
let (stage_progress, previous_stage) = (1, 20);
// Set up test runner
let mut runner = BodyTestRunner::default();
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
runner.seed_execution(input).expect("failed to seed execution");
// Set the batch size to more than what the previous stage synced (40 vs 20)
runner.set_batch_size(40);
// Run the stage
let rx = runner.execute(input);
// Check that we synced all blocks successfully, even though our `batch_size` allows us to
// sync more (if there were more headers)
let output = rx.await.unwrap();
runner.db().factory.static_file_provider().commit().unwrap();
assert_matches!(
output,
Ok(ExecOutput { checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed,
total
}))
}, done: true }) if block_number == previous_stage &&
processed + 1 == total && total == previous_stage + 1
);
let checkpoint = output.unwrap().checkpoint;
runner
.validate_db_blocks(input.checkpoint().block_number, checkpoint.block_number)
.expect("Written block data invalid");
// Delete a transaction
let static_file_provider = runner.db().factory.static_file_provider();
{
let mut static_file_producer =
static_file_provider.latest_writer(StaticFileSegment::Transactions).unwrap();
static_file_producer.prune_transactions(1, checkpoint.block_number).unwrap();
static_file_producer.commit().unwrap();
}
// Unwind all of it
let unwind_to = 1;
let input = UnwindInput { bad_block: None, checkpoint, unwind_to };
let res = runner.unwind(input).await;
assert_matches!(
res,
Ok(UnwindOutput { checkpoint: StageCheckpoint {
block_number: 1,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed: 1,
total
}))
}}) if total == previous_stage + 1
);
assert_matches!(runner.validate_unwind(input), Ok(_), "unwind validation");
}
mod test_utils {
use crate::{
stages::bodies::BodyStage,
test_utils::{
ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB,
UnwindStageTestRunner,
},
};
use alloy_consensus::{BlockHeader, Header};
use alloy_primitives::{BlockNumber, TxNumber, B256};
use futures_util::Stream;
use reth_db::{static_file::HeaderWithHashMask, tables};
use reth_db_api::{
cursor::DbCursorRO,
models::{StoredBlockBodyIndices, StoredBlockOmmers},
transaction::{DbTx, DbTxMut},
};
use reth_ethereum_primitives::{Block, BlockBody};
use reth_network_p2p::{
bodies::{
downloader::{BodyDownloader, BodyDownloaderResult},
response::BlockResponse,
},
error::DownloadResult,
};
use reth_primitives_traits::{SealedBlock, SealedHeader};
use reth_provider::{
providers::StaticFileWriter, test_utils::MockNodeTypesWithDB, HeaderProvider,
ProviderFactory, StaticFileProviderFactory, TransactionsProvider,
};
use reth_stages_api::{ExecInput, ExecOutput, UnwindInput};
use reth_static_file_types::StaticFileSegment;
use reth_testing_utils::generators::{
self, random_block_range, random_signed_tx, BlockRangeParams,
};
use std::{
collections::{HashMap, VecDeque},
ops::RangeInclusive,
pin::Pin,
task::{Context, Poll},
};
/// The block hash of the genesis block.
pub(crate) const GENESIS_HASH: B256 = B256::ZERO;
/// A helper to create a collection of block bodies keyed by their hash.
pub(crate) fn body_by_hash(block: &SealedBlock<Block>) -> (B256, BlockBody) {
(block.hash(), block.body().clone())
}
/// A helper struct for running the [`BodyStage`].
pub(crate) struct BodyTestRunner {
responses: HashMap<B256, BlockBody>,
db: TestStageDB,
batch_size: u64,
}
impl Default for BodyTestRunner {
fn default() -> Self {
Self { responses: HashMap::default(), db: TestStageDB::default(), batch_size: 1000 }
}
}
impl BodyTestRunner {
pub(crate) fn set_batch_size(&mut self, batch_size: u64) {
self.batch_size = batch_size;
}
pub(crate) fn set_responses(&mut self, responses: HashMap<B256, BlockBody>) {
self.responses = responses;
}
}
impl StageTestRunner for BodyTestRunner {
type S = BodyStage<TestBodyDownloader>;
fn db(&self) -> &TestStageDB {
&self.db
}
fn stage(&self) -> Self::S {
BodyStage::new(TestBodyDownloader::new(
self.db.factory.clone(),
self.responses.clone(),
self.batch_size,
))
}
}
impl ExecuteStageTestRunner for BodyTestRunner {
type Seed = Vec<SealedBlock<Block>>;
fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
let start = input.checkpoint().block_number;
let end = input.target();
let static_file_provider = self.db.factory.static_file_provider();
let mut rng = generators::rng();
// Static files do not support gaps in headers, so we need to generate 0 to end
let blocks = random_block_range(
&mut rng,
0..=end,
BlockRangeParams {
parent: Some(GENESIS_HASH),
tx_count: 0..2,
..Default::default()
},
);
self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?;
if let Some(progress) = blocks.get(start as usize) {
// Insert last progress data
{
let tx = self.db.factory.provider_rw()?.into_tx();
let mut static_file_producer = static_file_provider
.get_writer(start, StaticFileSegment::Transactions)?;
let body = StoredBlockBodyIndices {
first_tx_num: 0,
tx_count: progress.transaction_count() as u64,
};
static_file_producer.set_block_range(0..=progress.number);
body.tx_num_range().try_for_each(|tx_num| {
let transaction = random_signed_tx(&mut rng);
static_file_producer.append_transaction(tx_num, &transaction).map(drop)
})?;
if body.tx_count != 0 {
tx.put::<tables::TransactionBlocks>(
body.last_tx_num(),
progress.number,
)?;
}
tx.put::<tables::BlockBodyIndices>(progress.number, body)?;
if !progress.ommers_hash_is_empty() {
tx.put::<tables::BlockOmmers>(
progress.number,
StoredBlockOmmers { ommers: progress.body().ommers.clone() },
)?;
}
static_file_producer.commit()?;
tx.commit()?;
}
}
self.set_responses(blocks.iter().map(body_by_hash).collect());
Ok(blocks)
}
fn validate_execution(
&self,
input: ExecInput,
output: Option<ExecOutput>,
) -> Result<(), TestRunnerError> {
let highest_block = match output.as_ref() {
Some(output) => output.checkpoint,
None => input.checkpoint(),
}
.block_number;
self.validate_db_blocks(highest_block, highest_block)
}
}
impl UnwindStageTestRunner for BodyTestRunner {
fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> {
self.db.ensure_no_entry_above::<tables::BlockBodyIndices, _>(
input.unwind_to,
|key| key,
)?;
self.db
.ensure_no_entry_above::<tables::BlockOmmers, _>(input.unwind_to, |key| key)?;
if let Some(last_tx_id) = self.get_last_tx_id()? {
self.db
.ensure_no_entry_above::<tables::Transactions, _>(last_tx_id, |key| key)?;
self.db.ensure_no_entry_above::<tables::TransactionBlocks, _>(
last_tx_id,
|key| key,
)?;
}
Ok(())
}
}
impl BodyTestRunner {
/// Get the last available tx id if any
pub(crate) fn get_last_tx_id(&self) -> Result<Option<TxNumber>, TestRunnerError> {
let last_body = self.db.query(|tx| {
let v = tx.cursor_read::<tables::BlockBodyIndices>()?.last()?;
Ok(v)
})?;
Ok(match last_body {
Some((_, body)) if body.tx_count != 0 => {
Some(body.first_tx_num + body.tx_count - 1)
}
_ => None,
})
}
/// Validate that the inserted block data is valid
pub(crate) fn validate_db_blocks(
&self,
prev_progress: BlockNumber,
highest_block: BlockNumber,
) -> Result<(), TestRunnerError> {
let static_file_provider = self.db.factory.static_file_provider();
self.db.query(|tx| {
// Acquire cursors on body related tables
let mut bodies_cursor = tx.cursor_read::<tables::BlockBodyIndices>()?;
let mut ommers_cursor = tx.cursor_read::<tables::BlockOmmers>()?;
let mut tx_block_cursor = tx.cursor_read::<tables::TransactionBlocks>()?;
let first_body_key = match bodies_cursor.first()? {
Some((key, _)) => key,
None => return Ok(()),
};
let mut prev_number: Option<BlockNumber> = None;
for entry in bodies_cursor.walk(Some(first_body_key))? {
let (number, body) = entry?;
// Validate sequentiality only after prev progress,
// since the data before is mocked and can contain gaps
if number > prev_progress {
if let Some(prev_key) = prev_number {
assert_eq!(prev_key + 1, number, "Body entries must be sequential");
}
}
// Validate that the current entry is below or equals to the highest allowed block
assert!(
number <= highest_block,
"We wrote a block body outside of our synced range. Found block with number {number}, highest block according to stage is {highest_block}",
);
let header = static_file_provider.header_by_number(number)?.expect("to be present");
// Validate that ommers exist if any
let stored_ommers = ommers_cursor.seek_exact(number)?;
if header.ommers_hash_is_empty() {
assert!(stored_ommers.is_none(), "Unexpected ommers entry");
} else {
assert!(stored_ommers.is_some(), "Missing ommers entry");
}
let tx_block_id = tx_block_cursor.seek_exact(body.last_tx_num())?.map(|(_,b)| b);
if body.tx_count == 0 {
assert_ne!(tx_block_id,Some(number));
} else {
assert_eq!(tx_block_id, Some(number));
}
for tx_id in body.tx_num_range() {
assert!(static_file_provider.transaction_by_id(tx_id)?.is_some(), "Transaction is missing.");
}
prev_number = Some(number);
}
Ok(())
})?;
Ok(())
}
}
/// A [`BodyDownloader`] that is backed by an internal [`HashMap`] for testing.
#[derive(Debug)]
pub(crate) struct TestBodyDownloader {
provider_factory: ProviderFactory<MockNodeTypesWithDB>,
responses: HashMap<B256, BlockBody>,
headers: VecDeque<SealedHeader>,
batch_size: u64,
}
impl TestBodyDownloader {
pub(crate) fn new(
provider_factory: ProviderFactory<MockNodeTypesWithDB>,
responses: HashMap<B256, BlockBody>,
batch_size: u64,
) -> Self {
Self { provider_factory, responses, headers: VecDeque::default(), batch_size }
}
}
impl BodyDownloader for TestBodyDownloader {
type Block = Block;
fn set_download_range(
&mut self,
range: RangeInclusive<BlockNumber>,
) -> DownloadResult<()> {
let static_file_provider = self.provider_factory.static_file_provider();
for header in static_file_provider.fetch_range_iter(
StaticFileSegment::Headers,
*range.start()..*range.end() + 1,
|cursor, number| cursor.get_two::<HeaderWithHashMask<Header>>(number.into()),
)? {
let (header, hash) = header?;
self.headers.push_back(SealedHeader::new(header, hash));
}
Ok(())
}
}
impl Stream for TestBodyDownloader {
type Item = BodyDownloaderResult<Block>;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
if this.headers.is_empty() {
return Poll::Ready(None)
}
let mut response =
Vec::with_capacity(std::cmp::min(this.headers.len(), this.batch_size as usize));
while let Some(header) = this.headers.pop_front() {
if header.is_empty() {
response.push(BlockResponse::Empty(header))
} else {
let body =
this.responses.remove(&header.hash()).expect("requested unknown body");
response.push(BlockResponse::Full(SealedBlock::from_sealed_parts(
header, body,
)));
}
if response.len() as u64 >= this.batch_size {
break
}
}
if !response.is_empty() {
return Poll::Ready(Some(Ok(response)))
}
panic!("requested bodies without setting headers")
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/mod.rs | crates/stages/stages/src/stages/mod.rs | /// The bodies stage.
mod bodies;
/// The execution stage that generates state diff.
mod execution;
/// The finish stage
mod finish;
/// Account hashing stage.
mod hashing_account;
/// Storage hashing stage.
mod hashing_storage;
/// The headers stage.
mod headers;
/// Index history of account changes
mod index_account_history;
/// Index history of storage changes
mod index_storage_history;
/// Stage for computing state root.
mod merkle;
mod prune;
/// The sender recovery stage.
mod sender_recovery;
/// The transaction lookup stage
mod tx_lookup;
pub use bodies::*;
pub use era::*;
pub use execution::*;
pub use finish::*;
pub use hashing_account::*;
pub use hashing_storage::*;
pub use headers::*;
pub use index_account_history::*;
pub use index_storage_history::*;
pub use merkle::*;
pub use prune::*;
pub use sender_recovery::*;
pub use tx_lookup::*;
mod era;
mod utils;
use utils::*;
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{StorageKind, TestStageDB};
use alloy_consensus::{SignableTransaction, TxLegacy};
use alloy_primitives::{
address, hex_literal::hex, keccak256, BlockNumber, Signature, B256, U256,
};
use alloy_rlp::Decodable;
use reth_chainspec::ChainSpecBuilder;
use reth_db::mdbx::{cursor::Cursor, RW};
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
table::Table,
tables,
transaction::{DbTx, DbTxMut},
AccountsHistory,
};
use reth_ethereum_consensus::EthBeaconConsensus;
use reth_ethereum_primitives::Block;
use reth_evm_ethereum::EthEvmConfig;
use reth_exex::ExExManagerHandle;
use reth_primitives_traits::{Account, Bytecode, SealedBlock};
use reth_provider::{
providers::{StaticFileProvider, StaticFileWriter},
test_utils::MockNodeTypesWithDB,
AccountExtReader, BlockBodyIndicesProvider, DatabaseProviderFactory, ProviderFactory,
ProviderResult, ReceiptProvider, StageCheckpointWriter, StaticFileProviderFactory,
StorageReader,
};
use reth_prune_types::{PruneMode, PruneModes};
use reth_stages_api::{
ExecInput, ExecutionStageThresholds, PipelineTarget, Stage, StageCheckpoint, StageId,
};
use reth_static_file_types::StaticFileSegment;
use reth_testing_utils::generators::{
self, random_block, random_block_range, random_receipt, BlockRangeParams,
};
use std::{io::Write, sync::Arc};
#[tokio::test]
#[ignore]
async fn test_prune() {
let test_db = TestStageDB::default();
let provider_rw = test_db.factory.provider_rw().unwrap();
let tip = 66;
let input = ExecInput { target: Some(tip), checkpoint: None };
let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice();
let genesis = SealedBlock::<Block>::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::<Block>::decode(&mut block_rlp).unwrap();
provider_rw.insert_historical_block(genesis.try_recover().unwrap()).unwrap();
provider_rw.insert_historical_block(block.clone().try_recover().unwrap()).unwrap();
// Fill with bogus blocks to respect PruneMode distance.
let mut head = block.hash();
let mut rng = generators::rng();
for block_number in 2..=tip {
let nblock = random_block(
&mut rng,
block_number,
generators::BlockParams { parent: Some(head), ..Default::default() },
);
head = nblock.hash();
provider_rw.insert_historical_block(nblock.try_recover().unwrap()).unwrap();
}
provider_rw
.static_file_provider()
.latest_writer(StaticFileSegment::Headers)
.unwrap()
.commit()
.unwrap();
provider_rw.commit().unwrap();
// insert pre state
let provider_rw = test_db.factory.provider_rw().unwrap();
let code = hex!("5a465a905090036002900360015500");
let code_hash = keccak256(hex!("5a465a905090036002900360015500"));
provider_rw
.tx_ref()
.put::<tables::PlainAccountState>(
address!("0x1000000000000000000000000000000000000000"),
Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) },
)
.unwrap();
provider_rw
.tx_ref()
.put::<tables::PlainAccountState>(
address!("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"),
Account {
nonce: 0,
balance: U256::from(0x3635c9adc5dea00000u128),
bytecode_hash: None,
},
)
.unwrap();
provider_rw
.tx_ref()
.put::<tables::Bytecodes>(code_hash, Bytecode::new_raw(code.to_vec().into()))
.unwrap();
provider_rw.commit().unwrap();
let check_pruning = |factory: ProviderFactory<MockNodeTypesWithDB>,
prune_modes: PruneModes,
expect_num_receipts: usize,
expect_num_acc_changesets: usize,
expect_num_storage_changesets: usize| async move {
let provider = factory.database_provider_rw().unwrap();
// Check execution and create receipts and changesets according to the pruning
// configuration
let mut execution_stage = ExecutionStage::new(
EthEvmConfig::ethereum(Arc::new(
ChainSpecBuilder::mainnet().berlin_activated().build(),
)),
Arc::new(EthBeaconConsensus::new(Arc::new(
ChainSpecBuilder::mainnet().berlin_activated().build(),
))),
ExecutionStageThresholds {
max_blocks: Some(100),
max_changes: None,
max_cumulative_gas: None,
max_duration: None,
},
MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD,
ExExManagerHandle::empty(),
);
execution_stage.execute(&provider, input).unwrap();
assert_eq!(
provider.receipts_by_block(1.into()).unwrap().unwrap().len(),
expect_num_receipts
);
assert_eq!(
provider.changed_storages_and_blocks_with_range(0..=1000).unwrap().len(),
expect_num_storage_changesets
);
assert_eq!(
provider.changed_accounts_and_blocks_with_range(0..=1000).unwrap().len(),
expect_num_acc_changesets
);
// Check AccountHistory
let mut acc_indexing_stage = IndexAccountHistoryStage {
prune_mode: prune_modes.account_history,
..Default::default()
};
if prune_modes.account_history == Some(PruneMode::Full) {
// Full is not supported
assert!(acc_indexing_stage.execute(&provider, input).is_err());
} else {
acc_indexing_stage.execute(&provider, input).unwrap();
let mut account_history: Cursor<RW, AccountsHistory> =
provider.tx_ref().cursor_read::<tables::AccountsHistory>().unwrap();
assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets);
}
// Check StorageHistory
let mut storage_indexing_stage = IndexStorageHistoryStage {
prune_mode: prune_modes.storage_history,
..Default::default()
};
if prune_modes.storage_history == Some(PruneMode::Full) {
// Full is not supported
assert!(storage_indexing_stage.execute(&provider, input).is_err());
} else {
storage_indexing_stage.execute(&provider, input).unwrap();
let mut storage_history =
provider.tx_ref().cursor_read::<tables::StoragesHistory>().unwrap();
assert_eq!(
storage_history.walk(None).unwrap().count(),
expect_num_storage_changesets
);
}
};
// In an unpruned configuration there is 1 receipt, 3 changed accounts and 1 changed
// storage.
let mut prune = PruneModes::none();
check_pruning(test_db.factory.clone(), prune.clone(), 1, 3, 1).await;
prune.receipts = Some(PruneMode::Full);
prune.account_history = Some(PruneMode::Full);
prune.storage_history = Some(PruneMode::Full);
// This will result in error for account_history and storage_history, which is caught.
check_pruning(test_db.factory.clone(), prune.clone(), 0, 0, 0).await;
prune.receipts = Some(PruneMode::Before(1));
prune.account_history = Some(PruneMode::Before(1));
prune.storage_history = Some(PruneMode::Before(1));
check_pruning(test_db.factory.clone(), prune.clone(), 1, 3, 1).await;
prune.receipts = Some(PruneMode::Before(2));
prune.account_history = Some(PruneMode::Before(2));
prune.storage_history = Some(PruneMode::Before(2));
// The one account is the miner
check_pruning(test_db.factory.clone(), prune.clone(), 0, 1, 0).await;
prune.receipts = Some(PruneMode::Distance(66));
prune.account_history = Some(PruneMode::Distance(66));
prune.storage_history = Some(PruneMode::Distance(66));
check_pruning(test_db.factory.clone(), prune.clone(), 1, 3, 1).await;
prune.receipts = Some(PruneMode::Distance(64));
prune.account_history = Some(PruneMode::Distance(64));
prune.storage_history = Some(PruneMode::Distance(64));
// The one account is the miner
check_pruning(test_db.factory.clone(), prune.clone(), 0, 1, 0).await;
}
/// It will generate `num_blocks`, push them to static files and set all stage checkpoints to
/// `num_blocks - 1`.
fn seed_data(num_blocks: usize) -> ProviderResult<TestStageDB> {
let db = TestStageDB::default();
let mut rng = generators::rng();
let genesis_hash = B256::ZERO;
let tip = (num_blocks - 1) as u64;
let blocks = random_block_range(
&mut rng,
0..=tip,
BlockRangeParams { parent: Some(genesis_hash), tx_count: 2..3, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Static)?;
let mut receipts = Vec::with_capacity(blocks.len());
let mut tx_num = 0u64;
for block in &blocks {
let mut block_receipts = Vec::with_capacity(block.transaction_count());
for transaction in &block.body().transactions {
block_receipts.push((tx_num, random_receipt(&mut rng, transaction, Some(0), None)));
tx_num += 1;
}
receipts.push((block.number, block_receipts));
}
db.insert_receipts_by_block(receipts, StorageKind::Static)?;
// simulate pipeline by setting all checkpoints to inserted height.
let provider_rw = db.factory.provider_rw()?;
for stage in StageId::ALL {
provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(tip))?;
}
provider_rw.commit()?;
Ok(db)
}
/// Simulates losing data to corruption and compare the check consistency result
/// against the expected one.
fn simulate_behind_checkpoint_corruption(
db: &TestStageDB,
prune_count: usize,
segment: StaticFileSegment,
is_full_node: bool,
expected: Option<PipelineTarget>,
) {
// We recreate the static file provider, since consistency heals are done on fetching the
// writer for the first time.
let mut static_file_provider = db.factory.static_file_provider();
static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap();
// Simulate corruption by removing `prune_count` rows from the data file without updating
// its offset list and configuration.
{
let mut headers_writer = static_file_provider.latest_writer(segment).unwrap();
let reader = headers_writer.inner().jar().open_data_reader().unwrap();
let columns = headers_writer.inner().jar().columns();
let data_file = headers_writer.inner().data_file();
let last_offset = reader.reverse_offset(prune_count * columns).unwrap();
data_file.get_mut().set_len(last_offset).unwrap();
data_file.flush().unwrap();
data_file.get_ref().sync_all().unwrap();
}
// We recreate the static file provider, since consistency heals are done on fetching the
// writer for the first time.
let mut static_file_provider = db.factory.static_file_provider();
static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap();
assert!(matches!(
static_file_provider
.check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,),
Ok(e) if e == expected
));
}
/// Saves a checkpoint with `checkpoint_block_number` and compare the check consistency result
/// against the expected one.
fn save_checkpoint_and_check(
db: &TestStageDB,
stage_id: StageId,
checkpoint_block_number: BlockNumber,
expected: Option<PipelineTarget>,
) {
let provider_rw = db.factory.provider_rw().unwrap();
provider_rw
.save_stage_checkpoint(stage_id, StageCheckpoint::new(checkpoint_block_number))
.unwrap();
provider_rw.commit().unwrap();
assert!(matches!(
db.factory
.static_file_provider()
.check_consistency(&db.factory.database_provider_ro().unwrap(), false,),
Ok(e) if e == expected
));
}
/// Inserts a dummy value at key and compare the check consistency result against the expected
/// one.
fn update_db_and_check<T: Table<Key = u64>>(
db: &TestStageDB,
key: u64,
expected: Option<PipelineTarget>,
) where
<T as Table>::Value: Default,
{
update_db_with_and_check::<T>(db, key, expected, &Default::default());
}
/// Inserts the given value at key and compare the check consistency result against the expected
/// one.
fn update_db_with_and_check<T: Table<Key = u64>>(
db: &TestStageDB,
key: u64,
expected: Option<PipelineTarget>,
value: &T::Value,
) {
let provider_rw = db.factory.provider_rw().unwrap();
let mut cursor = provider_rw.tx_ref().cursor_write::<T>().unwrap();
cursor.insert(key, value).unwrap();
provider_rw.commit().unwrap();
assert!(matches!(
db.factory
.static_file_provider()
.check_consistency(&db.factory.database_provider_ro().unwrap(), false),
Ok(e) if e == expected
));
}
#[test]
fn test_consistency() {
let db = seed_data(90).unwrap();
let db_provider = db.factory.database_provider_ro().unwrap();
assert!(matches!(
db.factory.static_file_provider().check_consistency(&db_provider, false),
Ok(None)
));
}
#[test]
fn test_consistency_no_commit_prune() {
let db = seed_data(90).unwrap();
let full_node = true;
let archive_node = !full_node;
// Full node does not use receipts, therefore doesn't check for consistency on receipts
// segment
simulate_behind_checkpoint_corruption(&db, 1, StaticFileSegment::Receipts, full_node, None);
// there are 2 to 3 transactions per block. however, if we lose one tx, we need to unwind to
// the previous block.
simulate_behind_checkpoint_corruption(
&db,
1,
StaticFileSegment::Receipts,
archive_node,
Some(PipelineTarget::Unwind(88)),
);
simulate_behind_checkpoint_corruption(
&db,
3,
StaticFileSegment::Headers,
archive_node,
Some(PipelineTarget::Unwind(86)),
);
}
#[test]
fn test_consistency_checkpoints() {
let db = seed_data(90).unwrap();
// When a checkpoint is behind, we delete data from static files.
let block = 87;
save_checkpoint_and_check(&db, StageId::Bodies, block, None);
assert_eq!(
db.factory
.static_file_provider()
.get_highest_static_file_block(StaticFileSegment::Transactions),
Some(block)
);
assert_eq!(
db.factory
.static_file_provider()
.get_highest_static_file_tx(StaticFileSegment::Transactions),
db.factory.block_body_indices(block).unwrap().map(|b| b.last_tx_num())
);
let block = 86;
save_checkpoint_and_check(&db, StageId::Execution, block, None);
assert_eq!(
db.factory
.static_file_provider()
.get_highest_static_file_block(StaticFileSegment::Receipts),
Some(block)
);
assert_eq!(
db.factory
.static_file_provider()
.get_highest_static_file_tx(StaticFileSegment::Receipts),
db.factory.block_body_indices(block).unwrap().map(|b| b.last_tx_num())
);
let block = 80;
save_checkpoint_and_check(&db, StageId::Headers, block, None);
assert_eq!(
db.factory
.static_file_provider()
.get_highest_static_file_block(StaticFileSegment::Headers),
Some(block)
);
// When a checkpoint is ahead, we request a pipeline unwind.
save_checkpoint_and_check(&db, StageId::Headers, 91, Some(PipelineTarget::Unwind(block)));
}
#[test]
fn test_consistency_headers_gap() {
let db = seed_data(90).unwrap();
let current = db
.factory
.static_file_provider()
.get_highest_static_file_block(StaticFileSegment::Headers)
.unwrap();
// Creates a gap of one header: static_file <missing> db
update_db_and_check::<tables::Headers>(&db, current + 2, Some(PipelineTarget::Unwind(89)));
// Fill the gap, and ensure no unwind is necessary.
update_db_and_check::<tables::Headers>(&db, current + 1, None);
}
#[test]
fn test_consistency_tx_gap() {
let db = seed_data(90).unwrap();
let current = db
.factory
.static_file_provider()
.get_highest_static_file_tx(StaticFileSegment::Transactions)
.unwrap();
// Creates a gap of one transaction: static_file <missing> db
update_db_with_and_check::<tables::Transactions>(
&db,
current + 2,
Some(PipelineTarget::Unwind(89)),
&TxLegacy::default().into_signed(Signature::test_signature()).into(),
);
// Fill the gap, and ensure no unwind is necessary.
update_db_with_and_check::<tables::Transactions>(
&db,
current + 1,
None,
&TxLegacy::default().into_signed(Signature::test_signature()).into(),
);
}
#[test]
fn test_consistency_receipt_gap() {
let db = seed_data(90).unwrap();
let current = db
.factory
.static_file_provider()
.get_highest_static_file_tx(StaticFileSegment::Receipts)
.unwrap();
// Creates a gap of one receipt: static_file <missing> db
update_db_and_check::<tables::Receipts>(&db, current + 2, Some(PipelineTarget::Unwind(89)));
// Fill the gap, and ensure no unwind is necessary.
update_db_and_check::<tables::Receipts>(&db, current + 1, None);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/hashing_storage.rs | crates/stages/stages/src/stages/hashing_storage.rs | use alloy_primitives::{bytes::BufMut, keccak256, B256};
use itertools::Itertools;
use reth_config::config::{EtlConfig, HashingConfig};
use reth_db_api::{
cursor::{DbCursorRO, DbDupCursorRW},
models::{BlockNumberAddress, CompactU256},
table::Decompress,
tables,
transaction::{DbTx, DbTxMut},
};
use reth_etl::Collector;
use reth_primitives_traits::StorageEntry;
use reth_provider::{DBProvider, HashingWriter, StatsReader, StorageReader};
use reth_stages_api::{
EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId,
StorageHashingCheckpoint, UnwindInput, UnwindOutput,
};
use reth_storage_errors::provider::ProviderResult;
use std::{
fmt::Debug,
sync::mpsc::{self, Receiver},
};
use tracing::*;
/// Maximum number of channels that can exist in memory.
const MAXIMUM_CHANNELS: usize = 10_000;
/// Maximum number of storage entries to hash per rayon worker job.
const WORKER_CHUNK_SIZE: usize = 100;
/// Storage hashing stage hashes plain storage.
/// This is preparation before generating intermediate hashes and calculating Merkle tree root.
#[derive(Debug)]
pub struct StorageHashingStage {
/// The threshold (in number of blocks) for switching between incremental
/// hashing and full storage hashing.
pub clean_threshold: u64,
/// The maximum number of slots to process before committing during unwind.
pub commit_threshold: u64,
/// ETL configuration
pub etl_config: EtlConfig,
}
impl StorageHashingStage {
/// Create new instance of [`StorageHashingStage`].
pub const fn new(config: HashingConfig, etl_config: EtlConfig) -> Self {
Self {
clean_threshold: config.clean_threshold,
commit_threshold: config.commit_threshold,
etl_config,
}
}
}
impl Default for StorageHashingStage {
fn default() -> Self {
Self {
clean_threshold: 500_000,
commit_threshold: 100_000,
etl_config: EtlConfig::default(),
}
}
}
impl<Provider> Stage<Provider> for StorageHashingStage
where
Provider: DBProvider<Tx: DbTxMut> + StorageReader + HashingWriter + StatsReader,
{
/// Return the id of the stage
fn id(&self) -> StageId {
StageId::StorageHashing
}
/// Execute the stage.
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
let tx = provider.tx_ref();
if input.target_reached() {
return Ok(ExecOutput::done(input.checkpoint()))
}
let (from_block, to_block) = input.next_block_range().into_inner();
// if there are more blocks then threshold it is faster to go over Plain state and hash all
// account otherwise take changesets aggregate the sets and apply hashing to
// AccountHashing table. Also, if we start from genesis, we need to hash from scratch, as
// genesis accounts are not in changeset, along with their storages.
if to_block - from_block > self.clean_threshold || from_block == 1 {
// clear table, load all accounts and hash it
tx.clear::<tables::HashedStorages>()?;
let mut storage_cursor = tx.cursor_read::<tables::PlainStorageState>()?;
let mut collector =
Collector::new(self.etl_config.file_size, self.etl_config.dir.clone());
let mut channels = Vec::with_capacity(MAXIMUM_CHANNELS);
for chunk in &storage_cursor.walk(None)?.chunks(WORKER_CHUNK_SIZE) {
// An _unordered_ channel to receive results from a rayon job
let (tx, rx) = mpsc::channel();
channels.push(rx);
let chunk = chunk.collect::<Result<Vec<_>, _>>()?;
// Spawn the hashing task onto the global rayon pool
rayon::spawn(move || {
for (address, slot) in chunk {
let mut addr_key_is_private = Vec::with_capacity(64);
addr_key_is_private.put_slice(keccak256(address).as_slice());
addr_key_is_private.put_slice(keccak256(slot.key).as_slice());
addr_key_is_private.put_u8(slot.value.is_private as u8);
let _ = tx.send((addr_key_is_private, CompactU256::from(slot.value.value)));
}
});
// Flush to ETL when channels length reaches MAXIMUM_CHANNELS
if !channels.is_empty() && channels.len().is_multiple_of(MAXIMUM_CHANNELS) {
collect(&mut channels, &mut collector)?;
}
}
collect(&mut channels, &mut collector)?;
let total_hashes = collector.len();
let interval = (total_hashes / 10).max(1);
let mut cursor = tx.cursor_dup_write::<tables::HashedStorages>()?;
for (index, item) in collector.iter()?.enumerate() {
if index > 0 && index.is_multiple_of(interval) {
info!(
target: "sync::stages::hashing_storage",
progress = %format!("{:.2}%", (index as f64 / total_hashes as f64) * 100.0),
"Inserting hashes"
);
}
let (addr_key_is_private, val) = item?;
cursor.append_dup(
B256::from_slice(&addr_key_is_private[..32]),
StorageEntry {
key: B256::from_slice(&addr_key_is_private[32..64]),
value: alloy_primitives::FlaggedStorage {
value: CompactU256::decompress(&val)?.into(),
is_private: addr_key_is_private[64] != 0,
},
},
)?;
}
} else {
// Aggregate all changesets and make list of storages that have been
// changed.
let lists = provider.changed_storages_with_range(from_block..=to_block)?;
// iterate over plain state and get newest storage value.
// Assumption we are okay with is that plain state represent
// `previous_stage_progress` state.
let storages = provider.plain_state_storages(lists)?;
provider.insert_storage_for_hashing(storages)?;
}
// We finished the hashing stage, no future iterations is expected for the same block range,
// so no checkpoint is needed.
let checkpoint = StageCheckpoint::new(input.target())
.with_storage_hashing_stage_checkpoint(StorageHashingCheckpoint {
progress: stage_checkpoint_progress(provider)?,
..Default::default()
});
Ok(ExecOutput { checkpoint, done: true })
}
/// Unwind the stage.
fn unwind(
&mut self,
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
let (range, unwind_progress, _) =
input.unwind_block_range_with_threshold(self.commit_threshold);
provider.unwind_storage_hashing_range(BlockNumberAddress::range(range))?;
let mut stage_checkpoint =
input.checkpoint.storage_hashing_stage_checkpoint().unwrap_or_default();
stage_checkpoint.progress = stage_checkpoint_progress(provider)?;
Ok(UnwindOutput {
checkpoint: StageCheckpoint::new(unwind_progress)
.with_storage_hashing_stage_checkpoint(stage_checkpoint),
})
}
}
/// Flushes channels hashes to ETL collector.
fn collect(
channels: &mut Vec<Receiver<(Vec<u8>, CompactU256)>>,
collector: &mut Collector<Vec<u8>, CompactU256>,
) -> Result<(), StageError> {
for channel in channels.iter_mut() {
while let Ok((key, v)) = channel.recv() {
collector.insert(key, v)?;
}
}
info!(target: "sync::stages::hashing_storage", "Hashed {} entries", collector.len());
channels.clear();
Ok(())
}
fn stage_checkpoint_progress(provider: &impl StatsReader) -> ProviderResult<EntitiesCheckpoint> {
Ok(EntitiesCheckpoint {
processed: provider.count_entries::<tables::HashedStorages>()? as u64,
total: provider.count_entries::<tables::PlainStorageState>()? as u64,
})
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError,
TestStageDB, UnwindStageTestRunner,
};
use alloy_primitives::{Address, FlaggedStorage};
use assert_matches::assert_matches;
use rand::Rng;
use reth_db_api::{
cursor::{DbCursorRW, DbDupCursorRO},
models::StoredBlockBodyIndices,
};
use reth_ethereum_primitives::Block;
use reth_primitives_traits::SealedBlock;
use reth_provider::providers::StaticFileWriter;
use reth_testing_utils::generators::{
self, random_block_range, random_contract_account_range, BlockRangeParams,
};
stage_test_suite_ext!(StorageHashingTestRunner, storage_hashing);
/// Execute with low clean threshold so as to hash whole storage
#[tokio::test]
async fn execute_clean_storage_hashing() {
let (previous_stage, stage_progress) = (500, 100);
// Set up the runner
let mut runner = StorageHashingTestRunner::default();
// set low clean threshold so we hash the whole storage
runner.set_clean_threshold(1);
// set low commit threshold so we force each entry to be a tx.commit and make sure we don't
// hang on one key. Seed execution inserts more than one storage entry per address.
runner.set_commit_threshold(1);
let mut input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
runner.seed_execution(input).expect("failed to seed execution");
loop {
if let Ok(result @ ExecOutput { checkpoint, done }) =
runner.execute(input).await.unwrap()
{
if !done {
let previous_checkpoint = input
.checkpoint
.and_then(|checkpoint| checkpoint.storage_hashing_stage_checkpoint())
.unwrap_or_default();
assert_matches!(checkpoint.storage_hashing_stage_checkpoint(), Some(StorageHashingCheckpoint {
progress: EntitiesCheckpoint {
processed,
total,
},
..
}) if processed == previous_checkpoint.progress.processed + 1 &&
total == runner.db.table::<tables::PlainStorageState>().unwrap().len() as u64);
// Continue from checkpoint
input.checkpoint = Some(checkpoint);
continue
}
assert_eq!(checkpoint.block_number, previous_stage);
assert_matches!(checkpoint.storage_hashing_stage_checkpoint(), Some(StorageHashingCheckpoint {
progress: EntitiesCheckpoint {
processed,
total,
},
..
}) if processed == total &&
total == runner.db.table::<tables::PlainStorageState>().unwrap().len() as u64);
// Validate the stage execution
assert!(
runner.validate_execution(input, Some(result)).is_ok(),
"execution validation"
);
break
}
panic!("Failed execution");
}
}
struct StorageHashingTestRunner {
db: TestStageDB,
commit_threshold: u64,
clean_threshold: u64,
etl_config: EtlConfig,
}
impl Default for StorageHashingTestRunner {
fn default() -> Self {
Self {
db: TestStageDB::default(),
commit_threshold: 1000,
clean_threshold: 1000,
etl_config: EtlConfig::default(),
}
}
}
impl StageTestRunner for StorageHashingTestRunner {
type S = StorageHashingStage;
fn db(&self) -> &TestStageDB {
&self.db
}
fn stage(&self) -> Self::S {
Self::S {
commit_threshold: self.commit_threshold,
clean_threshold: self.clean_threshold,
etl_config: self.etl_config.clone(),
}
}
}
impl ExecuteStageTestRunner for StorageHashingTestRunner {
type Seed = Vec<SealedBlock<Block>>;
fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
let stage_progress = input.next_block();
let end = input.target();
let mut rng = generators::rng();
let n_accounts = 31;
let mut accounts = random_contract_account_range(&mut rng, &mut (0..n_accounts));
let blocks = random_block_range(
&mut rng,
stage_progress..=end,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..3, ..Default::default() },
);
self.db.insert_headers(blocks.iter().map(|block| block.sealed_header()))?;
let iter = blocks.iter();
let mut next_tx_num = 0;
let mut first_tx_num = next_tx_num;
for progress in iter {
// Insert last progress data
let block_number = progress.number;
self.db.commit(|tx| {
progress.body().transactions.iter().try_for_each(
|transaction| -> Result<(), reth_db::DatabaseError> {
tx.put::<tables::TransactionHashNumbers>(
*transaction.tx_hash(),
next_tx_num,
)?;
tx.put::<tables::Transactions>(next_tx_num, transaction.clone())?;
let (addr, _) = accounts
.get_mut((rng.random::<u64>() % n_accounts) as usize)
.unwrap();
for _ in 0..2 {
let new_entry = StorageEntry {
key: keccak256([rng.random::<u8>()]),
value: alloy_primitives::FlaggedStorage::public(
rng.random::<u8>() % 30 + 1,
),
};
self.insert_storage_entry(
tx,
(block_number, *addr).into(),
new_entry,
progress.number == stage_progress,
)?;
}
next_tx_num += 1;
Ok(())
},
)?;
// Randomize rewards
let has_reward: bool = rng.random();
if has_reward {
self.insert_storage_entry(
tx,
(block_number, Address::random()).into(),
StorageEntry {
key: keccak256("mining"),
value: alloy_primitives::FlaggedStorage::public(
rng.random::<u32>(),
),
},
progress.number == stage_progress,
)?;
}
let body = StoredBlockBodyIndices {
first_tx_num,
tx_count: progress.transaction_count() as u64,
};
first_tx_num = next_tx_num;
tx.put::<tables::BlockBodyIndices>(progress.number, body)?;
Ok(())
})?;
}
Ok(blocks)
}
fn validate_execution(
&self,
input: ExecInput,
output: Option<ExecOutput>,
) -> Result<(), TestRunnerError> {
if let Some(output) = output {
let start_block = input.checkpoint().block_number + 1;
let end_block = output.checkpoint.block_number;
if start_block > end_block {
return Ok(())
}
}
self.check_hashed_storage()
}
}
impl UnwindStageTestRunner for StorageHashingTestRunner {
fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> {
self.unwind_storage(input)?;
self.check_hashed_storage()
}
}
impl StorageHashingTestRunner {
fn set_clean_threshold(&mut self, threshold: u64) {
self.clean_threshold = threshold;
}
fn set_commit_threshold(&mut self, threshold: u64) {
self.commit_threshold = threshold;
}
fn check_hashed_storage(&self) -> Result<(), TestRunnerError> {
self.db
.query(|tx| {
let mut storage_cursor = tx.cursor_dup_read::<tables::PlainStorageState>()?;
let mut hashed_storage_cursor =
tx.cursor_dup_read::<tables::HashedStorages>()?;
let mut expected = 0;
while let Some((address, entry)) = storage_cursor.next()? {
let key = keccak256(entry.key);
let got =
hashed_storage_cursor.seek_by_key_subkey(keccak256(address), key)?;
assert_eq!(
got,
Some(StorageEntry { key, ..entry }),
"{expected}: {address:?}"
);
expected += 1;
}
let count = tx.cursor_dup_read::<tables::HashedStorages>()?.walk(None)?.count();
assert_eq!(count, expected);
Ok(())
})
.map_err(|e| e.into())
}
fn insert_storage_entry<TX: DbTxMut>(
&self,
tx: &TX,
bn_address: BlockNumberAddress,
entry: StorageEntry,
hash: bool,
) -> Result<(), reth_db::DatabaseError> {
let mut storage_cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
let prev_entry =
match storage_cursor.seek_by_key_subkey(bn_address.address(), entry.key)? {
Some(e) if e.key == entry.key => {
tx.delete::<tables::PlainStorageState>(bn_address.address(), Some(e))
.expect("failed to delete entry");
e
}
_ => StorageEntry { key: entry.key, value: FlaggedStorage::ZERO },
};
tx.put::<tables::PlainStorageState>(bn_address.address(), entry)?;
if hash {
let hashed_address = keccak256(bn_address.address());
let hashed_entry = StorageEntry {
key: keccak256(entry.key),
value: entry.value,
..Default::default()
};
if let Some(e) = tx
.cursor_dup_write::<tables::HashedStorages>()?
.seek_by_key_subkey(hashed_address, hashed_entry.key)?
.filter(|e| e.key == hashed_entry.key)
{
tx.delete::<tables::HashedStorages>(hashed_address, Some(e))
.expect("failed to delete entry");
}
tx.put::<tables::HashedStorages>(hashed_address, hashed_entry)?;
}
tx.put::<tables::StorageChangeSets>(bn_address, prev_entry)?;
Ok(())
}
fn unwind_storage(&self, input: UnwindInput) -> Result<(), TestRunnerError> {
tracing::debug!("unwinding storage...");
let target_block = input.unwind_to;
self.db.commit(|tx| {
let mut storage_cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
let mut changeset_cursor = tx.cursor_dup_read::<tables::StorageChangeSets>()?;
let mut rev_changeset_walker = changeset_cursor.walk_back(None)?;
while let Some((bn_address, entry)) = rev_changeset_walker.next().transpose()? {
if bn_address.block_number() < target_block {
break
}
if storage_cursor
.seek_by_key_subkey(bn_address.address(), entry.key)?
.filter(|e| e.key == entry.key)
.is_some()
{
storage_cursor.delete_current()?;
}
if !entry.value.is_zero() {
storage_cursor.upsert(bn_address.address(), &entry)?;
}
}
Ok(())
})?;
Ok(())
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/execution.rs | crates/stages/stages/src/stages/execution.rs | use crate::stages::MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD;
use alloy_consensus::BlockHeader;
use alloy_primitives::BlockNumber;
use num_traits::Zero;
use reth_config::config::ExecutionConfig;
use reth_consensus::{ConsensusError, FullConsensus};
use reth_db::{static_file::HeaderMask, tables};
use reth_evm::{execute::Executor, metrics::ExecutorMetrics, ConfigureEvm};
use reth_execution_types::Chain;
use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource};
use reth_primitives_traits::{format_gas_throughput, BlockBody, NodePrimitives};
use reth_provider::{
providers::{StaticFileProvider, StaticFileWriter},
BlockHashReader, BlockReader, DBProvider, ExecutionOutcome, HeaderProvider,
LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter,
StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant,
};
use reth_revm::database::StateProviderDatabase;
use reth_stages_api::{
BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput,
ExecutionCheckpoint, ExecutionStageThresholds, Stage, StageCheckpoint, StageError, StageId,
UnwindInput, UnwindOutput,
};
use reth_static_file_types::StaticFileSegment;
use std::{
cmp::Ordering,
ops::RangeInclusive,
sync::Arc,
task::{ready, Context, Poll},
time::{Duration, Instant},
};
use tracing::*;
use super::missing_static_data_error;
/// The execution stage executes all transactions and
/// update history indexes.
///
/// Input tables:
/// - [`tables::CanonicalHeaders`] get next block to execute.
/// - [`tables::Headers`] get for revm environment variables.
/// - [`tables::HeaderTerminalDifficulties`]
/// - [`tables::BlockBodyIndices`] to get tx number
/// - [`tables::Transactions`] to execute
///
/// For state access [`LatestStateProviderRef`] provides us latest state and history state
/// For latest most recent state [`LatestStateProviderRef`] would need (Used for execution Stage):
/// - [`tables::PlainAccountState`]
/// - [`tables::Bytecodes`]
/// - [`tables::PlainStorageState`]
///
/// Tables updated after state finishes execution:
/// - [`tables::PlainAccountState`]
/// - [`tables::PlainStorageState`]
/// - [`tables::Bytecodes`]
/// - [`tables::AccountChangeSets`]
/// - [`tables::StorageChangeSets`]
///
/// For unwinds we are accessing:
/// - [`tables::BlockBodyIndices`] get tx index to know what needs to be unwinded
/// - [`tables::AccountsHistory`] to remove change set and apply old values to
/// - [`tables::PlainAccountState`] [`tables::StoragesHistory`] to remove change set and apply old
/// values to [`tables::PlainStorageState`]
// false positive, we cannot derive it if !DB: Debug.
#[derive(Debug)]
pub struct ExecutionStage<E>
where
E: ConfigureEvm,
{
/// The stage's internal block executor
evm_config: E,
/// The consensus instance for validating blocks.
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
/// The commit thresholds of the execution stage.
thresholds: ExecutionStageThresholds,
/// The highest threshold (in number of blocks) for switching between incremental
/// and full calculations across [`super::MerkleStage`], [`super::AccountHashingStage`] and
/// [`super::StorageHashingStage`]. This is required to figure out if can prune or not
/// changesets on subsequent pipeline runs.
external_clean_threshold: u64,
/// Input for the post execute commit hook.
/// Set after every [`ExecutionStage::execute`] and cleared after
/// [`ExecutionStage::post_execute_commit`].
post_execute_commit_input: Option<Chain<E::Primitives>>,
/// Input for the post unwind commit hook.
/// Set after every [`ExecutionStage::unwind`] and cleared after
/// [`ExecutionStage::post_unwind_commit`].
post_unwind_commit_input: Option<Chain<E::Primitives>>,
/// Handle to communicate with `ExEx` manager.
exex_manager_handle: ExExManagerHandle<E::Primitives>,
/// Executor metrics.
metrics: ExecutorMetrics,
}
impl<E> ExecutionStage<E>
where
E: ConfigureEvm,
{
/// Create new execution stage with specified config.
pub fn new(
evm_config: E,
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
thresholds: ExecutionStageThresholds,
external_clean_threshold: u64,
exex_manager_handle: ExExManagerHandle<E::Primitives>,
) -> Self {
Self {
external_clean_threshold,
evm_config,
consensus,
thresholds,
post_execute_commit_input: None,
post_unwind_commit_input: None,
exex_manager_handle,
metrics: ExecutorMetrics::default(),
}
}
/// Create an execution stage with the provided executor.
///
/// The commit threshold will be set to [`MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD`].
pub fn new_with_executor(
evm_config: E,
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
) -> Self {
Self::new(
evm_config,
consensus,
ExecutionStageThresholds::default(),
MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD,
ExExManagerHandle::empty(),
)
}
/// Create new instance of [`ExecutionStage`] from configuration.
pub fn from_config(
evm_config: E,
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
config: ExecutionConfig,
external_clean_threshold: u64,
) -> Self {
Self::new(
evm_config,
consensus,
config.into(),
external_clean_threshold,
ExExManagerHandle::empty(),
)
}
/// Returns whether we can perform pruning of [`tables::AccountChangeSets`] and
/// [`tables::StorageChangeSets`].
///
/// This function verifies whether the [`super::MerkleStage`] or Hashing stages will run from
/// scratch. If at least one stage isn't starting anew, it implies that pruning of
/// changesets cannot occur. This is determined by checking the highest clean threshold
/// (`self.external_clean_threshold`) across the stages.
///
/// Given that `start_block` changes with each checkpoint, it's necessary to inspect
/// [`tables::AccountsTrie`] to ensure that [`super::MerkleStage`] hasn't
/// been previously executed.
fn can_prune_changesets(
&self,
provider: impl StatsReader,
start_block: u64,
max_block: u64,
) -> Result<bool, StageError> {
// We can only prune changesets if we're not executing MerkleStage from scratch (by
// threshold or first-sync)
Ok(max_block - start_block > self.external_clean_threshold ||
provider.count_entries::<tables::AccountsTrie>()?.is_zero())
}
/// Performs consistency check on static files.
///
/// This function compares the highest receipt number recorded in the database with that in the
/// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks.
/// **If the height in the static file is higher**, it rolls back (unwinds) the static file.
/// **Conversely, if the height in the database is lower**, it triggers a rollback in the
/// database (by returning [`StageError`]) until the heights in both the database and static
/// file match.
fn ensure_consistency<Provider>(
&self,
provider: &Provider,
checkpoint: u64,
unwind_to: Option<u64>,
) -> Result<(), StageError>
where
Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider,
{
// If there's any receipts pruning configured, receipts are written directly to database and
// inconsistencies are expected.
if provider.prune_modes_ref().has_receipts_pruning() {
return Ok(())
}
// Get next expected receipt number
let next_receipt_num =
provider.block_body_indices(checkpoint)?.map(|b| b.next_tx_num()).unwrap_or(0);
let static_file_provider = provider.static_file_provider();
// Get next expected receipt number in static files
let next_static_file_receipt_num = static_file_provider
.get_highest_static_file_tx(StaticFileSegment::Receipts)
.map(|num| num + 1)
.unwrap_or(0);
// Check if we had any unexpected shutdown after committing to static files, but
// NOT committing to database.
match next_static_file_receipt_num.cmp(&next_receipt_num) {
// It can be equal when it's a chain of empty blocks, but we still need to update the
// last block in the range.
Ordering::Greater | Ordering::Equal => {
let mut static_file_producer =
static_file_provider.latest_writer(StaticFileSegment::Receipts)?;
static_file_producer
.prune_receipts(next_static_file_receipt_num - next_receipt_num, checkpoint)?;
// Since this is a database <-> static file inconsistency, we commit the change
// straight away.
static_file_producer.commit()?;
}
Ordering::Less => {
// If we are already in the process of unwind, this might be fine because we will
// fix the inconsistency right away.
if let Some(unwind_to) = unwind_to {
let next_receipt_num_after_unwind = provider
.block_body_indices(unwind_to)?
.map(|b| b.next_tx_num())
.ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?;
if next_receipt_num_after_unwind > next_static_file_receipt_num {
// This means we need a deeper unwind.
} else {
return Ok(())
}
}
return Err(missing_static_data_error(
next_static_file_receipt_num.saturating_sub(1),
&static_file_provider,
provider,
StaticFileSegment::Receipts,
)?)
}
}
Ok(())
}
}
impl<E, Provider> Stage<Provider> for ExecutionStage<E>
where
E: ConfigureEvm,
Provider: DBProvider
+ BlockReader<
Block = <E::Primitives as NodePrimitives>::Block,
Header = <E::Primitives as NodePrimitives>::BlockHeader,
> + StaticFileProviderFactory<
Primitives: NodePrimitives<BlockHeader: reth_db_api::table::Value>,
> + StatsReader
+ BlockHashReader
+ StateWriter<Receipt = <E::Primitives as NodePrimitives>::Receipt>,
{
/// Return the id of the stage
fn id(&self) -> StageId {
StageId::Execution
}
fn poll_execute_ready(
&mut self,
cx: &mut Context<'_>,
_: ExecInput,
) -> Poll<Result<(), StageError>> {
ready!(self.exex_manager_handle.poll_ready(cx));
Poll::Ready(Ok(()))
}
/// Execute the stage
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
if input.target_reached() {
return Ok(ExecOutput::done(input.checkpoint()))
}
let start_block = input.next_block();
let max_block = input.target();
let static_file_provider = provider.static_file_provider();
self.ensure_consistency(provider, input.checkpoint().block_number, None)?;
let db = StateProviderDatabase(LatestStateProviderRef::new(provider));
let mut executor = self.evm_config.batch_executor(db);
// Progress tracking
let mut stage_progress = start_block;
let mut stage_checkpoint = execution_checkpoint(
&static_file_provider,
start_block,
max_block,
input.checkpoint(),
)?;
let mut fetch_block_duration = Duration::default();
let mut execution_duration = Duration::default();
let mut last_block = start_block;
let mut last_execution_duration = Duration::default();
let mut last_cumulative_gas = 0;
let mut last_log_instant = Instant::now();
let log_duration = Duration::from_secs(10);
debug!(target: "sync::stages::execution", start = start_block, end = max_block, "Executing range");
// Execute block range
let mut cumulative_gas = 0;
let batch_start = Instant::now();
let mut blocks = Vec::new();
let mut results = Vec::new();
for block_number in start_block..=max_block {
// Fetch the block
let fetch_block_start = Instant::now();
// we need the block's transactions but we don't need the transaction hashes
let block = provider
.recovered_block(block_number.into(), TransactionVariant::NoHash)?
.ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?;
fetch_block_duration += fetch_block_start.elapsed();
cumulative_gas += block.header().gas_used();
// Configure the executor to use the current state.
trace!(target: "sync::stages::execution", number = block_number, txs = block.body().transactions().len(), "Executing block");
// Execute the block
let execute_start = Instant::now();
let result = self.metrics.metered_one(&block, |input| {
executor.execute_one(input).map_err(|error| StageError::Block {
block: Box::new(block.block_with_parent()),
error: BlockErrorKind::Execution(error),
})
})?;
if let Err(err) = self.consensus.validate_block_post_execution(&block, &result) {
return Err(StageError::Block {
block: Box::new(block.block_with_parent()),
error: BlockErrorKind::Validation(err),
})
}
results.push(result);
execution_duration += execute_start.elapsed();
// Log execution throughput
if last_log_instant.elapsed() >= log_duration {
info!(
target: "sync::stages::execution",
start = last_block,
end = block_number,
throughput = format_gas_throughput(cumulative_gas - last_cumulative_gas, execution_duration - last_execution_duration),
"Executed block range"
);
last_block = block_number + 1;
last_execution_duration = execution_duration;
last_cumulative_gas = cumulative_gas;
last_log_instant = Instant::now();
}
stage_progress = block_number;
stage_checkpoint.progress.processed += block.header().gas_used();
// If we have ExExes we need to save the block in memory for later
if self.exex_manager_handle.has_exexs() {
blocks.push(block);
}
// Check if we should commit now
if self.thresholds.is_end_of_batch(
block_number - start_block,
executor.size_hint() as u64,
cumulative_gas,
batch_start.elapsed(),
) {
break
}
}
// prepare execution output for writing
let time = Instant::now();
let mut state = ExecutionOutcome::from_blocks(
start_block,
executor.into_state().take_bundle(),
results,
);
let write_preparation_duration = time.elapsed();
// log the gas per second for the range we just executed
debug!(
target: "sync::stages::execution",
start = start_block,
end = stage_progress,
throughput = format_gas_throughput(cumulative_gas, execution_duration),
"Finished executing block range"
);
// Prepare the input for post execute commit hook, where an `ExExNotification` will be sent.
//
// Note: Since we only write to `blocks` if there are any ExExes, we don't need to perform
// the `has_exexs` check here as well
if !blocks.is_empty() {
let previous_input =
self.post_execute_commit_input.replace(Chain::new(blocks, state.clone(), None));
if previous_input.is_some() {
// Not processing the previous post execute commit input is a critical error, as it
// means that we didn't send the notification to ExExes
return Err(StageError::PostExecuteCommit(
"Previous post execute commit input wasn't processed",
))
}
}
let time = Instant::now();
if self.can_prune_changesets(provider, start_block, max_block)? {
let prune_modes = provider.prune_modes_ref();
// Iterate over all reverts and clear them if pruning is configured.
for block_number in start_block..=max_block {
let Some(reverts) =
state.bundle.reverts.get_mut((block_number - start_block) as usize)
else {
break
};
// If both account history and storage history pruning is configured, clear reverts
// for this block.
if prune_modes
.account_history
.is_some_and(|m| m.should_prune(block_number, max_block)) &&
prune_modes
.storage_history
.is_some_and(|m| m.should_prune(block_number, max_block))
{
reverts.clear();
}
}
}
// write output
provider.write_state(&state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?;
let db_write_duration = time.elapsed();
debug!(
target: "sync::stages::execution",
block_fetch = ?fetch_block_duration,
execution = ?execution_duration,
write_preparation = ?write_preparation_duration,
write = ?db_write_duration,
"Execution time"
);
let done = stage_progress == max_block;
Ok(ExecOutput {
checkpoint: StageCheckpoint::new(stage_progress)
.with_execution_stage_checkpoint(stage_checkpoint),
done,
})
}
fn post_execute_commit(&mut self) -> Result<(), StageError> {
let Some(chain) = self.post_execute_commit_input.take() else { return Ok(()) };
// NOTE: We can ignore the error here, since an error means that the channel is closed,
// which means the manager has died, which then in turn means the node is shutting down.
let _ = self.exex_manager_handle.send(
ExExNotificationSource::Pipeline,
ExExNotification::ChainCommitted { new: Arc::new(chain) },
);
Ok(())
}
/// Unwind the stage.
fn unwind(
&mut self,
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
let (range, unwind_to, _) =
input.unwind_block_range_with_threshold(self.thresholds.max_blocks.unwrap_or(u64::MAX));
if range.is_empty() {
return Ok(UnwindOutput {
checkpoint: input.checkpoint.with_block_number(input.unwind_to),
})
}
self.ensure_consistency(provider, input.checkpoint.block_number, Some(unwind_to))?;
// Unwind account and storage changesets, as well as receipts.
//
// This also updates `PlainStorageState` and `PlainAccountState`.
let bundle_state_with_receipts =
provider.take_state_above(unwind_to, StorageLocation::Both)?;
// Prepare the input for post unwind commit hook, where an `ExExNotification` will be sent.
if self.exex_manager_handle.has_exexs() {
// Get the blocks for the unwound range.
let blocks = provider.recovered_block_range(range.clone())?;
let previous_input = self.post_unwind_commit_input.replace(Chain::new(
blocks,
bundle_state_with_receipts,
None,
));
debug_assert!(
previous_input.is_none(),
"Previous post unwind commit input wasn't processed"
);
if let Some(previous_input) = previous_input {
tracing::debug!(target: "sync::stages::execution", ?previous_input, "Previous post unwind commit input wasn't processed");
}
}
// Update the checkpoint.
let mut stage_checkpoint = input.checkpoint.execution_stage_checkpoint();
if let Some(stage_checkpoint) = stage_checkpoint.as_mut() {
for block_number in range {
stage_checkpoint.progress.processed -= provider
.header_by_number(block_number)?
.ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?
.gas_used();
}
}
let checkpoint = if let Some(stage_checkpoint) = stage_checkpoint {
StageCheckpoint::new(unwind_to).with_execution_stage_checkpoint(stage_checkpoint)
} else {
StageCheckpoint::new(unwind_to)
};
Ok(UnwindOutput { checkpoint })
}
fn post_unwind_commit(&mut self) -> Result<(), StageError> {
let Some(chain) = self.post_unwind_commit_input.take() else { return Ok(()) };
// NOTE: We can ignore the error here, since an error means that the channel is closed,
// which means the manager has died, which then in turn means the node is shutting down.
let _ = self.exex_manager_handle.send(
ExExNotificationSource::Pipeline,
ExExNotification::ChainReverted { old: Arc::new(chain) },
);
Ok(())
}
}
fn execution_checkpoint<N>(
provider: &StaticFileProvider<N>,
start_block: BlockNumber,
max_block: BlockNumber,
checkpoint: StageCheckpoint,
) -> Result<ExecutionCheckpoint, ProviderError>
where
N: NodePrimitives<BlockHeader: reth_db_api::table::Value>,
{
Ok(match checkpoint.execution_stage_checkpoint() {
// If checkpoint block range fully matches our range,
// we take the previously used stage checkpoint as-is.
Some(stage_checkpoint @ ExecutionCheckpoint { block_range, .. })
if block_range == CheckpointBlockRange::from(start_block..=max_block) =>
{
stage_checkpoint
}
// If checkpoint block range precedes our range seamlessly, we take the previously used
// stage checkpoint and add the amount of gas from our range to the checkpoint total.
Some(ExecutionCheckpoint {
block_range: CheckpointBlockRange { to, .. },
progress: EntitiesCheckpoint { processed, total },
}) if to == start_block - 1 => ExecutionCheckpoint {
block_range: CheckpointBlockRange { from: start_block, to: max_block },
progress: EntitiesCheckpoint {
processed,
total: total + calculate_gas_used_from_headers(provider, start_block..=max_block)?,
},
},
// If checkpoint block range ends on the same block as our range, we take the previously
// used stage checkpoint.
Some(ExecutionCheckpoint { block_range: CheckpointBlockRange { to, .. }, progress })
if to == max_block =>
{
ExecutionCheckpoint {
block_range: CheckpointBlockRange { from: start_block, to: max_block },
progress,
}
}
// If there's any other non-empty checkpoint, we calculate the remaining amount of total gas
// to be processed not including the checkpoint range.
Some(ExecutionCheckpoint { progress: EntitiesCheckpoint { processed, .. }, .. }) => {
let after_checkpoint_block_number =
calculate_gas_used_from_headers(provider, checkpoint.block_number + 1..=max_block)?;
ExecutionCheckpoint {
block_range: CheckpointBlockRange { from: start_block, to: max_block },
progress: EntitiesCheckpoint {
processed,
total: processed + after_checkpoint_block_number,
},
}
}
// Otherwise, we recalculate the whole stage checkpoint including the amount of gas
// already processed, if there's any.
_ => {
let processed = calculate_gas_used_from_headers(provider, 0..=start_block - 1)?;
ExecutionCheckpoint {
block_range: CheckpointBlockRange { from: start_block, to: max_block },
progress: EntitiesCheckpoint {
processed,
total: processed +
calculate_gas_used_from_headers(provider, start_block..=max_block)?,
},
}
}
})
}
/// Calculates the total amount of gas used from the headers in the given range.
pub fn calculate_gas_used_from_headers<N>(
provider: &StaticFileProvider<N>,
range: RangeInclusive<BlockNumber>,
) -> Result<u64, ProviderError>
where
N: NodePrimitives<BlockHeader: reth_db_api::table::Value>,
{
debug!(target: "sync::stages::execution", ?range, "Calculating gas used from headers");
let mut gas_total = 0;
let start = Instant::now();
for entry in provider.fetch_range_iter(
StaticFileSegment::Headers,
*range.start()..*range.end() + 1,
|cursor, number| cursor.get_one::<HeaderMask<N::BlockHeader>>(number.into()),
)? {
let entry = entry?;
gas_total += entry.gas_used();
}
let duration = start.elapsed();
debug!(target: "sync::stages::execution", ?range, ?duration, "Finished calculating gas used from headers");
Ok(gas_total)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{stages::MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, test_utils::TestStageDB};
use alloy_primitives::{address, hex_literal::hex, keccak256, Address, B256, U256};
use alloy_rlp::Decodable;
use assert_matches::assert_matches;
use reth_chainspec::ChainSpecBuilder;
use reth_db_api::{
models::AccountBeforeTx,
transaction::{DbTx, DbTxMut},
};
use reth_ethereum_consensus::EthBeaconConsensus;
use reth_ethereum_primitives::Block;
use reth_evm_ethereum::EthEvmConfig;
use reth_primitives_traits::{Account, Bytecode, SealedBlock, StorageEntry};
use reth_provider::{
test_utils::create_test_provider_factory, AccountReader, DatabaseProviderFactory,
ReceiptProvider, StaticFileProviderFactory,
};
use reth_prune::PruneModes;
use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig};
use reth_stages_api::StageUnitCheckpoint;
use std::collections::BTreeMap;
fn stage() -> ExecutionStage<EthEvmConfig> {
let evm_config =
EthEvmConfig::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build()));
let consensus = Arc::new(EthBeaconConsensus::new(Arc::new(
ChainSpecBuilder::mainnet().berlin_activated().build(),
)));
ExecutionStage::new(
evm_config,
consensus,
ExecutionStageThresholds {
max_blocks: Some(100),
max_changes: None,
max_cumulative_gas: None,
max_duration: None,
},
MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD,
ExExManagerHandle::empty(),
)
}
#[test]
fn execution_checkpoint_matches() {
let factory = create_test_provider_factory();
let previous_stage_checkpoint = ExecutionCheckpoint {
block_range: CheckpointBlockRange { from: 0, to: 0 },
progress: EntitiesCheckpoint { processed: 1, total: 2 },
};
let previous_checkpoint = StageCheckpoint {
block_number: 0,
stage_checkpoint: Some(StageUnitCheckpoint::Execution(previous_stage_checkpoint)),
};
let stage_checkpoint = execution_checkpoint(
&factory.static_file_provider(),
previous_stage_checkpoint.block_range.from,
previous_stage_checkpoint.block_range.to,
previous_checkpoint,
);
assert!(
matches!(stage_checkpoint, Ok(checkpoint) if checkpoint == previous_stage_checkpoint)
);
}
#[test]
fn execution_checkpoint_precedes() {
let factory = create_test_provider_factory();
let provider = factory.provider_rw().unwrap();
let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice();
let genesis = SealedBlock::<Block>::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = SealedBlock::<Block>::decode(&mut block_rlp).unwrap();
provider.insert_historical_block(genesis.try_recover().unwrap()).unwrap();
provider.insert_historical_block(block.clone().try_recover().unwrap()).unwrap();
provider
.static_file_provider()
.latest_writer(StaticFileSegment::Headers)
.unwrap()
.commit()
.unwrap();
provider.commit().unwrap();
let previous_stage_checkpoint = ExecutionCheckpoint {
block_range: CheckpointBlockRange { from: 0, to: 0 },
progress: EntitiesCheckpoint { processed: 1, total: 1 },
};
let previous_checkpoint = StageCheckpoint {
block_number: 1,
stage_checkpoint: Some(StageUnitCheckpoint::Execution(previous_stage_checkpoint)),
};
let stage_checkpoint =
execution_checkpoint(&factory.static_file_provider(), 1, 1, previous_checkpoint);
assert_matches!(stage_checkpoint, Ok(ExecutionCheckpoint {
block_range: CheckpointBlockRange { from: 1, to: 1 },
progress: EntitiesCheckpoint {
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/finish.rs | crates/stages/stages/src/stages/finish.rs | use reth_stages_api::{
ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput,
};
/// The finish stage.
///
/// This stage does not write anything; it's checkpoint is used to denote the highest fully synced
/// block.
#[derive(Default, Debug, Clone)]
#[non_exhaustive]
pub struct FinishStage;
impl<Provider> Stage<Provider> for FinishStage {
fn id(&self) -> StageId {
StageId::Finish
}
fn execute(
&mut self,
_provider: &Provider,
input: ExecInput,
) -> Result<ExecOutput, StageError> {
Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true })
}
fn unwind(
&mut self,
_provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError,
TestStageDB, UnwindStageTestRunner,
};
use reth_primitives_traits::SealedHeader;
use reth_provider::providers::StaticFileWriter;
use reth_testing_utils::{
generators,
generators::{random_header, random_header_range},
};
stage_test_suite_ext!(FinishTestRunner, finish);
#[derive(Default)]
struct FinishTestRunner {
db: TestStageDB,
}
impl StageTestRunner for FinishTestRunner {
type S = FinishStage;
fn db(&self) -> &TestStageDB {
&self.db
}
fn stage(&self) -> Self::S {
FinishStage
}
}
impl ExecuteStageTestRunner for FinishTestRunner {
type Seed = Vec<SealedHeader>;
fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
let start = input.checkpoint().block_number;
let mut rng = generators::rng();
let head = random_header(&mut rng, start, None);
self.db.insert_headers_with_td(std::iter::once(&head))?;
// use previous progress as seed size
let end = input.target.unwrap_or_default() + 1;
if start + 1 >= end {
return Ok(Vec::default())
}
let mut headers = random_header_range(&mut rng, start + 1..end, head.hash());
self.db.insert_headers_with_td(headers.iter())?;
headers.insert(0, head);
Ok(headers)
}
fn validate_execution(
&self,
input: ExecInput,
output: Option<ExecOutput>,
) -> Result<(), TestRunnerError> {
if let Some(output) = output {
assert!(output.done, "stage should always be done");
assert_eq!(
output.checkpoint.block_number,
input.target(),
"stage progress should always match progress of previous stage"
);
}
Ok(())
}
}
impl UnwindStageTestRunner for FinishTestRunner {
fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
Ok(())
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/era.rs | crates/stages/stages/src/stages/era.rs | use crate::{StageCheckpoint, StageId};
use alloy_primitives::{BlockHash, BlockNumber};
use futures_util::{Stream, StreamExt};
use reqwest::{Client, Url};
use reth_config::config::EtlConfig;
use reth_db_api::{table::Value, transaction::DbTxMut};
use reth_era::{era1_file::Era1Reader, era_file_ops::StreamReader};
use reth_era_downloader::{read_dir, EraClient, EraMeta, EraStream, EraStreamConfig};
use reth_era_utils as era;
use reth_etl::Collector;
use reth_primitives_traits::{FullBlockBody, FullBlockHeader, NodePrimitives};
use reth_provider::{
BlockReader, BlockWriter, DBProvider, HeaderProvider, StageCheckpointWriter,
StaticFileProviderFactory, StaticFileWriter,
};
use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput};
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::ProviderError;
use std::{
fmt::{Debug, Formatter},
iter,
path::Path,
task::{ready, Context, Poll},
};
type Item<Header, Body> =
Box<dyn Iterator<Item = eyre::Result<(Header, Body)>> + Send + Sync + Unpin>;
type ThreadSafeEraStream<Header, Body> =
Box<dyn Stream<Item = eyre::Result<Item<Header, Body>>> + Send + Sync + Unpin>;
/// The [ERA1](https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md)
/// pre-merge history stage.
///
/// Imports block headers and bodies from genesis up to the last pre-merge block. Receipts are
/// generated by execution. Execution is not done in this stage.
pub struct EraStage<Header, Body, StreamFactory> {
/// The `source` creates `stream`.
source: Option<StreamFactory>,
/// A map of block hash to block height collected when processing headers and inserted into
/// database afterward.
hash_collector: Collector<BlockHash, BlockNumber>,
/// Last extracted iterator of block `Header` and `Body` pairs.
item: Option<Item<Header, Body>>,
/// A stream of [`Item`]s, i.e. iterators over block `Header` and `Body` pairs.
stream: Option<ThreadSafeEraStream<Header, Body>>,
}
trait EraStreamFactory<Header, Body> {
fn create(self, input: ExecInput) -> Result<ThreadSafeEraStream<Header, Body>, StageError>;
}
impl<Header, Body> EraStreamFactory<Header, Body> for EraImportSource
where
Header: FullBlockHeader + Value,
Body: FullBlockBody<OmmerHeader = Header>,
{
fn create(self, input: ExecInput) -> Result<ThreadSafeEraStream<Header, Body>, StageError> {
match self {
Self::Path(path) => Self::convert(
read_dir(path, input.next_block()).map_err(|e| StageError::Fatal(e.into()))?,
),
Self::Url(url, folder) => {
let _ = reth_fs_util::create_dir_all(&folder);
let client = EraClient::new(Client::new(), url, folder);
Self::convert(EraStream::new(
client,
EraStreamConfig::default().start_from(input.next_block()),
))
}
}
}
}
impl EraImportSource {
fn convert<Header, Body>(
stream: impl Stream<Item = eyre::Result<impl EraMeta + Send + Sync + 'static + Unpin>>
+ Send
+ Sync
+ 'static
+ Unpin,
) -> Result<ThreadSafeEraStream<Header, Body>, StageError>
where
Header: FullBlockHeader + Value,
Body: FullBlockBody<OmmerHeader = Header>,
{
Ok(Box::new(Box::pin(stream.map(|meta| {
meta.and_then(|meta| {
let file = reth_fs_util::open(meta.path())?;
let reader = Era1Reader::new(file);
let iter = reader.iter();
let iter = iter.map(era::decode);
let iter = iter.chain(
iter::once_with(move || match meta.mark_as_processed() {
Ok(..) => None,
Err(e) => Some(Err(e)),
})
.flatten(),
);
Ok(Box::new(iter) as Item<Header, Body>)
})
}))))
}
}
impl<Header: Debug, Body: Debug, F: Debug> Debug for EraStage<Header, Body, F> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("EraStage")
.field("source", &self.source)
.field("hash_collector", &self.hash_collector)
.field("item", &self.item.is_some())
.field("stream", &"dyn Stream")
.finish()
}
}
impl<Header, Body, F> EraStage<Header, Body, F> {
/// Creates a new [`EraStage`].
pub fn new(source: Option<F>, etl_config: EtlConfig) -> Self {
Self {
source,
item: None,
stream: None,
hash_collector: Collector::new(etl_config.file_size, etl_config.dir),
}
}
}
impl<Provider, N, F> Stage<Provider> for EraStage<N::BlockHeader, N::BlockBody, F>
where
Provider: DBProvider<Tx: DbTxMut>
+ StaticFileProviderFactory<Primitives = N>
+ BlockWriter<Block = N::Block>
+ BlockReader<Block = N::Block>
+ StageCheckpointWriter,
F: EraStreamFactory<N::BlockHeader, N::BlockBody> + Send + Sync + Clone,
N: NodePrimitives<BlockHeader: Value>,
{
fn id(&self) -> StageId {
StageId::Era
}
fn poll_execute_ready(
&mut self,
cx: &mut Context<'_>,
input: ExecInput,
) -> Poll<Result<(), StageError>> {
if input.target_reached() || self.item.is_some() {
return Poll::Ready(Ok(()));
}
if self.stream.is_none() {
if let Some(source) = self.source.clone() {
self.stream.replace(source.create(input)?);
}
}
if let Some(stream) = &mut self.stream {
if let Some(next) = ready!(stream.poll_next_unpin(cx))
.transpose()
.map_err(|e| StageError::Fatal(e.into()))?
{
self.item.replace(next);
}
}
Poll::Ready(Ok(()))
}
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
let height = if let Some(era) = self.item.take() {
let static_file_provider = provider.static_file_provider();
// Consistency check of expected headers in static files vs DB is done on
// provider::sync_gap when poll_execute_ready is polled.
let last_header_number = static_file_provider
.get_highest_static_file_block(StaticFileSegment::Headers)
.unwrap_or_default();
// Find the latest total difficulty
let mut td = static_file_provider
.header_td_by_number(last_header_number)?
.ok_or(ProviderError::TotalDifficultyNotFound(last_header_number))?;
// Although headers were downloaded in reverse order, the collector iterates it in
// ascending order
let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
let height = era::process_iter(
era,
&mut writer,
provider,
&mut self.hash_collector,
&mut td,
last_header_number..=input.target(),
)
.map_err(|e| StageError::Fatal(e.into()))?;
if !self.hash_collector.is_empty() {
era::build_index(provider, &mut self.hash_collector)
.map_err(|e| StageError::Recoverable(e.into()))?;
self.hash_collector.clear();
}
era::save_stage_checkpoints(
&provider,
input.checkpoint().block_number,
height,
height,
input.target(),
)?;
height
} else {
input.target()
};
Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height == input.target() })
}
fn unwind(
&mut self,
_provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
Ok(UnwindOutput { checkpoint: input.checkpoint.with_block_number(input.unwind_to) })
}
}
/// Describes where to get the era files from.
#[derive(Debug, Clone)]
pub enum EraImportSource {
/// Remote HTTP accessible host.
Url(Url, Box<Path>),
/// Local directory.
Path(Box<Path>),
}
impl EraImportSource {
/// Maybe constructs a new `EraImportSource` depending on the arguments.
///
/// Only one of `url` or `path` should be provided, but upholding this invariant is delegated
/// above so that both parameters can be accepted.
///
/// # Arguments
/// * The `path` uses a directory as the import source. It and its contents must be readable.
/// * The `url` uses an HTTP client to list and download files.
/// * The `default` gives the default [`Url`] if none of the previous parameters are provided.
/// * For any [`Url`] the `folder` is used as the download directory for storing files
/// temporarily. It and its contents must be readable and writable.
pub fn maybe_new(
path: Option<Box<Path>>,
url: Option<Url>,
default: impl FnOnce() -> Option<Url>,
folder: impl FnOnce() -> Box<Path>,
) -> Option<Self> {
path.map(Self::Path).or_else(|| url.or_else(default).map(|url| Self::Url(url, folder())))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner,
};
use alloy_primitives::B256;
use assert_matches::assert_matches;
use reth_db_api::tables;
use reth_provider::BlockHashReader;
use reth_testing_utils::generators::{self, random_header};
use test_runner::EraTestRunner;
#[tokio::test]
async fn test_era_range_ends_below_target() {
let era_cap = 2;
let target = 20000;
let mut runner = EraTestRunner::default();
let input = ExecInput { target: Some(era_cap), checkpoint: None };
runner.seed_execution(input).unwrap();
let input = ExecInput { target: Some(target), checkpoint: None };
let output = runner.execute(input).await.unwrap();
runner.commit();
assert_matches!(
output,
Ok(ExecOutput {
checkpoint: StageCheckpoint { block_number, stage_checkpoint: None },
done: false
}) if block_number == era_cap
);
let output = output.unwrap();
let validation_output = runner.validate_execution(input, Some(output.clone()));
assert_matches!(validation_output, Ok(()));
runner.take_responses();
let input = ExecInput { target: Some(target), checkpoint: Some(output.checkpoint) };
let output = runner.execute(input).await.unwrap();
runner.commit();
assert_matches!(
output,
Ok(ExecOutput {
checkpoint: StageCheckpoint { block_number, stage_checkpoint: None },
done: true
}) if block_number == target
);
let validation_output = runner.validate_execution(input, output.ok());
assert_matches!(validation_output, Ok(()));
}
mod test_runner {
use super::*;
use crate::test_utils::{TestRunnerError, TestStageDB};
use alloy_consensus::{BlockBody, Header};
use futures_util::stream;
use reth_db_api::{
cursor::DbCursorRO,
models::{StoredBlockBodyIndices, StoredBlockOmmers},
transaction::DbTx,
};
use reth_ethereum_primitives::TransactionSigned;
use reth_primitives_traits::{SealedBlock, SealedHeader};
use reth_provider::{BlockNumReader, TransactionsProvider};
use reth_testing_utils::generators::{
random_block_range, random_signed_tx, BlockRangeParams,
};
use tokio::sync::watch;
pub(crate) struct EraTestRunner {
channel: (watch::Sender<B256>, watch::Receiver<B256>),
db: TestStageDB,
responses: Option<Vec<(Header, BlockBody<TransactionSigned>)>>,
}
impl Default for EraTestRunner {
fn default() -> Self {
Self {
channel: watch::channel(B256::ZERO),
db: TestStageDB::default(),
responses: Default::default(),
}
}
}
impl StageTestRunner for EraTestRunner {
type S = EraStage<Header, BlockBody<TransactionSigned>, StubResponses>;
fn db(&self) -> &TestStageDB {
&self.db
}
fn stage(&self) -> Self::S {
EraStage::new(self.responses.clone().map(StubResponses), EtlConfig::default())
}
}
impl ExecuteStageTestRunner for EraTestRunner {
type Seed = Vec<SealedBlock<reth_ethereum_primitives::Block>>;
fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
let start = input.checkpoint().block_number;
let end = input.target();
let static_file_provider = self.db.factory.static_file_provider();
let mut rng = generators::rng();
// Static files do not support gaps in headers, so we need to generate 0 to end
let blocks = random_block_range(
&mut rng,
0..=end,
BlockRangeParams {
parent: Some(B256::ZERO),
tx_count: 0..2,
..Default::default()
},
);
self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?;
if let Some(progress) = blocks.get(start as usize) {
// Insert last progress data
{
let tx = self.db.factory.provider_rw()?.into_tx();
let mut static_file_producer = static_file_provider
.get_writer(start, StaticFileSegment::Transactions)?;
let body = StoredBlockBodyIndices {
first_tx_num: 0,
tx_count: progress.transaction_count() as u64,
};
static_file_producer.set_block_range(0..=progress.number);
body.tx_num_range().try_for_each(|tx_num| {
let transaction = random_signed_tx(&mut rng);
static_file_producer.append_transaction(tx_num, &transaction).map(drop)
})?;
if body.tx_count != 0 {
tx.put::<tables::TransactionBlocks>(
body.last_tx_num(),
progress.number,
)?;
}
tx.put::<tables::BlockBodyIndices>(progress.number, body)?;
if !progress.ommers_hash_is_empty() {
tx.put::<tables::BlockOmmers>(
progress.number,
StoredBlockOmmers { ommers: progress.body().ommers.clone() },
)?;
}
static_file_producer.commit()?;
tx.commit()?;
}
}
self.responses.replace(
blocks.iter().map(|v| (v.header().clone(), v.body().clone())).collect(),
);
Ok(blocks)
}
/// Validate stored headers and bodies
fn validate_execution(
&self,
input: ExecInput,
output: Option<ExecOutput>,
) -> Result<(), TestRunnerError> {
let initial_checkpoint = input.checkpoint().block_number;
match output {
Some(output) if output.checkpoint.block_number > initial_checkpoint => {
let provider = self.db.factory.provider()?;
let mut td = provider
.header_td_by_number(initial_checkpoint.saturating_sub(1))?
.unwrap_or_default();
for block_num in initial_checkpoint..
output
.checkpoint
.block_number
.min(self.responses.as_ref().map(|v| v.len()).unwrap_or_default()
as BlockNumber)
{
// look up the header hash
let hash = provider.block_hash(block_num)?.expect("no header hash");
// validate the header number
assert_eq!(provider.block_number(hash)?, Some(block_num));
// validate the header
let header = provider.header_by_number(block_num)?;
assert!(header.is_some());
let header = SealedHeader::seal_slow(header.unwrap());
assert_eq!(header.hash(), hash);
// validate the header total difficulty
td += header.difficulty;
assert_eq!(provider.header_td_by_number(block_num)?, Some(td));
}
self.validate_db_blocks(
output.checkpoint.block_number,
output.checkpoint.block_number,
)?;
}
_ => self.check_no_header_entry_above(initial_checkpoint)?,
};
Ok(())
}
async fn after_execution(&self, headers: Self::Seed) -> Result<(), TestRunnerError> {
let tip = if headers.is_empty() {
let tip = random_header(&mut generators::rng(), 0, None);
self.db.insert_headers(iter::once(&tip))?;
tip.hash()
} else {
headers.last().unwrap().hash()
};
self.send_tip(tip);
Ok(())
}
}
impl UnwindStageTestRunner for EraTestRunner {
fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
Ok(())
}
}
impl EraTestRunner {
pub(crate) fn check_no_header_entry_above(
&self,
block: BlockNumber,
) -> Result<(), TestRunnerError> {
self.db
.ensure_no_entry_above_by_value::<tables::HeaderNumbers, _>(block, |val| val)?;
self.db.ensure_no_entry_above::<tables::CanonicalHeaders, _>(block, |key| key)?;
self.db.ensure_no_entry_above::<tables::Headers, _>(block, |key| key)?;
self.db.ensure_no_entry_above::<tables::HeaderTerminalDifficulties, _>(
block,
|num| num,
)?;
Ok(())
}
pub(crate) fn send_tip(&self, tip: B256) {
self.channel.0.send(tip).expect("failed to send tip");
}
/// Validate that the inserted block data is valid
pub(crate) fn validate_db_blocks(
&self,
prev_progress: BlockNumber,
highest_block: BlockNumber,
) -> Result<(), TestRunnerError> {
let static_file_provider = self.db.factory.static_file_provider();
self.db.query(|tx| {
// Acquire cursors on body related tables
let mut bodies_cursor = tx.cursor_read::<tables::BlockBodyIndices>()?;
let mut ommers_cursor = tx.cursor_read::<tables::BlockOmmers>()?;
let mut tx_block_cursor = tx.cursor_read::<tables::TransactionBlocks>()?;
let first_body_key = match bodies_cursor.first()? {
Some((key, _)) => key,
None => return Ok(()),
};
let mut prev_number: Option<BlockNumber> = None;
for entry in bodies_cursor.walk(Some(first_body_key))? {
let (number, body) = entry?;
// Validate sequentiality only after prev progress,
// since the data before is mocked and can contain gaps
if number > prev_progress {
if let Some(prev_key) = prev_number {
assert_eq!(prev_key + 1, number, "Body entries must be sequential");
}
}
// Validate that the current entry is below or equals to the highest allowed block
assert!(
number <= highest_block,
"We wrote a block body outside of our synced range. Found block with number {number}, highest block according to stage is {highest_block}",
);
let header = static_file_provider.header_by_number(number)?.expect("to be present");
// Validate that ommers exist if any
let stored_ommers = ommers_cursor.seek_exact(number)?;
if header.ommers_hash_is_empty() {
assert!(stored_ommers.is_none(), "Unexpected ommers entry");
} else {
assert!(stored_ommers.is_some(), "Missing ommers entry");
}
let tx_block_id = tx_block_cursor.seek_exact(body.last_tx_num())?.map(|(_,b)| b);
if body.tx_count == 0 {
assert_ne!(tx_block_id,Some(number));
} else {
assert_eq!(tx_block_id, Some(number));
}
for tx_id in body.tx_num_range() {
assert!(static_file_provider.transaction_by_id(tx_id)?.is_some(), "Transaction is missing.");
}
prev_number = Some(number);
}
Ok(())
})?;
Ok(())
}
pub(crate) fn take_responses(&mut self) {
self.responses.take();
}
pub(crate) fn commit(&self) {
self.db.factory.static_file_provider().commit().unwrap();
}
}
#[derive(Clone)]
pub(crate) struct StubResponses(Vec<(Header, BlockBody<TransactionSigned>)>);
impl EraStreamFactory<Header, BlockBody<TransactionSigned>> for StubResponses {
fn create(
self,
_input: ExecInput,
) -> Result<ThreadSafeEraStream<Header, BlockBody<TransactionSigned>>, StageError>
{
let stream = stream::iter(vec![self.0]);
Ok(Box::new(Box::pin(stream.map(|meta| {
Ok(Box::new(meta.into_iter().map(Ok))
as Item<Header, BlockBody<TransactionSigned>>)
}))))
}
}
}
stage_test_suite!(EraTestRunner, era);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/test_utils/runner.rs | crates/stages/stages/src/test_utils/runner.rs | use super::TestStageDB;
use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv};
use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, ProviderError};
use reth_stages_api::{
ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput,
};
use reth_storage_errors::db::DatabaseError;
use tokio::sync::oneshot;
#[derive(thiserror::Error, Debug)]
pub(crate) enum TestRunnerError {
#[error(transparent)]
Database(#[from] DatabaseError),
#[error(transparent)]
Internal(#[from] Box<dyn core::error::Error>),
#[error(transparent)]
Provider(#[from] ProviderError),
}
/// A generic test runner for stages.
pub(crate) trait StageTestRunner {
type S: Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>>
+ 'static;
/// Return a reference to the database.
fn db(&self) -> &TestStageDB;
/// Return an instance of a Stage.
fn stage(&self) -> Self::S;
}
pub(crate) trait ExecuteStageTestRunner: StageTestRunner {
type Seed: Send + Sync;
/// Seed database for stage execution
fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError>;
/// Validate stage execution
fn validate_execution(
&self,
input: ExecInput,
output: Option<ExecOutput>,
) -> Result<(), TestRunnerError>;
/// Run [`Stage::execute`] and return a receiver for the result.
fn execute(&self, input: ExecInput) -> oneshot::Receiver<Result<ExecOutput, StageError>> {
let (tx, rx) = oneshot::channel();
let (db, mut stage) = (self.db().factory.clone(), self.stage());
tokio::spawn(async move {
let result = stage.execute_ready(input).await.and_then(|_| {
let provider_rw = db.provider_rw().unwrap();
let result = stage.execute(&provider_rw, input);
provider_rw.commit().expect("failed to commit");
result
});
tx.send(result).expect("failed to send message")
});
rx
}
/// Run a hook after [`Stage::execute`]. Required for Headers & Bodies stages.
async fn after_execution(&self, _seed: Self::Seed) -> Result<(), TestRunnerError> {
Ok(())
}
}
pub(crate) trait UnwindStageTestRunner: StageTestRunner {
/// Validate the unwind
fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError>;
/// Run [`Stage::unwind`] and return a receiver for the result.
async fn unwind(&self, input: UnwindInput) -> Result<UnwindOutput, StageError> {
let (tx, rx) = oneshot::channel();
let (db, mut stage) = (self.db().factory.clone(), self.stage());
tokio::spawn(async move {
let provider = db.provider_rw().unwrap();
let result = stage.unwind(&provider, input);
provider.commit().expect("failed to commit");
tx.send(result).expect("failed to send result");
});
rx.await.unwrap()
}
/// Run a hook before [`Stage::unwind`]. Required for `MerkleStage`.
fn before_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/test_utils/test_db.rs | crates/stages/stages/src/test_utils/test_db.rs | use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256, U256};
use reth_chainspec::MAINNET;
use reth_db::{
test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir},
DatabaseEnv,
};
use reth_db_api::{
common::KeyValue,
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
database::Database,
models::{AccountBeforeTx, StoredBlockBodyIndices},
table::Table,
tables,
transaction::{DbTx, DbTxMut},
DatabaseError as DbError,
};
use reth_ethereum_primitives::{Block, EthPrimitives, Receipt};
use reth_primitives_traits::{Account, SealedBlock, SealedHeader, StorageEntry};
use reth_provider::{
providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter},
test_utils::MockNodeTypesWithDB,
HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory,
};
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::ProviderResult;
use reth_testing_utils::generators::ChangeSet;
use std::{collections::BTreeMap, fmt::Debug, path::Path};
use tempfile::TempDir;
/// Test database that is used for testing stage implementations.
#[derive(Debug)]
pub struct TestStageDB {
pub factory: ProviderFactory<MockNodeTypesWithDB>,
pub temp_static_files_dir: TempDir,
}
impl Default for TestStageDB {
/// Create a new instance of [`TestStageDB`]
fn default() -> Self {
let (static_dir, static_dir_path) = create_test_static_files_dir();
Self {
temp_static_files_dir: static_dir,
factory: ProviderFactory::new(
create_test_rw_db(),
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
}
}
}
impl TestStageDB {
pub fn new(path: &Path) -> Self {
let (static_dir, static_dir_path) = create_test_static_files_dir();
Self {
temp_static_files_dir: static_dir,
factory: ProviderFactory::new(
create_test_rw_db_with_path(path),
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
}
}
/// Invoke a callback with transaction committing it afterwards
pub fn commit<F>(&self, f: F) -> ProviderResult<()>
where
F: FnOnce(&<DatabaseEnv as Database>::TXMut) -> ProviderResult<()>,
{
let tx = self.factory.provider_rw()?;
f(tx.tx_ref())?;
tx.commit().expect("failed to commit");
Ok(())
}
/// Invoke a callback with a read transaction
pub fn query<F, Ok>(&self, f: F) -> ProviderResult<Ok>
where
F: FnOnce(&<DatabaseEnv as Database>::TX) -> ProviderResult<Ok>,
{
f(self.factory.provider()?.tx_ref())
}
/// Check if the table is empty
pub fn table_is_empty<T: Table>(&self) -> ProviderResult<bool> {
self.query(|tx| {
let last = tx.cursor_read::<T>()?.last()?;
Ok(last.is_none())
})
}
/// Return full table as Vec
pub fn table<T: Table>(&self) -> ProviderResult<Vec<KeyValue<T>>>
where
T::Key: Default + Ord,
{
self.query(|tx| {
Ok(tx
.cursor_read::<T>()?
.walk(Some(T::Key::default()))?
.collect::<Result<Vec<_>, DbError>>()?)
})
}
/// Check that there is no table entry above a given
/// number by [`Table::Key`]
pub fn ensure_no_entry_above<T, F>(&self, num: u64, mut selector: F) -> ProviderResult<()>
where
T: Table,
F: FnMut(T::Key) -> BlockNumber,
{
self.query(|tx| {
let mut cursor = tx.cursor_read::<T>()?;
if let Some((key, _)) = cursor.last()? {
assert!(selector(key) <= num);
}
Ok(())
})
}
/// Check that there is no table entry above a given
/// number by [`Table::Value`]
pub fn ensure_no_entry_above_by_value<T, F>(
&self,
num: u64,
mut selector: F,
) -> ProviderResult<()>
where
T: Table,
F: FnMut(T::Value) -> BlockNumber,
{
self.query(|tx| {
let mut cursor = tx.cursor_read::<T>()?;
let mut rev_walker = cursor.walk_back(None)?;
while let Some((_, value)) = rev_walker.next().transpose()? {
assert!(selector(value) <= num);
}
Ok(())
})
}
/// Insert header to static file if `writer` exists, otherwise to DB.
pub fn insert_header<TX: DbTx + DbTxMut>(
writer: Option<&mut StaticFileProviderRWRefMut<'_, EthPrimitives>>,
tx: &TX,
header: &SealedHeader,
td: U256,
) -> ProviderResult<()> {
if let Some(writer) = writer {
// Backfill: some tests start at a forward block number, but static files require no
// gaps.
let segment_header = writer.user_header();
if segment_header.block_end().is_none() && segment_header.expected_block_start() == 0 {
for block_number in 0..header.number {
let mut prev = header.clone_header();
prev.number = block_number;
writer.append_header(&prev, U256::ZERO, &B256::ZERO)?;
}
}
writer.append_header(header.header(), td, &header.hash())?;
} else {
tx.put::<tables::CanonicalHeaders>(header.number, header.hash())?;
tx.put::<tables::HeaderTerminalDifficulties>(header.number, td.into())?;
tx.put::<tables::Headers>(header.number, header.header().clone())?;
}
tx.put::<tables::HeaderNumbers>(header.hash(), header.number)?;
Ok(())
}
fn insert_headers_inner<'a, I, const TD: bool>(&self, headers: I) -> ProviderResult<()>
where
I: IntoIterator<Item = &'a SealedHeader>,
{
let provider = self.factory.static_file_provider();
let mut writer = provider.latest_writer(StaticFileSegment::Headers)?;
let tx = self.factory.provider_rw()?.into_tx();
let mut td = U256::ZERO;
for header in headers {
if TD {
td += header.difficulty;
}
Self::insert_header(Some(&mut writer), &tx, header, td)?;
}
writer.commit()?;
tx.commit()?;
Ok(())
}
/// Insert ordered collection of [`SealedHeader`] into the corresponding static file and tables
/// that are supposed to be populated by the headers stage.
pub fn insert_headers<'a, I>(&self, headers: I) -> ProviderResult<()>
where
I: IntoIterator<Item = &'a SealedHeader>,
{
self.insert_headers_inner::<I, false>(headers)
}
/// Inserts total difficulty of headers into the corresponding static file and tables.
///
/// Superset functionality of [`TestStageDB::insert_headers`].
pub fn insert_headers_with_td<'a, I>(&self, headers: I) -> ProviderResult<()>
where
I: IntoIterator<Item = &'a SealedHeader>,
{
self.insert_headers_inner::<I, true>(headers)
}
/// Insert ordered collection of [`SealedBlock`] into corresponding tables.
/// Superset functionality of [`TestStageDB::insert_headers`].
///
/// If `tx_offset` is set to `None`, then transactions will be stored on static files, otherwise
/// database.
///
/// Assumes that there's a single transition for each transaction (i.e. no block rewards).
pub fn insert_blocks<'a, I>(&self, blocks: I, storage_kind: StorageKind) -> ProviderResult<()>
where
I: IntoIterator<Item = &'a SealedBlock<Block>>,
{
let provider = self.factory.static_file_provider();
let tx = self.factory.provider_rw().unwrap().into_tx();
let mut next_tx_num = storage_kind.tx_offset();
let blocks = blocks.into_iter().collect::<Vec<_>>();
{
let mut headers_writer = storage_kind
.is_static()
.then(|| provider.latest_writer(StaticFileSegment::Headers).unwrap());
blocks.iter().try_for_each(|block| {
Self::insert_header(headers_writer.as_mut(), &tx, block.sealed_header(), U256::ZERO)
})?;
if let Some(mut writer) = headers_writer {
writer.commit()?;
}
}
{
let mut txs_writer = storage_kind
.is_static()
.then(|| provider.latest_writer(StaticFileSegment::Transactions).unwrap());
blocks.into_iter().try_for_each(|block| {
// Insert into body tables.
let block_body_indices = StoredBlockBodyIndices {
first_tx_num: next_tx_num,
tx_count: block.transaction_count() as u64,
};
if !block.body().transactions.is_empty() {
tx.put::<tables::TransactionBlocks>(
block_body_indices.last_tx_num(),
block.number,
)?;
}
tx.put::<tables::BlockBodyIndices>(block.number, block_body_indices)?;
let res = block.body().transactions.iter().try_for_each(|body_tx| {
if let Some(txs_writer) = &mut txs_writer {
txs_writer.append_transaction(next_tx_num, body_tx)?;
} else {
tx.put::<tables::Transactions>(next_tx_num, body_tx.clone())?
}
next_tx_num += 1;
Ok::<(), ProviderError>(())
});
if let Some(txs_writer) = &mut txs_writer {
// Backfill: some tests start at a forward block number, but static files
// require no gaps.
let segment_header = txs_writer.user_header();
if segment_header.block_end().is_none() &&
segment_header.expected_block_start() == 0
{
for block in 0..block.number {
txs_writer.increment_block(block)?;
}
}
txs_writer.increment_block(block.number)?;
}
res
})?;
if let Some(txs_writer) = &mut txs_writer {
txs_writer.commit()?;
}
}
tx.commit()?;
Ok(())
}
pub fn insert_tx_hash_numbers<I>(&self, tx_hash_numbers: I) -> ProviderResult<()>
where
I: IntoIterator<Item = (TxHash, TxNumber)>,
{
self.commit(|tx| {
tx_hash_numbers.into_iter().try_for_each(|(tx_hash, tx_num)| {
// Insert into tx hash numbers table.
Ok(tx.put::<tables::TransactionHashNumbers>(tx_hash, tx_num)?)
})
})
}
/// Insert collection of ([`TxNumber`], [Receipt]) into the corresponding table.
pub fn insert_receipts<I>(&self, receipts: I) -> ProviderResult<()>
where
I: IntoIterator<Item = (TxNumber, Receipt)>,
{
self.commit(|tx| {
receipts.into_iter().try_for_each(|(tx_num, receipt)| {
// Insert into receipts table.
Ok(tx.put::<tables::Receipts>(tx_num, receipt)?)
})
})
}
/// Insert collection of ([`TxNumber`], [Receipt]) organized by respective block numbers into
/// the corresponding table or static file segment.
pub fn insert_receipts_by_block<I, J>(
&self,
receipts: I,
storage_kind: StorageKind,
) -> ProviderResult<()>
where
I: IntoIterator<Item = (BlockNumber, J)>,
J: IntoIterator<Item = (TxNumber, Receipt)>,
{
match storage_kind {
StorageKind::Database(_) => self.commit(|tx| {
receipts.into_iter().try_for_each(|(_, receipts)| {
for (tx_num, receipt) in receipts {
tx.put::<tables::Receipts>(tx_num, receipt)?;
}
Ok(())
})
}),
StorageKind::Static => {
let provider = self.factory.static_file_provider();
let mut writer = provider.latest_writer(StaticFileSegment::Receipts)?;
let res = receipts.into_iter().try_for_each(|(block_num, receipts)| {
writer.increment_block(block_num)?;
writer.append_receipts(receipts.into_iter().map(Ok))?;
Ok(())
});
writer.commit_without_sync_all()?;
res
}
}
}
pub fn insert_transaction_senders<I>(&self, transaction_senders: I) -> ProviderResult<()>
where
I: IntoIterator<Item = (TxNumber, Address)>,
{
self.commit(|tx| {
transaction_senders.into_iter().try_for_each(|(tx_num, sender)| {
// Insert into receipts table.
Ok(tx.put::<tables::TransactionSenders>(tx_num, sender)?)
})
})
}
/// Insert collection of ([Address], [Account]) into corresponding tables.
pub fn insert_accounts_and_storages<I, S>(&self, accounts: I) -> ProviderResult<()>
where
I: IntoIterator<Item = (Address, (Account, S))>,
S: IntoIterator<Item = StorageEntry>,
{
self.commit(|tx| {
accounts.into_iter().try_for_each(|(address, (account, storage))| {
let hashed_address = keccak256(address);
// Insert into account tables.
tx.put::<tables::PlainAccountState>(address, account)?;
tx.put::<tables::HashedAccounts>(hashed_address, account)?;
// Insert into storage tables.
storage.into_iter().filter(|e| !e.value.is_zero()).try_for_each(|entry| {
let hashed_entry = StorageEntry { key: keccak256(entry.key), ..entry };
let mut cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
if cursor
.seek_by_key_subkey(address, entry.key)?
.filter(|e| e.key == entry.key)
.is_some()
{
cursor.delete_current()?;
}
cursor.upsert(address, &entry)?;
let mut cursor = tx.cursor_dup_write::<tables::HashedStorages>()?;
if cursor
.seek_by_key_subkey(hashed_address, hashed_entry.key)?
.filter(|e| e.key == hashed_entry.key)
.is_some()
{
cursor.delete_current()?;
}
cursor.upsert(hashed_address, &hashed_entry)?;
Ok(())
})
})
})
}
/// Insert collection of [`ChangeSet`] into corresponding tables.
pub fn insert_changesets<I>(
&self,
changesets: I,
block_offset: Option<u64>,
) -> ProviderResult<()>
where
I: IntoIterator<Item = ChangeSet>,
{
let offset = block_offset.unwrap_or_default();
self.commit(|tx| {
changesets.into_iter().enumerate().try_for_each(|(block, changeset)| {
changeset.into_iter().try_for_each(|(address, old_account, old_storage)| {
let block = offset + block as u64;
// Insert into account changeset.
tx.put::<tables::AccountChangeSets>(
block,
AccountBeforeTx { address, info: Some(old_account) },
)?;
let block_address = (block, address).into();
// Insert into storage changeset.
old_storage.into_iter().try_for_each(|entry| {
Ok(tx.put::<tables::StorageChangeSets>(block_address, entry)?)
})
})
})
})
}
pub fn insert_history<I>(&self, changesets: I, _block_offset: Option<u64>) -> ProviderResult<()>
where
I: IntoIterator<Item = ChangeSet>,
{
let mut accounts = BTreeMap::<Address, Vec<u64>>::new();
let mut storages = BTreeMap::<(Address, B256), Vec<u64>>::new();
for (block, changeset) in changesets.into_iter().enumerate() {
for (address, _, storage_entries) in changeset {
accounts.entry(address).or_default().push(block as u64);
for storage_entry in storage_entries {
storages.entry((address, storage_entry.key)).or_default().push(block as u64);
}
}
}
let provider_rw = self.factory.provider_rw()?;
provider_rw.insert_account_history_index(accounts)?;
provider_rw.insert_storage_history_index(storages)?;
provider_rw.commit()?;
Ok(())
}
}
/// Used to identify where to store data when setting up a test.
#[derive(Debug)]
pub enum StorageKind {
Database(Option<u64>),
Static,
}
impl StorageKind {
#[expect(dead_code)]
const fn is_database(&self) -> bool {
matches!(self, Self::Database(_))
}
const fn is_static(&self) -> bool {
matches!(self, Self::Static)
}
fn tx_offset(&self) -> u64 {
if let Self::Database(offset) = self {
return offset.unwrap_or_default();
}
0
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/test_utils/macros.rs | crates/stages/stages/src/test_utils/macros.rs | macro_rules! stage_test_suite {
($runner:ident, $name:ident) => {
paste::item! {
/// Check that the execution is short-circuited if the database is empty.
#[tokio::test]
async fn [< execute_empty_db_ $name>] () {
// Set up the runner
let runner = $runner::default();
// Execute the stage with empty database
let input = reth_stages_api::ExecInput::default();
// Run stage execution
let result = runner.execute(input).await;
reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap();
// Check that the result is returned and the stage does not panic.
// The return result with empty db is stage-specific.
assert_matches::assert_matches!(result, Ok(_));
// Validate the stage execution
assert_matches::assert_matches!(
runner.validate_execution(input, result.unwrap().ok()),
Ok(_),
"execution validation"
);
}
// Run the complete stage execution flow.
#[tokio::test]
async fn [< execute_ $name>] () {
let (target, current_checkpoint) = (500, 100);
// Set up the runner
let mut runner = $runner::default();
let input = reth_stages_api::ExecInput {
target: Some(target),
checkpoint: Some(crate::StageCheckpoint::new(current_checkpoint)),
};
let seed = runner.seed_execution(input).expect("failed to seed");
let rx = runner.execute(input);
// Run `after_execution` hook
runner.after_execution(seed).await.expect("failed to run after execution hook");
// Assert the successful result
let result = rx.await.unwrap();
reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap();
assert_matches::assert_matches!(
result,
Ok(ExecOutput { done, checkpoint })
if done && checkpoint.block_number == target
);
// Validate the stage execution
assert_matches::assert_matches!(
runner.validate_execution(input, result.ok()),
Ok(_),
"execution validation"
);
}
// Check that unwind does not panic on no new entries within the input range.
#[tokio::test]
async fn [< unwind_no_new_entries_ $name>] () {
// Set up the runner
let mut runner = $runner::default();
let input = reth_stages_api::UnwindInput::default();
// Seed the database
runner.seed_execution(reth_stages_api::ExecInput::default()).expect("failed to seed");
runner.before_unwind(input).expect("failed to execute before_unwind hook");
// Run stage unwind
let rx = runner.unwind(input).await;
reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap();
assert_matches::assert_matches!(
rx,
Ok(UnwindOutput { checkpoint }) if checkpoint.block_number == input.unwind_to
);
// Validate the stage unwind
assert_matches::assert_matches!(
runner.validate_unwind(input),
Ok(_),
"unwind validation"
);
}
// Run complete execute and unwind flow.
#[tokio::test]
async fn [< unwind_ $name>] () {
let (target, current_checkpoint) = (500, 100);
// Set up the runner
let mut runner = $runner::default();
let execute_input = reth_stages_api::ExecInput {
target: Some(target),
checkpoint: Some(crate::StageCheckpoint::new(current_checkpoint)),
};
let seed = runner.seed_execution(execute_input).expect("failed to seed");
// Run stage execution
let rx = runner.execute(execute_input);
runner.after_execution(seed).await.expect("failed to run after execution hook");
// Assert the successful execution result
let result = rx.await.unwrap();
reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap();
assert_matches::assert_matches!(
result,
Ok(ExecOutput { done, checkpoint })
if done && checkpoint.block_number == target
);
assert_matches::assert_matches!(
runner.validate_execution(execute_input, result.ok()),
Ok(_),
"execution validation"
);
// Run stage unwind
let unwind_input = reth_stages_api::UnwindInput {
unwind_to: current_checkpoint,
checkpoint: crate::StageCheckpoint::new(target),
bad_block: None,
};
runner.before_unwind(unwind_input).expect("Failed to unwind state");
let rx = runner.unwind(unwind_input).await;
// Assert the successful unwind result
assert_matches::assert_matches!(
rx,
Ok(UnwindOutput { checkpoint }) if checkpoint.block_number == unwind_input.unwind_to
);
// Validate the stage unwind
assert_matches::assert_matches!(
runner.validate_unwind(unwind_input),
Ok(_),
"unwind validation"
);
}
}
};
}
// `execute_already_reached_target` is not suitable for the headers stage thus
// included in the test suite extension
macro_rules! stage_test_suite_ext {
($runner:ident, $name:ident) => {
crate::test_utils::stage_test_suite!($runner, $name);
paste::item! {
/// Check that the execution is short-circuited if the target was already reached.
#[tokio::test]
async fn [< execute_already_reached_target_ $name>] () {
let current_checkpoint = 1000;
// Set up the runner
let mut runner = $runner::default();
let input = reth_stages_api::ExecInput {
target: Some(current_checkpoint),
checkpoint: Some(crate::StageCheckpoint::new(current_checkpoint)),
};
let seed = runner.seed_execution(input).expect("failed to seed");
// Run stage execution
let rx = runner.execute(input);
// Run `after_execution` hook
runner.after_execution(seed).await.expect("failed to run after execution hook");
// Assert the successful result
let result = rx.await.unwrap();
reth_provider::StaticFileProviderFactory::static_file_provider(&runner.db().factory).commit().unwrap();
assert_matches::assert_matches!(
result,
Ok(ExecOutput { done, checkpoint })
if done && checkpoint.block_number == current_checkpoint
);
// Validate the stage execution
assert_matches::assert_matches!(
runner.validate_execution(input, result.ok()),
Ok(_),
"execution validation"
);
}
}
};
}
pub(crate) use stage_test_suite;
pub(crate) use stage_test_suite_ext;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/test_utils/mod.rs | crates/stages/stages/src/test_utils/mod.rs | use reth_stages_api::StageId;
#[cfg(test)]
mod macros;
#[cfg(test)]
pub(crate) use macros::*;
#[cfg(test)]
mod runner;
#[cfg(test)]
pub(crate) use runner::{
ExecuteStageTestRunner, StageTestRunner, TestRunnerError, UnwindStageTestRunner,
};
mod test_db;
pub use test_db::{StorageKind, TestStageDB};
mod set;
pub use set::TestStages;
/// The test stage id
pub const TEST_STAGE_ID: StageId = StageId::Other("TestStage");
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/test_utils/set.rs | crates/stages/stages/src/test_utils/set.rs | use super::TEST_STAGE_ID;
use crate::{StageSet, StageSetBuilder};
use reth_stages_api::{test_utils::TestStage, ExecOutput, StageError, UnwindOutput};
use std::collections::VecDeque;
#[derive(Default, Debug)]
pub struct TestStages {
exec_outputs: VecDeque<Result<ExecOutput, StageError>>,
unwind_outputs: VecDeque<Result<UnwindOutput, StageError>>,
}
impl TestStages {
pub const fn new(
exec_outputs: VecDeque<Result<ExecOutput, StageError>>,
unwind_outputs: VecDeque<Result<UnwindOutput, StageError>>,
) -> Self {
Self { exec_outputs, unwind_outputs }
}
}
impl<Provider> StageSet<Provider> for TestStages {
fn builder(self) -> StageSetBuilder<Provider> {
StageSetBuilder::default().add_stage(
TestStage::new(TEST_STAGE_ID)
.with_exec(self.exec_outputs)
.with_unwind(self.unwind_outputs),
)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/benches/criterion.rs | crates/stages/stages/benches/criterion.rs | #![allow(missing_docs)]
#![allow(unexpected_cfgs)]
use alloy_primitives::BlockNumber;
use criterion::{criterion_main, measurement::WallTime, BenchmarkGroup, Criterion};
use reth_config::config::{EtlConfig, TransactionLookupConfig};
use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv};
use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory};
use reth_stages::{
stages::{MerkleStage, SenderRecoveryStage, TransactionLookupStage},
test_utils::TestStageDB,
StageCheckpoint,
};
use reth_stages_api::{ExecInput, Stage, StageExt, UnwindInput};
use std::ops::RangeInclusive;
use tokio::runtime::Runtime;
mod setup;
use setup::StageRange;
// Expanded form of `criterion_group!`
//
// This is currently needed to only instantiate the tokio runtime once.
#[cfg(not(codspeed))]
fn benches() {
run_benches(&mut Criterion::default().configure_from_args());
}
fn run_benches(criterion: &mut Criterion) {
let runtime = Runtime::new().unwrap();
let _guard = runtime.enter();
transaction_lookup(criterion, &runtime);
account_hashing(criterion, &runtime);
senders(criterion, &runtime);
merkle(criterion, &runtime);
}
#[cfg(not(codspeed))]
criterion_main!(benches);
#[cfg(codspeed)]
criterion_main!(run_benches);
const DEFAULT_NUM_BLOCKS: u64 = 10_000;
fn account_hashing(c: &mut Criterion, runtime: &Runtime) {
let mut group = c.benchmark_group("Stages");
// don't need to run each stage for that many times
group.sample_size(10);
let num_blocks = 10_000;
let (db, stage, range) = setup::prepare_account_hashing(num_blocks);
measure_stage(
runtime,
&mut group,
&db,
setup::stage_unwind,
stage,
range,
"AccountHashing".to_string(),
);
}
fn senders(c: &mut Criterion, runtime: &Runtime) {
let mut group = c.benchmark_group("Stages");
// don't need to run each stage for that many times
group.sample_size(10);
let db = setup::txs_testdata(DEFAULT_NUM_BLOCKS);
let stage = SenderRecoveryStage { commit_threshold: DEFAULT_NUM_BLOCKS };
measure_stage(
runtime,
&mut group,
&db,
setup::stage_unwind,
stage,
0..=DEFAULT_NUM_BLOCKS,
"SendersRecovery".to_string(),
);
}
fn transaction_lookup(c: &mut Criterion, runtime: &Runtime) {
let mut group = c.benchmark_group("Stages");
// don't need to run each stage for that many times
group.sample_size(10);
let stage = TransactionLookupStage::new(
TransactionLookupConfig { chunk_size: DEFAULT_NUM_BLOCKS },
EtlConfig::default(),
None,
);
let db = setup::txs_testdata(DEFAULT_NUM_BLOCKS);
measure_stage(
runtime,
&mut group,
&db,
setup::stage_unwind,
stage,
0..=DEFAULT_NUM_BLOCKS,
"TransactionLookup".to_string(),
);
}
fn merkle(c: &mut Criterion, runtime: &Runtime) {
let mut group = c.benchmark_group("Stages");
// don't need to run each stage for that many times
group.sample_size(10);
let db = setup::txs_testdata(DEFAULT_NUM_BLOCKS);
let stage = MerkleStage::Both { rebuild_threshold: u64::MAX, incremental_threshold: u64::MAX };
measure_stage(
runtime,
&mut group,
&db,
setup::unwind_hashes,
stage,
1..=DEFAULT_NUM_BLOCKS,
"Merkle-incremental".to_string(),
);
let stage = MerkleStage::Both { rebuild_threshold: 0, incremental_threshold: 0 };
measure_stage(
runtime,
&mut group,
&db,
setup::unwind_hashes,
stage,
1..=DEFAULT_NUM_BLOCKS,
"Merkle-fullhash".to_string(),
);
}
fn measure_stage<F, S>(
runtime: &Runtime,
group: &mut BenchmarkGroup<'_, WallTime>,
db: &TestStageDB,
setup: F,
stage: S,
block_interval: RangeInclusive<BlockNumber>,
label: String,
) where
S: Clone
+ Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>>,
F: Fn(S, &TestStageDB, StageRange),
{
let stage_range = (
ExecInput {
target: Some(*block_interval.end()),
checkpoint: Some(StageCheckpoint::new(*block_interval.start())),
},
UnwindInput {
checkpoint: StageCheckpoint::new(*block_interval.end()),
unwind_to: *block_interval.start(),
bad_block: None,
},
);
let (input, _) = stage_range;
group.bench_function(label, move |b| {
b.to_async(runtime).iter_with_setup(
|| {
// criterion setup does not support async, so we have to use our own runtime
setup(stage.clone(), db, stage_range)
},
|_| async {
let mut stage = stage.clone();
let provider = db.factory.database_provider_rw().unwrap();
stage
.execute_ready(input)
.await
.and_then(|_| stage.execute(&provider, input))
.unwrap();
provider.commit().unwrap();
},
)
});
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/benches/setup/mod.rs | crates/stages/stages/benches/setup/mod.rs | #![expect(unreachable_pub)]
use alloy_primitives::{Address, B256, U256};
use itertools::concat;
use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv};
use reth_db_api::{
cursor::DbCursorRO,
tables,
transaction::{DbTx, DbTxMut},
};
use reth_primitives_traits::{Account, SealedBlock, SealedHeader};
use reth_provider::{
test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory, TrieWriter,
};
use reth_stages::{
stages::{AccountHashingStage, StorageHashingStage},
test_utils::{StorageKind, TestStageDB},
};
use reth_testing_utils::generators::{
self, random_block_range, random_changeset_range, random_contract_account_range,
random_eoa_accounts, BlockRangeParams,
};
use reth_trie::StateRoot;
use std::{collections::BTreeMap, fs, path::Path};
use tokio::runtime::Handle;
mod constants;
mod account_hashing;
pub use account_hashing::*;
use reth_stages_api::{ExecInput, Stage, UnwindInput};
use reth_trie_db::DatabaseStateRoot;
pub(crate) type StageRange = (ExecInput, UnwindInput);
pub(crate) fn stage_unwind<
S: Clone
+ Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>>,
>(
stage: S,
db: &TestStageDB,
range: StageRange,
) {
let (_, unwind) = range;
// NOTE(onbjerg): This is unfortunately needed because Criterion does not support async setup
tokio::task::block_in_place(move || {
Handle::current().block_on(async move {
let mut stage = stage.clone();
let provider = db.factory.provider_rw().unwrap();
// Clear previous run
stage
.unwind(&provider, unwind)
.map_err(|e| {
format!(
"{e}\nMake sure your test database at `{}` isn't too old and incompatible with newer stage changes.",
db.factory.db_ref().path().display()
)
})
.unwrap();
provider.commit().unwrap();
})
});
}
pub(crate) fn unwind_hashes<S>(stage: S, db: &TestStageDB, range: StageRange)
where
S: Clone
+ Stage<DatabaseProvider<<TempDatabase<DatabaseEnv> as Database>::TXMut, MockNodeTypesWithDB>>,
{
let (input, unwind) = range;
let mut stage = stage;
let provider = db.factory.database_provider_rw().unwrap();
StorageHashingStage::default().unwind(&provider, unwind).unwrap();
AccountHashingStage::default().unwind(&provider, unwind).unwrap();
// Clear previous run
stage.unwind(&provider, unwind).unwrap();
AccountHashingStage::default().execute(&provider, input).unwrap();
StorageHashingStage::default().execute(&provider, input).unwrap();
provider.commit().unwrap();
}
// Helper for generating testdata for the benchmarks.
// Returns the path to the database file.
pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB {
// This is way too slow.
#[expect(unexpected_cfgs)]
if cfg!(codspeed) {
std::process::exit(0);
}
let txs_range = 100..150;
// number of storage changes per transition
let n_changes = 0..3;
// range of possible values for a storage key
let key_range = 0..300;
// number of accounts
let n_eoa = 131;
let n_contract = 31;
// rng
let mut rng = generators::rng();
let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("testdata").join("txs-bench");
let exists = path.exists();
let db = TestStageDB::new(&path);
if !exists {
// create the dirs
fs::create_dir_all(&path).unwrap();
println!("Transactions testdata not found, generating to {:?}", path.display());
let accounts: BTreeMap<Address, Account> = concat([
random_eoa_accounts(&mut rng, n_eoa),
random_contract_account_range(&mut rng, &mut (0..n_contract)),
])
.into_iter()
.collect();
let mut blocks = random_block_range(
&mut rng,
0..=num_blocks,
BlockRangeParams {
parent: Some(B256::ZERO),
tx_count: txs_range,
..Default::default()
},
);
let (transitions, start_state) = random_changeset_range(
&mut rng,
blocks.iter().take(2),
accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))),
n_changes.clone(),
key_range.clone(),
);
db.insert_accounts_and_storages(start_state.clone()).unwrap();
// make first block after genesis have valid state root
let (root, updates) = StateRoot::from_tx(db.factory.provider_rw().unwrap().tx_ref())
.root_with_updates()
.unwrap();
let second_block = blocks.get_mut(1).unwrap();
let cloned_second = second_block.clone();
let mut updated_header = cloned_second.header().clone();
updated_header.state_root = root;
*second_block = SealedBlock::from_sealed_parts(
SealedHeader::seal_slow(updated_header),
cloned_second.into_body(),
);
let offset = transitions.len() as u64;
db.insert_changesets(transitions, None).unwrap();
let provider_rw = db.factory.provider_rw().unwrap();
provider_rw.write_trie_updates(&updates).unwrap();
provider_rw.commit().unwrap();
let (transitions, final_state) = random_changeset_range(
&mut rng,
blocks.iter().skip(2),
start_state,
n_changes,
key_range,
);
db.insert_changesets(transitions, Some(offset)).unwrap();
db.insert_accounts_and_storages(final_state).unwrap();
// make last block have valid state root
let root = {
let tx_mut = db.factory.provider_rw().unwrap();
let root = StateRoot::from_tx(tx_mut.tx_ref()).root().unwrap();
tx_mut.commit().unwrap();
root
};
let last_block = blocks.last_mut().unwrap();
let cloned_last = last_block.clone();
let mut updated_header = cloned_last.header().clone();
updated_header.state_root = root;
*last_block = SealedBlock::from_sealed_parts(
SealedHeader::seal_slow(updated_header),
cloned_last.into_body(),
);
db.insert_blocks(blocks.iter(), StorageKind::Static).unwrap();
// initialize TD
db.commit(|tx| {
let (head, _) = tx.cursor_read::<tables::Headers>()?.first()?.unwrap_or_default();
Ok(tx.put::<tables::HeaderTerminalDifficulties>(head, U256::from(0).into())?)
})
.unwrap();
}
db
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/benches/setup/constants.rs | crates/stages/stages/benches/setup/constants.rs | #![allow(unreachable_pub)]
pub const ACCOUNT_HASHING_DB: &str = "ACCOUNT_HASHING_DB";
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/benches/setup/account_hashing.rs | crates/stages/stages/benches/setup/account_hashing.rs | #![allow(unreachable_pub)]
use super::constants;
use alloy_primitives::BlockNumber;
use reth_db_api::{
cursor::DbCursorRO, database::Database, tables, transaction::DbTx, DatabaseError as DbError,
};
use reth_stages::{
stages::{AccountHashingStage, SeedOpts},
test_utils::TestStageDB,
StageCheckpoint,
};
use std::{fs, ops::RangeInclusive, path::Path};
/// Prepares a database for [`AccountHashingStage`]
/// If the environment variable [`constants::ACCOUNT_HASHING_DB`] is set, it will use that one and
/// will get the stage execution range from [`tables::BlockBodyIndices`]. Otherwise, it will
/// generate its own random data.
///
/// Returns the path to the database file, stage and range of stage execution if it exists.
pub fn prepare_account_hashing(
num_blocks: u64,
) -> (TestStageDB, AccountHashingStage, RangeInclusive<BlockNumber>) {
let (db, stage_range) = match std::env::var(constants::ACCOUNT_HASHING_DB) {
Ok(db) => {
let path = Path::new(&db).to_path_buf();
let range = find_stage_range(&path);
(TestStageDB::new(&path), range)
}
Err(_) => generate_testdata_db(num_blocks),
};
(db, AccountHashingStage::default(), stage_range)
}
fn find_stage_range(db: &Path) -> RangeInclusive<BlockNumber> {
let mut stage_range = None;
TestStageDB::new(db)
.factory
.db_ref()
.view(|tx| {
let mut cursor = tx.cursor_read::<tables::BlockBodyIndices>()?;
let from = cursor.first()?.unwrap().0;
let to = StageCheckpoint::new(cursor.last()?.unwrap().0);
stage_range = Some(from..=to.block_number);
Ok::<(), DbError>(())
})
.unwrap()
.unwrap();
stage_range.expect("Could not find the stage range from the external DB.")
}
fn generate_testdata_db(num_blocks: u64) -> (TestStageDB, RangeInclusive<BlockNumber>) {
let opts = SeedOpts { blocks: 0..=num_blocks, accounts: 100_000, txs: 100..150 };
let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("testdata").join("account-hashing-bench");
let exists = path.exists();
let db = TestStageDB::new(&path);
if !exists {
// create the dirs
fs::create_dir_all(&path).unwrap();
println!("Account Hashing testdata not found, generating to {:?}", path.display());
let provider = db.factory.provider_rw().unwrap();
let _accounts = AccountHashingStage::seed(&provider, opts.clone());
provider.commit().expect("failed to commit");
}
(db, opts.blocks)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/stage.rs | crates/stages/api/src/stage.rs | use crate::{error::StageError, StageCheckpoint, StageId};
use alloy_primitives::{BlockNumber, TxNumber};
use reth_provider::{BlockReader, ProviderError};
use std::{
cmp::{max, min},
future::{poll_fn, Future},
ops::{Range, RangeInclusive},
task::{Context, Poll},
};
/// Stage execution input, see [`Stage::execute`].
#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
pub struct ExecInput {
/// The target block number the stage needs to execute towards.
pub target: Option<BlockNumber>,
/// The checkpoint of this stage the last time it was executed.
pub checkpoint: Option<StageCheckpoint>,
}
impl ExecInput {
/// Return the checkpoint of the stage or default.
pub fn checkpoint(&self) -> StageCheckpoint {
self.checkpoint.unwrap_or_default()
}
/// Return the next block number after the current
/// +1 is needed to skip the present block and always start from block number 1, not 0.
pub fn next_block(&self) -> BlockNumber {
let current_block = self.checkpoint();
current_block.block_number + 1
}
/// Returns `true` if the target block number has already been reached.
pub fn target_reached(&self) -> bool {
self.checkpoint().block_number >= self.target()
}
/// Return the target block number or default.
pub fn target(&self) -> BlockNumber {
self.target.unwrap_or_default()
}
/// Return next block range that needs to be executed.
pub fn next_block_range(&self) -> RangeInclusive<BlockNumber> {
let (range, _) = self.next_block_range_with_threshold(u64::MAX);
range
}
/// Return true if this is the first block range to execute.
pub const fn is_first_range(&self) -> bool {
self.checkpoint.is_none()
}
/// Return the next block range to execute.
/// Return pair of the block range and if this is final block range.
pub fn next_block_range_with_threshold(
&self,
threshold: u64,
) -> (RangeInclusive<BlockNumber>, bool) {
let current_block = self.checkpoint();
let start = current_block.block_number + 1;
let target = self.target();
let end = min(target, current_block.block_number.saturating_add(threshold));
let is_final_range = end == target;
(start..=end, is_final_range)
}
/// Return the next block range determined the number of transactions within it.
/// This function walks the block indices until either the end of the range is reached or
/// the number of transactions exceeds the threshold.
pub fn next_block_range_with_transaction_threshold<Provider>(
&self,
provider: &Provider,
tx_threshold: u64,
) -> Result<(Range<TxNumber>, RangeInclusive<BlockNumber>, bool), StageError>
where
Provider: BlockReader,
{
let start_block = self.next_block();
let target_block = self.target();
let start_block_body = provider
.block_body_indices(start_block)?
.ok_or(ProviderError::BlockBodyIndicesNotFound(start_block))?;
let first_tx_num = start_block_body.first_tx_num();
let target_block_body = provider
.block_body_indices(target_block)?
.ok_or(ProviderError::BlockBodyIndicesNotFound(target_block))?;
// number of transactions left to execute.
let all_tx_cnt = target_block_body.next_tx_num() - first_tx_num;
if all_tx_cnt == 0 {
// if there is no more transaction return back.
return Ok((first_tx_num..first_tx_num, start_block..=target_block, true))
}
// get block of this tx
let (end_block, is_final_range, next_tx_num) = if all_tx_cnt <= tx_threshold {
(target_block, true, target_block_body.next_tx_num())
} else {
// get tx block number. next_tx_num in this case will be less than all_tx_cnt.
// So we are sure that transaction must exist.
let end_block_number = provider
.transaction_block(first_tx_num + tx_threshold)?
.expect("block of tx must exist");
// we want to get range of all transactions of this block, so we are fetching block
// body.
let end_block_body = provider
.block_body_indices(end_block_number)?
.ok_or(ProviderError::BlockBodyIndicesNotFound(end_block_number))?;
(end_block_number, false, end_block_body.next_tx_num())
};
let tx_range = first_tx_num..next_tx_num;
Ok((tx_range, start_block..=end_block, is_final_range))
}
}
/// Stage unwind input, see [`Stage::unwind`].
#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
pub struct UnwindInput {
/// The current highest checkpoint of the stage.
pub checkpoint: StageCheckpoint,
/// The block to unwind to.
pub unwind_to: BlockNumber,
/// The bad block that caused the unwind, if any.
pub bad_block: Option<BlockNumber>,
}
impl UnwindInput {
/// Return next block range that needs to be unwound.
pub fn unwind_block_range(&self) -> RangeInclusive<BlockNumber> {
self.unwind_block_range_with_threshold(u64::MAX).0
}
/// Return the next block range to unwind and the block we're unwinding to.
pub fn unwind_block_range_with_threshold(
&self,
threshold: u64,
) -> (RangeInclusive<BlockNumber>, BlockNumber, bool) {
// +1 is to skip the block we're unwinding to
let mut start = self.unwind_to + 1;
let end = self.checkpoint;
start = max(start, end.block_number.saturating_sub(threshold));
let unwind_to = start - 1;
let is_final_range = unwind_to == self.unwind_to;
(start..=end.block_number, unwind_to, is_final_range)
}
}
/// The output of a stage execution.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct ExecOutput {
/// How far the stage got.
pub checkpoint: StageCheckpoint,
/// Whether or not the stage is done.
pub done: bool,
}
impl ExecOutput {
/// Mark the stage as not done, checkpointing at the given place.
pub const fn in_progress(checkpoint: StageCheckpoint) -> Self {
Self { checkpoint, done: false }
}
/// Mark the stage as done, checkpointing at the given place.
pub const fn done(checkpoint: StageCheckpoint) -> Self {
Self { checkpoint, done: true }
}
}
/// The output of a stage unwinding.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct UnwindOutput {
/// The checkpoint at which the stage has unwound to.
pub checkpoint: StageCheckpoint,
}
/// A stage is a segmented part of the syncing process of the node.
///
/// Each stage takes care of a well-defined task, such as downloading headers or executing
/// transactions, and persist their results to a database.
///
/// Stages must have a unique [ID][StageId] and implement a way to "roll forwards"
/// ([`Stage::execute`]) and a way to "roll back" ([`Stage::unwind`]).
///
/// Stages are executed as part of a pipeline where they are executed serially.
///
/// Stages receive [`DBProvider`](reth_provider::DBProvider).
#[auto_impl::auto_impl(Box)]
pub trait Stage<Provider>: Send + Sync {
/// Get the ID of the stage.
///
/// Stage IDs must be unique.
fn id(&self) -> StageId;
/// Returns `Poll::Ready(Ok(()))` when the stage is ready to execute the given range.
///
/// This method is heavily inspired by [tower](https://crates.io/crates/tower)'s `Service` trait.
/// Any asynchronous tasks or communication should be handled in `poll_execute_ready`, e.g.
/// moving downloaded items from downloaders to an internal buffer in the stage.
///
/// If the stage has any pending external state, then `Poll::Pending` is returned.
///
/// If `Poll::Ready(Err(_))` is returned, the stage may not be able to execute anymore
/// depending on the specific error. In that case, an unwind must be issued instead.
///
/// Once `Poll::Ready(Ok(()))` is returned, the stage may be executed once using `execute`.
/// Until the stage has been executed, repeated calls to `poll_execute_ready` must return either
/// `Poll::Ready(Ok(()))` or `Poll::Ready(Err(_))`.
///
/// Note that `poll_execute_ready` may reserve shared resources that are consumed in a
/// subsequent call of `execute`, e.g. internal buffers. It is crucial for implementations
/// to not assume that `execute` will always be invoked and to ensure that those resources
/// are appropriately released if the stage is dropped before `execute` is called.
///
/// For the same reason, it is also important that any shared resources do not exhibit
/// unbounded growth on repeated calls to `poll_execute_ready`.
///
/// Unwinds may happen without consulting `poll_execute_ready` first.
fn poll_execute_ready(
&mut self,
_cx: &mut Context<'_>,
_input: ExecInput,
) -> Poll<Result<(), StageError>> {
Poll::Ready(Ok(()))
}
/// Execute the stage.
/// It is expected that the stage will write all necessary data to the database
/// upon invoking this method.
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError>;
/// Post execution commit hook.
///
/// This is called after the stage has been executed and the data has been committed by the
/// provider. The stage may want to pass some data from [`Self::execute`] via the internal
/// field.
fn post_execute_commit(&mut self) -> Result<(), StageError> {
Ok(())
}
/// Unwind the stage.
fn unwind(
&mut self,
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError>;
/// Post unwind commit hook.
///
/// This is called after the stage has been unwound and the data has been committed by the
/// provider. The stage may want to pass some data from [`Self::unwind`] via the internal
/// field.
fn post_unwind_commit(&mut self) -> Result<(), StageError> {
Ok(())
}
}
/// [Stage] trait extension.
pub trait StageExt<Provider>: Stage<Provider> {
/// Utility extension for the `Stage` trait that invokes `Stage::poll_execute_ready`
/// with [`poll_fn`] context. For more information see [`Stage::poll_execute_ready`].
fn execute_ready(
&mut self,
input: ExecInput,
) -> impl Future<Output = Result<(), StageError>> + Send {
poll_fn(move |cx| self.poll_execute_ready(cx, input))
}
}
impl<Provider, S: Stage<Provider> + ?Sized> StageExt<Provider> for S {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/lib.rs | crates/stages/api/src/lib.rs | //! Staged syncing primitives for reth.
//!
//! ## Feature Flags
//!
//! - `test-utils`: Utilities for testing
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
mod error;
mod metrics;
mod pipeline;
mod stage;
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
mod util;
pub use crate::metrics::*;
pub use error::*;
pub use pipeline::*;
pub use stage::*;
use aquamarine as _;
// re-export the stages types for convenience
pub use reth_stages_types::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/test_utils.rs | crates/stages/api/src/test_utils.rs | #![allow(missing_docs)]
use crate::{ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, UnwindOutput};
use std::{
collections::VecDeque,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// A test stage that can be used for testing.
///
/// This can be used to mock expected outputs of [`Stage::execute`] and [`Stage::unwind`]
#[derive(Debug)]
pub struct TestStage {
id: StageId,
exec_outputs: VecDeque<Result<ExecOutput, StageError>>,
unwind_outputs: VecDeque<Result<UnwindOutput, StageError>>,
post_execute_commit_counter: Arc<AtomicUsize>,
post_unwind_commit_counter: Arc<AtomicUsize>,
}
impl TestStage {
pub fn new(id: StageId) -> Self {
Self {
id,
exec_outputs: VecDeque::new(),
unwind_outputs: VecDeque::new(),
post_execute_commit_counter: Arc::new(AtomicUsize::new(0)),
post_unwind_commit_counter: Arc::new(AtomicUsize::new(0)),
}
}
pub fn with_exec(mut self, exec_outputs: VecDeque<Result<ExecOutput, StageError>>) -> Self {
self.exec_outputs = exec_outputs;
self
}
pub fn with_unwind(
mut self,
unwind_outputs: VecDeque<Result<UnwindOutput, StageError>>,
) -> Self {
self.unwind_outputs = unwind_outputs;
self
}
pub fn add_exec(mut self, output: Result<ExecOutput, StageError>) -> Self {
self.exec_outputs.push_back(output);
self
}
pub fn add_unwind(mut self, output: Result<UnwindOutput, StageError>) -> Self {
self.unwind_outputs.push_back(output);
self
}
pub fn with_post_execute_commit_counter(mut self) -> (Self, Arc<AtomicUsize>) {
let counter = Arc::new(AtomicUsize::new(0));
self.post_execute_commit_counter = counter.clone();
(self, counter)
}
pub fn with_post_unwind_commit_counter(mut self) -> (Self, Arc<AtomicUsize>) {
let counter = Arc::new(AtomicUsize::new(0));
self.post_unwind_commit_counter = counter.clone();
(self, counter)
}
}
impl<Provider> Stage<Provider> for TestStage {
fn id(&self) -> StageId {
self.id
}
fn execute(&mut self, _: &Provider, _input: ExecInput) -> Result<ExecOutput, StageError> {
self.exec_outputs
.pop_front()
.unwrap_or_else(|| panic!("Test stage {} executed too many times.", self.id))
}
fn post_execute_commit(&mut self) -> Result<(), StageError> {
self.post_execute_commit_counter.fetch_add(1, Ordering::Relaxed);
Ok(())
}
fn unwind(&mut self, _: &Provider, _input: UnwindInput) -> Result<UnwindOutput, StageError> {
self.unwind_outputs
.pop_front()
.unwrap_or_else(|| panic!("Test stage {} unwound too many times.", self.id))
}
fn post_unwind_commit(&mut self) -> Result<(), StageError> {
self.post_unwind_commit_counter.fetch_add(1, Ordering::Relaxed);
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/error.rs | crates/stages/api/src/error.rs | use crate::PipelineEvent;
use alloy_eips::eip1898::BlockWithParent;
use reth_consensus::ConsensusError;
use reth_errors::{BlockExecutionError, DatabaseError, RethError};
use reth_network_p2p::error::DownloadError;
use reth_provider::ProviderError;
use reth_prune::{PruneSegment, PruneSegmentError, PrunerError, UnwindTargetPrunedError};
use reth_static_file_types::StaticFileSegment;
use thiserror::Error;
use tokio::sync::broadcast::error::SendError;
/// Represents the specific error type within a block error.
#[derive(Error, Debug)]
pub enum BlockErrorKind {
/// The block encountered a validation error.
#[error("validation error: {0}")]
Validation(#[from] ConsensusError),
/// The block encountered an execution error.
#[error("execution error: {0}")]
Execution(#[from] BlockExecutionError),
}
impl BlockErrorKind {
/// Returns `true` if the error is a state root error.
pub const fn is_state_root_error(&self) -> bool {
matches!(self, Self::Validation(err) if err.is_state_root_error())
}
}
/// A stage execution error.
#[derive(Error, Debug)]
pub enum StageError {
/// The stage encountered an error related to a block.
#[error("stage encountered an error in block #{number}: {error}", number = block.block.number)]
Block {
/// The block that caused the error.
block: Box<BlockWithParent>,
/// The specific error type, either consensus or execution error.
#[source]
error: BlockErrorKind,
},
/// The stage encountered a downloader error where the responses cannot be attached to the
/// current head.
#[error(
"stage encountered inconsistent chain: \
downloaded header #{header_number} ({header_hash}) is detached from \
local head #{head_number} ({head_hash}): {error}",
header_number = header.block.number,
header_hash = header.block.hash,
head_number = local_head.block.number,
head_hash = local_head.block.hash,
)]
DetachedHead {
/// The local head we attempted to attach to.
local_head: Box<BlockWithParent>,
/// The header we attempted to attach.
header: Box<BlockWithParent>,
/// The error that occurred when attempting to attach the header.
#[source]
error: Box<ConsensusError>,
},
/// The headers stage is missing sync gap.
#[error("missing sync gap")]
MissingSyncGap,
/// The stage encountered a database error.
#[error("internal database error occurred: {0}")]
Database(#[from] DatabaseError),
/// Invalid pruning configuration
#[error(transparent)]
PruningConfiguration(#[from] PruneSegmentError),
/// Pruner error
#[error(transparent)]
Pruner(#[from] PrunerError),
/// Invalid checkpoint passed to the stage
#[error("invalid stage checkpoint: {0}")]
StageCheckpoint(u64),
/// Missing download buffer on stage execution.
/// Returned if stage execution was called without polling for readiness.
#[error("missing download buffer")]
MissingDownloadBuffer,
/// Download channel closed
#[error("download channel closed")]
ChannelClosed,
/// The stage encountered a database integrity error.
#[error("database integrity error occurred: {0}")]
DatabaseIntegrity(#[from] ProviderError),
/// Invalid download response. Applicable for stages which
/// rely on external downloaders
#[error("invalid download response: {0}")]
Download(#[from] DownloadError),
/// Database is ahead of static file data.
#[error("missing static file data for block number: {number}", number = block.block.number)]
MissingStaticFileData {
/// Starting block with missing data.
block: Box<BlockWithParent>,
/// Static File segment
segment: StaticFileSegment,
},
/// The prune checkpoint for the given segment is missing.
#[error("missing prune checkpoint for {0}")]
MissingPruneCheckpoint(PruneSegment),
/// Post Execute Commit error
#[error("post execute commit error occurred: {_0}")]
PostExecuteCommit(&'static str),
/// Internal error
#[error(transparent)]
Internal(#[from] RethError),
/// The stage encountered a recoverable error.
///
/// These types of errors are caught by the [Pipeline][crate::Pipeline] and trigger a restart
/// of the stage.
#[error(transparent)]
Recoverable(Box<dyn core::error::Error + Send + Sync>),
/// The stage encountered a fatal error.
///
/// These types of errors stop the pipeline.
#[error(transparent)]
Fatal(Box<dyn core::error::Error + Send + Sync>),
}
impl StageError {
/// If the error is fatal the pipeline will stop.
pub const fn is_fatal(&self) -> bool {
matches!(
self,
Self::Database(_) |
Self::Download(_) |
Self::DatabaseIntegrity(_) |
Self::StageCheckpoint(_) |
Self::MissingDownloadBuffer |
Self::MissingSyncGap |
Self::ChannelClosed |
Self::Internal(_) |
Self::Fatal(_)
)
}
}
impl From<std::io::Error> for StageError {
fn from(source: std::io::Error) -> Self {
Self::Fatal(Box::new(source))
}
}
/// A pipeline execution error.
#[derive(Error, Debug)]
pub enum PipelineError {
/// The pipeline encountered an irrecoverable error in one of the stages.
#[error(transparent)]
Stage(#[from] StageError),
/// The pipeline encountered a database error.
#[error(transparent)]
Database(#[from] DatabaseError),
/// Provider error.
#[error(transparent)]
Provider(#[from] ProviderError),
/// The pipeline encountered an error while trying to send an event.
#[error("pipeline encountered an error while trying to send an event")]
Channel(#[from] Box<SendError<PipelineEvent>>),
/// Internal error
#[error(transparent)]
Internal(#[from] RethError),
/// The pipeline encountered an unwind when `fail_on_unwind` was set to `true`.
#[error("unexpected unwind")]
UnexpectedUnwind,
/// Unwind target pruned error.
#[error(transparent)]
UnwindTargetPruned(#[from] UnwindTargetPrunedError),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/util.rs | crates/stages/api/src/util.rs | pub(crate) mod opt {
/// Get an [Option] with the maximum value, compared between the passed in value and the inner
/// value of the [Option]. If the [Option] is `None`, then an option containing the passed in
/// value will be returned.
pub(crate) fn max<T: Ord + Copy>(a: Option<T>, b: T) -> Option<T> {
a.map_or(Some(b), |v| Some(std::cmp::max(v, b)))
}
/// Get an [Option] with the minimum value, compared between the passed in value and the inner
/// value of the [Option]. If the [Option] is `None`, then an option containing the passed in
/// value will be returned.
pub(crate) fn min<T: Ord + Copy>(a: Option<T>, b: T) -> Option<T> {
a.map_or(Some(b), |v| Some(std::cmp::min(v, b)))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn opt_max() {
assert_eq!(max(None, 5), Some(5));
assert_eq!(max(Some(1), 5), Some(5));
assert_eq!(max(Some(10), 5), Some(10));
}
#[test]
fn opt_min() {
assert_eq!(min(None, 5), Some(5));
assert_eq!(min(Some(1), 5), Some(1));
assert_eq!(min(Some(10), 5), Some(5));
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/metrics/listener.rs | crates/stages/api/src/metrics/listener.rs | use crate::{metrics::SyncMetrics, StageCheckpoint, StageId};
use alloy_primitives::BlockNumber;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
time::Duration,
};
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tracing::trace;
/// Alias type for metric producers to use.
pub type MetricEventsSender = UnboundedSender<MetricEvent>;
/// Collection of metric events.
#[derive(Clone, Copy, Debug)]
pub enum MetricEvent {
/// Sync reached new height. All stage checkpoints are updated.
SyncHeight {
/// Maximum height measured in block number that sync reached.
height: BlockNumber,
},
/// Stage reached new checkpoint.
StageCheckpoint {
/// Stage ID.
stage_id: StageId,
/// Stage checkpoint.
checkpoint: StageCheckpoint,
/// Maximum known block number reachable by this stage.
/// If specified, `entities_total` metric is updated.
max_block_number: Option<BlockNumber>,
/// The duration of stage iteration including database commit.
elapsed: Duration,
},
}
/// Metrics routine that listens to new metric events on the `events_rx` receiver.
/// Upon receiving new event, related metrics are updated.
#[derive(Debug)]
pub struct MetricsListener {
events_rx: UnboundedReceiver<MetricEvent>,
pub(crate) sync_metrics: SyncMetrics,
}
impl MetricsListener {
/// Creates a new [`MetricsListener`] with the provided receiver of [`MetricEvent`].
pub fn new(events_rx: UnboundedReceiver<MetricEvent>) -> Self {
Self { events_rx, sync_metrics: SyncMetrics::default() }
}
fn handle_event(&mut self, event: MetricEvent) {
trace!(target: "sync::metrics", ?event, "Metric event received");
match event {
MetricEvent::SyncHeight { height } => {
for stage_id in StageId::ALL {
self.handle_event(MetricEvent::StageCheckpoint {
stage_id,
checkpoint: StageCheckpoint {
block_number: height,
stage_checkpoint: None,
},
max_block_number: Some(height),
elapsed: Duration::default(),
});
}
}
MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number, elapsed } => {
let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id);
stage_metrics.total_elapsed.increment(elapsed.as_secs_f64());
stage_metrics.checkpoint.set(checkpoint.block_number as f64);
let (processed, total) = match checkpoint.entities() {
Some(entities) => (entities.processed, Some(entities.total)),
None => (checkpoint.block_number, max_block_number),
};
stage_metrics.entities_processed.set(processed as f64);
if let Some(total) = total {
stage_metrics.entities_total.set(total as f64);
}
}
}
}
}
impl Future for MetricsListener {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
// Loop until we drain the `events_rx` channel
loop {
let Some(event) = ready!(this.events_rx.poll_recv(cx)) else {
// Channel has closed
return Poll::Ready(())
};
this.handle_event(event);
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/metrics/mod.rs | crates/stages/api/src/metrics/mod.rs | mod listener;
mod sync_metrics;
pub use listener::{MetricEvent, MetricEventsSender, MetricsListener};
use sync_metrics::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/metrics/sync_metrics.rs | crates/stages/api/src/metrics/sync_metrics.rs | use crate::StageId;
use reth_metrics::{metrics::Gauge, Metrics};
use std::collections::HashMap;
/// Container for per-stage sync metrics, lazily populated as stages report progress.
#[derive(Debug, Default)]
pub(crate) struct SyncMetrics {
    /// Metrics handles keyed by the stage they belong to.
    pub(crate) stages: HashMap<StageId, StageMetrics>,
}
impl SyncMetrics {
    /// Returns existing or initializes a new instance of [`StageMetrics`] for the provided
    /// [`StageId`].
    pub(crate) fn get_stage_metrics(&mut self, stage_id: StageId) -> &mut StageMetrics {
        self.stages.entry(stage_id).or_insert_with(|| {
            // Each stage gets its own labeled set of gauges.
            let labels = [("stage", stage_id.to_string())];
            StageMetrics::new_with_labels(&labels)
        })
    }
}
/// Per-stage gauges exported under the `sync` metrics scope.
#[derive(Metrics)]
#[metrics(scope = "sync")]
pub(crate) struct StageMetrics {
    /// The block number of the last commit for a stage.
    pub(crate) checkpoint: Gauge,
    /// The number of processed entities of the last commit for a stage, if applicable.
    pub(crate) entities_processed: Gauge,
    /// The number of total entities of the last commit for a stage, if applicable.
    pub(crate) entities_total: Gauge,
    /// The number of seconds spent executing the stage and committing the data.
    pub(crate) total_elapsed: Gauge,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/pipeline/ctrl.rs | crates/stages/api/src/pipeline/ctrl.rs | use alloy_eips::eip1898::BlockWithParent;
use alloy_primitives::BlockNumber;
/// Determines the control flow during pipeline execution.
///
/// See [`Pipeline::run_loop`](crate::Pipeline::run_loop) for more information.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum ControlFlow {
    /// An unwind was requested and must be performed before continuing.
    Unwind {
        /// The block to unwind to.
        ///
        /// This marks the highest block to which the stage should unwind to.
        /// For example, unwinding to block 10 should remove all data for blocks above 10 (>=11).
        target: BlockNumber,
        /// The block that caused the unwind.
        bad_block: Box<BlockWithParent>,
    },
    /// The pipeline made progress.
    Continue {
        /// Block number reached by the stage.
        block_number: BlockNumber,
    },
    /// Pipeline made no progress.
    NoProgress {
        /// Block number reached by the stage, if known.
        block_number: Option<BlockNumber>,
    },
}
impl ControlFlow {
    /// Whether the pipeline should continue executing stages.
    pub const fn should_continue(&self) -> bool {
        // Every state other than an unwind request lets the pipeline proceed.
        !self.is_unwind()
    }

    /// Returns true if the control flow is unwind.
    pub const fn is_unwind(&self) -> bool {
        matches!(self, Self::Unwind { .. })
    }

    /// Returns the pipeline block number the stage reached, if the state is not `Unwind`.
    pub const fn block_number(&self) -> Option<BlockNumber> {
        match self {
            Self::Continue { block_number } => Some(*block_number),
            Self::NoProgress { block_number } => *block_number,
            Self::Unwind { .. } => None,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/pipeline/event.rs | crates/stages/api/src/pipeline/event.rs | use crate::{
stage::{ExecOutput, UnwindInput, UnwindOutput},
StageCheckpoint, StageId,
};
use alloy_primitives::BlockNumber;
use std::fmt::{Display, Formatter};
/// An event emitted by a [Pipeline][crate::Pipeline].
///
/// It is possible for multiple of these events to be emitted over the duration of a pipeline's
/// execution since:
///
/// - Other stages may ask the pipeline to unwind
/// - The pipeline will loop indefinitely unless a target block is set
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PipelineEvent {
    /// Emitted when a stage is about to be prepared for a run.
    Prepare {
        /// Pipeline stages progress.
        pipeline_stages_progress: PipelineStagesProgress,
        /// The stage that is about to be run.
        stage_id: StageId,
        /// The previous checkpoint of the stage.
        checkpoint: Option<StageCheckpoint>,
        /// The block number up to which the stage is running, if known.
        target: Option<BlockNumber>,
    },
    /// Emitted when a stage is about to be run.
    Run {
        /// Pipeline stages progress.
        pipeline_stages_progress: PipelineStagesProgress,
        /// The stage that is about to be run.
        stage_id: StageId,
        /// The previous checkpoint of the stage.
        checkpoint: Option<StageCheckpoint>,
        /// The block number up to which the stage is running, if known.
        target: Option<BlockNumber>,
    },
    /// Emitted when a stage has run a single time.
    Ran {
        /// Pipeline stages progress.
        pipeline_stages_progress: PipelineStagesProgress,
        /// The stage that was run.
        stage_id: StageId,
        /// The result of executing the stage.
        result: ExecOutput,
    },
    /// Emitted when a stage is about to be unwound.
    Unwind {
        /// The stage that is about to be unwound.
        stage_id: StageId,
        /// The unwind parameters.
        input: UnwindInput,
    },
    /// Emitted when a stage has been unwound.
    Unwound {
        /// The stage that was unwound.
        stage_id: StageId,
        /// The result of unwinding the stage.
        result: UnwindOutput,
    },
    /// Emitted when a stage encounters an error either during execution or unwinding.
    Error {
        /// The stage that encountered an error.
        stage_id: StageId,
    },
    /// Emitted when a stage was skipped due to its run conditions not being met:
    ///
    /// - The stage might have progressed beyond the point of our target block
    /// - The stage might not need to be unwound since it has not progressed past the unwind target
    /// - The stage requires that the pipeline has reached the tip, but it has not done so yet
    Skipped {
        /// The stage that was skipped.
        stage_id: StageId,
    },
}
/// Pipeline stages progress.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct PipelineStagesProgress {
    /// 1-indexed ID of the stage that is about to be run out of total stages in the pipeline.
    pub current: usize,
    /// Total number of stages in the pipeline.
    pub total: usize,
}

impl Display for PipelineStagesProgress {
    /// Formats the progress as `current/total`, e.g. `3/12`.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let Self { current, total } = self;
        write!(f, "{current}/{total}")
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/pipeline/builder.rs | crates/stages/api/src/pipeline/builder.rs | use crate::{pipeline::BoxedStage, MetricEventsSender, Pipeline, Stage, StageId, StageSet};
use alloy_primitives::{BlockNumber, B256};
use reth_provider::{providers::ProviderNodeTypes, DatabaseProviderFactory, ProviderFactory};
use reth_static_file::StaticFileProducer;
use tokio::sync::watch;
/// Builds a [`Pipeline`].
#[must_use = "call `build` to construct the pipeline"]
pub struct PipelineBuilder<Provider> {
    /// All configured stages in the order they will be executed.
    stages: Vec<BoxedStage<Provider>>,
    /// The maximum block number to sync to.
    max_block: Option<BlockNumber>,
    /// A sender for the current chain tip to sync to (the field is a `watch::Sender`, not a
    /// receiver).
    tip_tx: Option<watch::Sender<B256>>,
    /// Optional channel over which the pipeline emits stage checkpoint metric events.
    metrics_tx: Option<MetricEventsSender>,
    /// Whether an unwind during the run should be treated as an error.
    fail_on_unwind: bool,
}
impl<Provider> PipelineBuilder<Provider> {
    /// Add a stage to the pipeline.
    pub fn add_stage<S>(mut self, stage: S) -> Self
    where
        S: Stage<Provider> + 'static,
    {
        // Stages execute in the order they are added.
        self.stages.push(Box::new(stage));
        self
    }

    /// Add a set of stages to the pipeline.
    ///
    /// Stages can be grouped into a set by using a [`StageSet`].
    ///
    /// To customize the stages in the set (reorder, disable, insert a stage) call
    /// [`builder`][StageSet::builder] on the set which will convert it to a
    /// [`StageSetBuilder`][crate::StageSetBuilder].
    pub fn add_stages<Set: StageSet<Provider>>(mut self, set: Set) -> Self {
        self.stages.extend(set.builder().build());
        self
    }

    /// Set the target block.
    ///
    /// Once this block is reached, the pipeline will stop.
    pub const fn with_max_block(mut self, block: BlockNumber) -> Self {
        self.max_block = Some(block);
        self
    }

    /// Set the tip sender.
    pub fn with_tip_sender(mut self, tip_tx: watch::Sender<B256>) -> Self {
        self.tip_tx = Some(tip_tx);
        self
    }

    /// Set the metric events sender.
    pub fn with_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self {
        self.metrics_tx = Some(metrics_tx);
        self
    }

    /// Set whether pipeline should fail on unwind.
    pub const fn with_fail_on_unwind(mut self, yes: bool) -> Self {
        self.fail_on_unwind = yes;
        self
    }

    /// Builds the final [`Pipeline`] using the given database.
    pub fn build<N>(
        self,
        provider_factory: ProviderFactory<N>,
        static_file_producer: StaticFileProducer<ProviderFactory<N>>,
    ) -> Pipeline<N>
    where
        N: ProviderNodeTypes,
        ProviderFactory<N>: DatabaseProviderFactory<ProviderRW = Provider>,
    {
        // Move every configured option into the pipeline; the rest start at their defaults.
        let Self { stages, max_block, tip_tx, metrics_tx, fail_on_unwind } = self;
        Pipeline {
            provider_factory,
            static_file_producer,
            stages,
            max_block,
            tip_tx,
            metrics_tx,
            fail_on_unwind,
            event_sender: Default::default(),
            progress: Default::default(),
            last_detached_head_unwind_target: None,
            detached_head_attempts: 0,
        }
    }
}
impl<Provider> Default for PipelineBuilder<Provider> {
    fn default() -> Self {
        // An empty builder: no stages, no target block, no channels, unwinds allowed.
        Self { stages: Vec::new(), max_block: None, tip_tx: None, metrics_tx: None, fail_on_unwind: false }
    }
}
impl<Provider> std::fmt::Debug for PipelineBuilder<Provider> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Boxed stages are not `Debug`; show their ids instead.
        let stage_ids: Vec<StageId> = self.stages.iter().map(|stage| stage.id()).collect();
        f.debug_struct("PipelineBuilder")
            .field("stages", &stage_ids)
            .field("max_block", &self.max_block)
            .field("fail_on_unwind", &self.fail_on_unwind)
            .finish()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/pipeline/progress.rs | crates/stages/api/src/pipeline/progress.rs | use crate::{util::opt, ControlFlow};
use alloy_primitives::BlockNumber;
/// Tracks the block heights reached by stages while the pipeline runs.
#[derive(Debug, Default)]
pub(crate) struct PipelineProgress {
    /// Block number reached by the stage.
    pub(crate) block_number: Option<BlockNumber>,
    /// The maximum block number achieved by any stage during the execution of the pipeline.
    pub(crate) maximum_block_number: Option<BlockNumber>,
    /// The minimum block number achieved by any stage during the execution of the pipeline.
    pub(crate) minimum_block_number: Option<BlockNumber>,
}
impl PipelineProgress {
    /// Records that a stage reached `block_number`, updating the min/max watermarks.
    pub(crate) fn update(&mut self, block_number: BlockNumber) {
        self.minimum_block_number = opt::min(self.minimum_block_number, block_number);
        self.maximum_block_number = opt::max(self.maximum_block_number, block_number);
        self.block_number = Some(block_number);
    }

    /// Get next control flow step
    pub(crate) const fn next_ctrl(&self) -> ControlFlow {
        match self.block_number {
            Some(number) => ControlFlow::Continue { block_number: number },
            None => ControlFlow::NoProgress { block_number: None },
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/pipeline/mod.rs | crates/stages/api/src/pipeline/mod.rs | mod ctrl;
mod event;
pub use crate::pipeline::ctrl::ControlFlow;
use crate::{PipelineTarget, StageCheckpoint, StageId};
use alloy_primitives::{BlockNumber, B256};
pub use event::*;
use futures_util::Future;
use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH;
use reth_provider::{
providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader,
ChainStateBlockReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory,
PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter,
};
use reth_prune::PrunerBuilder;
use reth_static_file::StaticFileProducer;
use reth_tokio_util::{EventSender, EventStream};
use std::{
pin::Pin,
time::{Duration, Instant},
};
use tokio::sync::watch;
use tracing::*;
mod builder;
mod progress;
mod set;
use crate::{
BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, PipelineError, Stage,
StageError, StageExt, UnwindInput,
};
pub use builder::*;
use progress::*;
use reth_errors::RethResult;
pub use set::*;
/// A container for a queued stage.
pub(crate) type BoxedStage<DB> = Box<dyn Stage<DB>>;
/// The future that returns the owned pipeline and the result of the pipeline run. See
/// [`Pipeline::run_as_fut`].
pub type PipelineFut<N> = Pin<Box<dyn Future<Output = PipelineWithResult<N>> + Send>>;
/// The pipeline type itself paired with the result of [`Pipeline::run_as_fut`].
pub type PipelineWithResult<N> = (Pipeline<N>, Result<ControlFlow, PipelineError>);
#[cfg_attr(doc, aquamarine::aquamarine)]
/// A staged sync pipeline.
///
/// The pipeline executes queued [stages][Stage] serially. An external component determines the tip
/// of the chain and the pipeline then executes each stage in order from the current local chain tip
/// and the external chain tip. When a stage is executed, it will run until it reaches the chain
/// tip.
///
/// After the entire pipeline has been run, it will run again unless asked to stop (see
/// [`Pipeline::set_max_block`]).
///
/// `include_mmd!("docs/mermaid/pipeline.mmd")`
///
/// # Unwinding
///
/// In case of a validation error (as determined by the consensus engine) in one of the stages, the
/// pipeline will unwind the stages in reverse order of execution. It is also possible to
/// request an unwind manually (see [`Pipeline::unwind`]).
///
/// # Defaults
///
/// The [`DefaultStages`](crate::sets::DefaultStages) are used to fully sync reth.
pub struct Pipeline<N: ProviderNodeTypes> {
    /// Provider factory.
    provider_factory: ProviderFactory<N>,
    /// All configured stages in the order they will be executed.
    stages: Vec<BoxedStage<<ProviderFactory<N> as DatabaseProviderFactory>::ProviderRW>>,
    /// The maximum block number to sync to.
    max_block: Option<BlockNumber>,
    /// Moves database data into static files after each pipeline pass.
    static_file_producer: StaticFileProducer<ProviderFactory<N>>,
    /// Sender for events the pipeline emits.
    event_sender: EventSender<PipelineEvent>,
    /// Keeps track of the progress of the pipeline.
    progress: PipelineProgress,
    /// A Sender for the current chain tip to sync to.
    ///
    /// This is used to notify the headers stage about a new sync target.
    tip_tx: Option<watch::Sender<B256>>,
    /// Optional channel over which stage checkpoint metric events are reported.
    metrics_tx: Option<MetricEventsSender>,
    /// Whether an unwind should fail the syncing process. Should only be set when downloading
    /// blocks from trusted sources and expecting them to be valid.
    fail_on_unwind: bool,
    /// Block that was chosen as a target of the last unwind triggered by
    /// [`StageError::DetachedHead`] error.
    last_detached_head_unwind_target: Option<B256>,
    /// Number of consecutive unwind attempts due to [`StageError::DetachedHead`] for the current
    /// fork.
    detached_head_attempts: u64,
}
impl<N: ProviderNodeTypes> Pipeline<N> {
    /// Construct a pipeline using a [`PipelineBuilder`].
    pub fn builder() -> PipelineBuilder<<ProviderFactory<N> as DatabaseProviderFactory>::ProviderRW>
    {
        PipelineBuilder::default()
    }

    /// Return the minimum block number achieved by
    /// any stage during the execution of the pipeline.
    pub const fn minimum_block_number(&self) -> Option<u64> {
        self.progress.minimum_block_number
    }

    /// Set tip for reverse sync.
    ///
    /// # Panics
    ///
    /// Panics if no tip sender was configured.
    #[track_caller]
    pub fn set_tip(&self, tip: B256) {
        let sender = self.tip_tx.as_ref().expect("tip sender is set");
        // A closed channel only means nobody listens for tips anymore; log and move on.
        if sender.send(tip).is_err() {
            warn!(target: "sync::pipeline", "Chain tip channel closed");
        }
    }

    /// Listen for events on the pipeline.
    pub fn events(&self) -> EventStream<PipelineEvent> {
        self.event_sender.new_listener()
    }

    /// Get a mutable reference to a stage by index.
    pub fn stage(
        &mut self,
        idx: usize,
    ) -> &mut dyn Stage<<ProviderFactory<N> as DatabaseProviderFactory>::ProviderRW> {
        &mut self.stages[idx]
    }
}
impl<N: ProviderNodeTypes> Pipeline<N> {
/// Registers progress metrics for each registered stage
pub fn register_metrics(&mut self) -> Result<(), PipelineError> {
let Some(metrics_tx) = &mut self.metrics_tx else { return Ok(()) };
let provider = self.provider_factory.provider()?;
for stage in &self.stages {
let stage_id = stage.id();
let _ = metrics_tx.send(MetricEvent::StageCheckpoint {
stage_id,
checkpoint: provider.get_stage_checkpoint(stage_id)?.unwrap_or_default(),
max_block_number: None,
elapsed: Duration::default(),
});
}
Ok(())
}
    /// Consume the pipeline and run it until it reaches the provided tip, if set. Return the
    /// pipeline and its result as a future.
    #[track_caller]
    pub fn run_as_fut(mut self, target: Option<PipelineTarget>) -> PipelineFut<N> {
        // Metric registration failures are non-fatal and deliberately ignored.
        let _ = self.register_metrics();
        Box::pin(async move {
            // NOTE: the tip should only be None if we are in continuous sync mode.
            if let Some(target) = target {
                match target {
                    PipelineTarget::Sync(tip) => self.set_tip(tip),
                    // A one-shot unwind: flush data to static files first, unwind, then
                    // return without entering the run loop.
                    PipelineTarget::Unwind(target) => {
                        if let Err(err) = self.move_to_static_files() {
                            return (self, Err(err.into()))
                        }
                        if let Err(err) = self.unwind(target, None) {
                            return (self, Err(err))
                        }
                        self.progress.update(target);
                        return (self, Ok(ControlFlow::Continue { block_number: target }))
                    }
                }
            }
            let result = self.run_loop().await;
            trace!(target: "sync::pipeline", ?target, ?result, "Pipeline finished");
            // The pipeline is returned alongside the result so the caller regains ownership.
            (self, result)
        })
    }
    /// Run the pipeline in an infinite loop. Will terminate early if the user has specified
    /// a `max_block` in the pipeline.
    pub async fn run(&mut self) -> Result<(), PipelineError> {
        let _ = self.register_metrics(); // ignore error
        loop {
            let next_action = self.run_loop().await?;
            // When configured, treat any unwind as a failure of the whole run.
            if next_action.is_unwind() && self.fail_on_unwind {
                return Err(PipelineError::UnexpectedUnwind)
            }
            // Terminate the loop early if it's reached the maximum user
            // configured block. The *minimum* across stages is used so every stage has
            // reached the target before stopping.
            if next_action.should_continue() &&
                self.progress
                    .minimum_block_number
                    .zip(self.max_block)
                    .is_some_and(|(progress, target)| progress >= target)
            {
                trace!(
                    target: "sync::pipeline",
                    ?next_action,
                    minimum_block_number = ?self.progress.minimum_block_number,
                    max_block = ?self.max_block,
                    "Terminating pipeline."
                );
                return Ok(())
            }
        }
    }
    /// Performs one pass of the pipeline across all stages. After successful
    /// execution of each stage, it proceeds to commit it to the database.
    ///
    /// If any stage is unsuccessful at execution, we proceed to
    /// unwind. This will undo the progress across the entire pipeline
    /// up to the block that caused the error.
    ///
    /// Returns the control flow after it ran the pipeline.
    /// This will be [`ControlFlow::Continue`] or [`ControlFlow::NoProgress`] of the _last_ stage in
    /// the pipeline (for example the `Finish` stage). Or [`ControlFlow::Unwind`] of the stage
    /// that caused the unwind.
    pub async fn run_loop(&mut self) -> Result<ControlFlow, PipelineError> {
        self.move_to_static_files()?;
        let mut previous_stage = None;
        for stage_index in 0..self.stages.len() {
            let stage = &self.stages[stage_index];
            let stage_id = stage.id();
            trace!(target: "sync::pipeline", stage = %stage_id, "Executing stage");
            let next = self.execute_stage_to_completion(previous_stage, stage_index).await?;
            trace!(target: "sync::pipeline", stage = %stage_id, ?next, "Completed stage");
            match next {
                ControlFlow::NoProgress { block_number } => {
                    if let Some(block_number) = block_number {
                        self.progress.update(block_number);
                    }
                }
                ControlFlow::Continue { block_number } => self.progress.update(block_number),
                // Any stage requesting an unwind aborts the pass; the unwind is performed
                // immediately and reported to the caller.
                ControlFlow::Unwind { target, bad_block } => {
                    self.unwind(target, Some(bad_block.block.number))?;
                    return Ok(ControlFlow::Unwind { target, bad_block })
                }
            }
            // The committed checkpoint of this stage becomes the target for the next stage.
            previous_stage = Some(
                self.provider_factory
                    .provider()?
                    .get_stage_checkpoint(stage_id)?
                    .unwrap_or_default()
                    .block_number,
            );
        }
        Ok(self.progress.next_ctrl())
    }
    /// Run [static file producer](StaticFileProducer) and [pruner](reth_prune::Pruner) to **move**
    /// all data from the database to static files for corresponding
    /// [segments](reth_static_file_types::StaticFileSegment), according to their [stage
    /// checkpoints](StageCheckpoint):
    /// - [`StaticFileSegment::Headers`](reth_static_file_types::StaticFileSegment::Headers) ->
    ///   [`StageId::Headers`]
    /// - [`StaticFileSegment::Receipts`](reth_static_file_types::StaticFileSegment::Receipts) ->
    ///   [`StageId::Execution`]
    /// - [`StaticFileSegment::Transactions`](reth_static_file_types::StaticFileSegment::Transactions)
    ///   -> [`StageId::Bodies`]
    ///
    /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the
    /// lock is occupied.
    pub fn move_to_static_files(&self) -> RethResult<()> {
        // Copies data from database to static files
        let lowest_static_file_height =
            self.static_file_producer.lock().copy_to_static_files()?.min_block_num();
        // Deletes data which has been copied to static files.
        // If nothing was copied in this run there is nothing to prune.
        if let Some(prune_tip) = lowest_static_file_height {
            // Run the pruner so we don't potentially end up with higher height in the database vs
            // static files during a pipeline unwind
            let mut pruner = PrunerBuilder::new(Default::default())
                .delete_limit(usize::MAX)
                .build_with_provider_factory(self.provider_factory.clone());
            pruner.run(prune_tip)?;
        }
        Ok(())
    }
    /// Unwind the stages to the target block (exclusive).
    ///
    /// If the unwind is due to a bad block the number of that block should be specified.
    pub fn unwind(
        &mut self,
        to: BlockNumber,
        bad_block: Option<BlockNumber>,
    ) -> Result<(), PipelineError> {
        // Add validation before starting unwind
        let provider = self.provider_factory.provider()?;
        let latest_block = provider.last_block_number()?;
        // Get the actual pruning configuration
        let prune_modes = provider.prune_modes_ref();
        let checkpoints = provider.get_prune_checkpoints()?;
        // Refuse to unwind into a range that pruning has already removed.
        prune_modes.ensure_unwind_target_unpruned(latest_block, to, &checkpoints)?;
        // Unwind stages in reverse order of execution
        let unwind_pipeline = self.stages.iter_mut().rev();
        // Legacy Engine: This prevents a race condition in which the `StaticFileProducer` could
        // attempt to proceed with a finalized block which has been unwinded
        let _locked_sf_producer = self.static_file_producer.lock();
        let mut provider_rw = self.provider_factory.database_provider_rw()?;
        for stage in unwind_pipeline {
            let stage_id = stage.id();
            let span = info_span!("Unwinding", stage = %stage_id);
            let _enter = span.enter();
            let mut checkpoint = provider_rw.get_stage_checkpoint(stage_id)?.unwrap_or_default();
            // A stage that never progressed past the target has nothing to unwind.
            if checkpoint.block_number < to {
                debug!(
                    target: "sync::pipeline",
                    from = %checkpoint.block_number,
                    %to,
                    "Unwind point too far for stage"
                );
                self.event_sender.notify(PipelineEvent::Skipped { stage_id });
                continue
            }
            info!(
                target: "sync::pipeline",
                from = %checkpoint.block_number,
                %to,
                ?bad_block,
                "Starting unwind"
            );
            // A stage may unwind in multiple steps, each persisted separately.
            while checkpoint.block_number > to {
                let unwind_started_at = Instant::now();
                let input = UnwindInput { checkpoint, unwind_to: to, bad_block };
                self.event_sender.notify(PipelineEvent::Unwind { stage_id, input });
                let output = stage.unwind(&provider_rw, input);
                match output {
                    Ok(unwind_output) => {
                        checkpoint = unwind_output.checkpoint;
                        info!(
                            target: "sync::pipeline",
                            stage = %stage_id,
                            unwind_to = to,
                            progress = checkpoint.block_number,
                            done = checkpoint.block_number == to,
                            "Stage unwound"
                        );
                        provider_rw.save_stage_checkpoint(stage_id, checkpoint)?;
                        // Notify event listeners and update metrics.
                        self.event_sender
                            .notify(PipelineEvent::Unwound { stage_id, result: unwind_output });
                        if let Some(metrics_tx) = &mut self.metrics_tx {
                            let _ = metrics_tx.send(MetricEvent::StageCheckpoint {
                                stage_id,
                                checkpoint,
                                // We assume it was set in the previous execute iteration, so it
                                // doesn't change when we unwind.
                                max_block_number: None,
                                elapsed: unwind_started_at.elapsed(),
                            });
                        }
                        // update finalized block if needed
                        let last_saved_finalized_block_number =
                            provider_rw.last_finalized_block_number()?;
                        // If None, that means the finalized block is not written so we should
                        // always save in that case
                        if last_saved_finalized_block_number.is_none() ||
                            Some(checkpoint.block_number) < last_saved_finalized_block_number
                        {
                            provider_rw.save_finalized_block_number(BlockNumber::from(
                                checkpoint.block_number,
                            ))?;
                        }
                        // Commit this unwind step, then open a fresh read-write provider for
                        // the next step.
                        UnifiedStorageWriter::commit_unwind(provider_rw)?;
                        stage.post_unwind_commit()?;
                        provider_rw = self.provider_factory.database_provider_rw()?;
                    }
                    // Any unwind failure is escalated to a fatal pipeline error.
                    Err(err) => {
                        self.event_sender.notify(PipelineEvent::Error { stage_id });
                        return Err(PipelineError::Stage(StageError::Fatal(Box::new(err))))
                    }
                }
            }
        }
        Ok(())
    }
    /// Executes the stage at `stage_index` repeatedly until it reports `done`, committing after
    /// every successful iteration, and returns the resulting [`ControlFlow`].
    async fn execute_stage_to_completion(
        &mut self,
        previous_stage: Option<BlockNumber>,
        stage_index: usize,
    ) -> Result<ControlFlow, PipelineError> {
        let total_stages = self.stages.len();
        let stage_id = self.stage(stage_index).id();
        let mut made_progress = false;
        // Run toward the configured max block if set, otherwise toward the checkpoint the
        // previous stage reached.
        let target = self.max_block.or(previous_stage);
        loop {
            let prev_checkpoint = self.provider_factory.get_stage_checkpoint(stage_id)?;
            let stage_reached_max_block = prev_checkpoint
                .zip(self.max_block)
                .is_some_and(|(prev_progress, target)| prev_progress.block_number >= target);
            if stage_reached_max_block {
                warn!(
                    target: "sync::pipeline",
                    stage = %stage_id,
                    max_block = self.max_block,
                    prev_block = prev_checkpoint.map(|progress| progress.block_number),
                    "Stage reached target block, skipping."
                );
                self.event_sender.notify(PipelineEvent::Skipped { stage_id });
                // We reached the maximum block, so we skip the stage
                return Ok(ControlFlow::NoProgress {
                    block_number: prev_checkpoint.map(|progress| progress.block_number),
                })
            }
            let exec_input = ExecInput { target, checkpoint: prev_checkpoint };
            self.event_sender.notify(PipelineEvent::Prepare {
                pipeline_stages_progress: PipelineStagesProgress {
                    current: stage_index + 1,
                    total: total_stages,
                },
                stage_id,
                checkpoint: prev_checkpoint,
                target,
            });
            if let Err(err) = self.stage(stage_index).execute_ready(exec_input).await {
                self.event_sender.notify(PipelineEvent::Error { stage_id });
                // `None` from the error handler means "retry this stage".
                match self.on_stage_error(stage_id, prev_checkpoint, err)? {
                    Some(ctrl) => return Ok(ctrl),
                    None => continue,
                };
            }
            let stage_started_at = Instant::now();
            let provider_rw = self.provider_factory.database_provider_rw()?;
            self.event_sender.notify(PipelineEvent::Run {
                pipeline_stages_progress: PipelineStagesProgress {
                    current: stage_index + 1,
                    total: total_stages,
                },
                stage_id,
                checkpoint: prev_checkpoint,
                target,
            });
            match self.stage(stage_index).execute(&provider_rw, exec_input) {
                Ok(out @ ExecOutput { checkpoint, done }) => {
                    // Update stage checkpoint.
                    provider_rw.save_stage_checkpoint(stage_id, checkpoint)?;
                    // Commit processed data to the database.
                    UnifiedStorageWriter::commit(provider_rw)?;
                    // Invoke stage post commit hook.
                    self.stage(stage_index).post_execute_commit()?;
                    // Notify event listeners and update metrics.
                    self.event_sender.notify(PipelineEvent::Ran {
                        pipeline_stages_progress: PipelineStagesProgress {
                            current: stage_index + 1,
                            total: total_stages,
                        },
                        stage_id,
                        result: out.clone(),
                    });
                    if let Some(metrics_tx) = &mut self.metrics_tx {
                        let _ = metrics_tx.send(MetricEvent::StageCheckpoint {
                            stage_id,
                            checkpoint,
                            max_block_number: target,
                            elapsed: stage_started_at.elapsed(),
                        });
                    }
                    // Progress is tracked across iterations: any change in checkpoint counts.
                    let block_number = checkpoint.block_number;
                    let prev_block_number = prev_checkpoint.unwrap_or_default().block_number;
                    made_progress |= block_number != prev_block_number;
                    if done {
                        return Ok(if made_progress {
                            ControlFlow::Continue { block_number }
                        } else {
                            ControlFlow::NoProgress { block_number: Some(block_number) }
                        })
                    }
                }
                Err(err) => {
                    // Discard the open transaction before deciding how to react to the error.
                    drop(provider_rw);
                    self.event_sender.notify(PipelineEvent::Error { stage_id });
                    if let Some(ctrl) = self.on_stage_error(stage_id, prev_checkpoint, err)? {
                        return Ok(ctrl)
                    }
                }
            }
        }
    }
    /// Maps a stage error to the pipeline's reaction: `Ok(Some(ControlFlow::Unwind { .. }))` to
    /// unwind, `Ok(None)` to retry the stage, or `Err` to abort the pipeline.
    fn on_stage_error(
        &mut self,
        stage_id: StageId,
        prev_checkpoint: Option<StageCheckpoint>,
        err: StageError,
    ) -> Result<Option<ControlFlow>, PipelineError> {
        if let StageError::DetachedHead { local_head, header, error } = err {
            warn!(target: "sync::pipeline", stage = %stage_id, ?local_head, ?header, %error, "Stage encountered detached head");
            // Count consecutive detached-head unwinds on the same fork so the unwind depth
            // grows each time; any other situation resets the counter.
            if let Some(last_detached_head_unwind_target) = self.last_detached_head_unwind_target {
                if local_head.block.hash == last_detached_head_unwind_target &&
                    header.block.number == local_head.block.number + 1
                {
                    self.detached_head_attempts += 1;
                } else {
                    self.detached_head_attempts = 1;
                }
            } else {
                self.detached_head_attempts = 1;
            }
            // We unwind because of a detached head.
            let unwind_to = local_head
                .block
                .number
                .saturating_sub(
                    BEACON_CONSENSUS_REORG_UNWIND_DEPTH.saturating_mul(self.detached_head_attempts),
                )
                .max(1);
            self.last_detached_head_unwind_target = self.provider_factory.block_hash(unwind_to)?;
            Ok(Some(ControlFlow::Unwind { target: unwind_to, bad_block: local_head }))
        } else if let StageError::Block { block, error } = err {
            match error {
                BlockErrorKind::Validation(validation_error) => {
                    error!(
                        target: "sync::pipeline",
                        stage = %stage_id,
                        bad_block = %block.block.number,
                        "Stage encountered a validation error: {validation_error}"
                    );
                    // FIXME: When handling errors, we do not commit the database transaction. This
                    // leads to the Merkle stage not clearing its checkpoint, and restarting from an
                    // invalid place.
                    let provider_rw = self.provider_factory.database_provider_rw()?;
                    provider_rw.save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?;
                    provider_rw.save_stage_checkpoint(
                        StageId::MerkleExecute,
                        prev_checkpoint.unwrap_or_default(),
                    )?;
                    UnifiedStorageWriter::commit(provider_rw)?;
                    // We unwind because of a validation error. If the unwind itself
                    // fails, we bail entirely,
                    // otherwise we restart the execution loop from the
                    // beginning.
                    Ok(Some(ControlFlow::Unwind {
                        target: prev_checkpoint.unwrap_or_default().block_number,
                        bad_block: block,
                    }))
                }
                BlockErrorKind::Execution(execution_error) => {
                    error!(
                        target: "sync::pipeline",
                        stage = %stage_id,
                        bad_block = %block.block.number,
                        "Stage encountered an execution error: {execution_error}"
                    );
                    // We unwind because of an execution error. If the unwind itself
                    // fails, we bail entirely,
                    // otherwise we restart
                    // the execution loop from the beginning.
                    Ok(Some(ControlFlow::Unwind {
                        target: prev_checkpoint.unwrap_or_default().block_number,
                        bad_block: block,
                    }))
                }
            }
        } else if let StageError::MissingStaticFileData { block, segment } = err {
            error!(
                target: "sync::pipeline",
                stage = %stage_id,
                bad_block = %block.block.number,
                segment = %segment,
                "Stage is missing static file data."
            );
            Ok(Some(ControlFlow::Unwind { target: block.block.number - 1, bad_block: block }))
        } else if err.is_fatal() {
            error!(target: "sync::pipeline", stage = %stage_id, "Stage encountered a fatal error: {err}");
            Err(err.into())
        } else {
            // On other errors we assume they are recoverable if we discard the
            // transaction and run the stage again.
            warn!(
                target: "sync::pipeline",
                stage = %stage_id,
                "Stage encountered a non-fatal error: {err}. Retrying..."
            );
            Ok(None)
        }
    }
}
impl<N: ProviderNodeTypes> std::fmt::Debug for Pipeline<N> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Boxed stages are not `Debug`; render their ids instead.
        let stage_ids: Vec<StageId> = self.stages.iter().map(|stage| stage.id()).collect();
        f.debug_struct("Pipeline")
            .field("stages", &stage_ids)
            .field("max_block", &self.max_block)
            .field("event_sender", &self.event_sender)
            .field("fail_on_unwind", &self.fail_on_unwind)
            .finish()
    }
}
#[cfg(test)]
mod tests {
use std::sync::atomic::Ordering;
use super::*;
use crate::{test_utils::TestStage, UnwindOutput};
use assert_matches::assert_matches;
use reth_consensus::ConsensusError;
use reth_errors::ProviderError;
use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB};
use reth_prune::PruneModes;
use reth_testing_utils::generators::{self, random_block_with_parent};
use tokio_stream::StreamExt;
#[test]
fn record_progress_calculates_outliers() {
let mut progress = PipelineProgress::default();
progress.update(10);
assert_eq!(progress.minimum_block_number, Some(10));
assert_eq!(progress.maximum_block_number, Some(10));
progress.update(20);
assert_eq!(progress.minimum_block_number, Some(10));
assert_eq!(progress.maximum_block_number, Some(20));
progress.update(1);
assert_eq!(progress.minimum_block_number, Some(1));
assert_eq!(progress.maximum_block_number, Some(20));
}
#[test]
fn progress_ctrl_flow() {
let mut progress = PipelineProgress::default();
assert_eq!(progress.next_ctrl(), ControlFlow::NoProgress { block_number: None });
progress.update(1);
assert_eq!(progress.next_ctrl(), ControlFlow::Continue { block_number: 1 });
}
    /// Runs a simple pipeline.
    #[tokio::test]
    async fn run_pipeline() {
        let provider_factory = create_test_provider_factory();
        // Stage A reports checkpoint 20 (beyond the max block of 10); stage B stops at 10.
        let stage_a = TestStage::new(StageId::Other("A"))
            .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(20), done: true }));
        let (stage_a, post_execute_commit_counter_a) = stage_a.with_post_execute_commit_counter();
        let (stage_a, post_unwind_commit_counter_a) = stage_a.with_post_unwind_commit_counter();
        let stage_b = TestStage::new(StageId::Other("B"))
            .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }));
        let (stage_b, post_execute_commit_counter_b) = stage_b.with_post_execute_commit_counter();
        let (stage_b, post_unwind_commit_counter_b) = stage_b.with_post_unwind_commit_counter();
        let mut pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
            .add_stage(stage_a)
            .add_stage(stage_b)
            .with_max_block(10)
            .build(
                provider_factory.clone(),
                StaticFileProducer::new(provider_factory.clone(), PruneModes::default()),
            );
        // Subscribe to events before spawning so no event is missed.
        let events = pipeline.events();
        // Run pipeline
        tokio::spawn(async move {
            pipeline.run().await.unwrap();
        });
        // Check that the stages were run in order
        assert_eq!(
            events.collect::<Vec<PipelineEvent>>().await,
            vec![
                PipelineEvent::Prepare {
                    pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 },
                    stage_id: StageId::Other("A"),
                    checkpoint: None,
                    target: Some(10),
                },
                PipelineEvent::Run {
                    pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 },
                    stage_id: StageId::Other("A"),
                    checkpoint: None,
                    target: Some(10),
                },
                PipelineEvent::Ran {
                    pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 },
                    stage_id: StageId::Other("A"),
                    result: ExecOutput { checkpoint: StageCheckpoint::new(20), done: true },
                },
                PipelineEvent::Prepare {
                    pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 },
                    stage_id: StageId::Other("B"),
                    checkpoint: None,
                    target: Some(10),
                },
                PipelineEvent::Run {
                    pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 },
                    stage_id: StageId::Other("B"),
                    checkpoint: None,
                    target: Some(10),
                },
                PipelineEvent::Ran {
                    pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 },
                    stage_id: StageId::Other("B"),
                    result: ExecOutput { checkpoint: StageCheckpoint::new(10), done: true },
                },
            ]
        );
        // Each stage committed exactly once and never unwound.
        assert_eq!(post_execute_commit_counter_a.load(Ordering::Relaxed), 1);
        assert_eq!(post_unwind_commit_counter_a.load(Ordering::Relaxed), 0);
        assert_eq!(post_execute_commit_counter_b.load(Ordering::Relaxed), 1);
        assert_eq!(post_unwind_commit_counter_b.load(Ordering::Relaxed), 0);
    }
/// Unwinds a simple pipeline.
#[tokio::test]
async fn unwind_pipeline() {
let provider_factory = create_test_provider_factory();
let stage_a = TestStage::new(StageId::Other("A"))
.add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(100), done: true }))
.add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) }));
let (stage_a, post_execute_commit_counter_a) = stage_a.with_post_execute_commit_counter();
let (stage_a, post_unwind_commit_counter_a) = stage_a.with_post_unwind_commit_counter();
let stage_b = TestStage::new(StageId::Other("B"))
.add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }))
.add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) }));
let (stage_b, post_execute_commit_counter_b) = stage_b.with_post_execute_commit_counter();
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/api/src/pipeline/set.rs | crates/stages/api/src/pipeline/set.rs | use crate::{Stage, StageId};
use std::{
collections::HashMap,
fmt::{Debug, Formatter},
};
/// Combines multiple [`Stage`]s into a single unit.
///
/// A [`StageSet`] is a logical chunk of stages that depend on each other. It is up to the
/// individual stage sets to determine what kind of configuration they expose.
///
/// Individual stages in the set can be added, removed and overridden using [`StageSetBuilder`].
pub trait StageSet<Provider>: Sized {
    /// Configures the stages in the set.
    ///
    /// Consumes the set and returns a [`StageSetBuilder`] for further customization.
    fn builder(self) -> StageSetBuilder<Provider>;
    /// Overrides the given [`Stage`], if it is in this set.
    ///
    /// # Panics
    ///
    /// Panics if the [`Stage`] is not in this set.
    fn set<S: Stage<Provider> + 'static>(self, stage: S) -> StageSetBuilder<Provider> {
        self.builder().set(stage)
    }
}
/// A stage stored in a [`StageSetBuilder`] together with its enabled/disabled state.
struct StageEntry<Provider> {
    /// The boxed stage implementation.
    stage: Box<dyn Stage<Provider>>,
    /// Whether the stage is included when the set is built (see `StageSetBuilder::build`).
    enabled: bool,
}
impl<Provider> Debug for StageEntry<Provider> {
    /// Debug-formats the entry, identifying the stage by its id.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let Self { stage, enabled } = self;
        f.debug_struct("StageEntry").field("stage", &stage.id()).field("enabled", enabled).finish()
    }
}
/// Helper to create and configure a [`StageSet`].
///
/// The builder provides ordering helpers to ensure that stages that depend on each other are added
/// to the final sync pipeline before/after their dependencies.
///
/// Stages inside the set can be disabled, enabled, overridden and reordered.
pub struct StageSetBuilder<Provider> {
    /// All stages keyed by id, including disabled ones.
    stages: HashMap<StageId, StageEntry<Provider>>,
    /// Stage ids in the order they will execute; each id is expected to have an
    /// entry in `stages`.
    order: Vec<StageId>,
}
impl<Provider> Default for StageSetBuilder<Provider> {
fn default() -> Self {
Self { stages: HashMap::default(), order: Vec::new() }
}
}
impl<Provider> Debug for StageSetBuilder<Provider> {
    /// Debug-formats the builder, showing both the stage map and the execution order.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("StageSetBuilder");
        dbg.field("stages", &self.stages);
        dbg.field("order", &self.order);
        dbg.finish()
    }
}
impl<Provider> StageSetBuilder<Provider> {
    /// Returns the position of `stage_id` in the execution order.
    ///
    /// # Panics
    ///
    /// Panics if the stage is not in the order list.
    fn index_of(&self, stage_id: StageId) -> usize {
        let index = self.order.iter().position(|&id| id == stage_id);
        index.unwrap_or_else(|| panic!("Stage does not exist in set: {stage_id}"))
    }
    /// Inserts `stage` into the stage map and, if a stage with the same id already
    /// existed, removes its *old* position from the order list (the new position at
    /// `added_at_index` is kept).
    fn upsert_stage_state(&mut self, stage: Box<dyn Stage<Provider>>, added_at_index: usize) {
        let stage_id = stage.id();
        if self.stages.insert(stage.id(), StageEntry { stage, enabled: true }).is_some() {
            // The id now appears twice in `order`; drop the occurrence that is not
            // the freshly added one.
            if let Some(to_remove) = self
                .order
                .iter()
                .enumerate()
                .find(|(i, id)| *i != added_at_index && **id == stage_id)
                .map(|(i, _)| i)
            {
                self.order.remove(to_remove);
            }
        }
    }
    /// Overrides the given [`Stage`], if it is in this set.
    ///
    /// # Panics
    ///
    /// Panics if the [`Stage`] is not in this set.
    pub fn set<S: Stage<Provider> + 'static>(mut self, stage: S) -> Self {
        let entry = self
            .stages
            .get_mut(&stage.id())
            .unwrap_or_else(|| panic!("Stage does not exist in set: {}", stage.id()));
        entry.stage = Box::new(stage);
        self
    }
    /// Returns iterator over the stages in this set,
    /// In the same order they would be executed in the pipeline.
    pub fn stages(&self) -> impl Iterator<Item = StageId> + '_ {
        self.order.iter().copied()
    }
    /// Replaces a stage with the given ID with a new stage.
    ///
    /// If the new stage has a different ID,
    /// it will maintain the original stage's position in the execution order.
    pub fn replace<S: Stage<Provider> + 'static>(mut self, stage_id: StageId, stage: S) -> Self {
        // Validate up front that the stage being replaced exists.
        self.stages
            .get(&stage_id)
            .unwrap_or_else(|| panic!("Stage does not exist in set: {stage_id}"));
        if stage.id() == stage_id {
            // Same id: a plain override suffices.
            return self.set(stage);
        }
        // Different id: keep the slot in the order list but swap the id it refers to.
        let index = self.index_of(stage_id);
        self.stages.remove(&stage_id);
        self.order[index] = stage.id();
        self.upsert_stage_state(Box::new(stage), index);
        self
    }
    /// Adds the given [`Stage`] at the end of this set.
    ///
    /// If the stage was already in the group, it is removed from its previous place.
    pub fn add_stage<S: Stage<Provider> + 'static>(mut self, stage: S) -> Self {
        let target_index = self.order.len();
        self.order.push(stage.id());
        self.upsert_stage_state(Box::new(stage), target_index);
        self
    }
    /// Adds the given [`Stage`] at the end of this set if it's [`Some`].
    ///
    /// If the stage was already in the group, it is removed from its previous place.
    pub fn add_stage_opt<S: Stage<Provider> + 'static>(self, stage: Option<S>) -> Self {
        if let Some(stage) = stage {
            self.add_stage(stage)
        } else {
            self
        }
    }
    /// Adds the given [`StageSet`] to the end of this set.
    ///
    /// If a stage is in both sets, it is removed from its previous place in this set. Because of
    /// this, it is advisable to merge sets first and re-order stages after if needed.
    pub fn add_set<Set: StageSet<Provider>>(mut self, set: Set) -> Self {
        for stage in set.builder().build() {
            let target_index = self.order.len();
            self.order.push(stage.id());
            self.upsert_stage_state(stage, target_index);
        }
        self
    }
    /// Adds the given [`Stage`] before the stage with the given [`StageId`].
    ///
    /// If the stage was already in the group, it is removed from its previous place.
    ///
    /// # Panics
    ///
    /// Panics if the dependency stage is not in this set.
    pub fn add_before<S: Stage<Provider> + 'static>(mut self, stage: S, before: StageId) -> Self {
        let target_index = self.index_of(before);
        self.order.insert(target_index, stage.id());
        self.upsert_stage_state(Box::new(stage), target_index);
        self
    }
    /// Adds the given [`Stage`] after the stage with the given [`StageId`].
    ///
    /// If the stage was already in the group, it is removed from its previous place.
    ///
    /// # Panics
    ///
    /// Panics if the dependency stage is not in this set.
    pub fn add_after<S: Stage<Provider> + 'static>(mut self, stage: S, after: StageId) -> Self {
        let target_index = self.index_of(after) + 1;
        self.order.insert(target_index, stage.id());
        self.upsert_stage_state(Box::new(stage), target_index);
        self
    }
    /// Enables the given stage.
    ///
    /// All stages within a [`StageSet`] are enabled by default.
    ///
    /// # Panics
    ///
    /// Panics if the stage is not in this set.
    pub fn enable(mut self, stage_id: StageId) -> Self {
        let entry =
            self.stages.get_mut(&stage_id).expect("Cannot enable a stage that is not in the set.");
        entry.enabled = true;
        self
    }
    /// Disables the given stage.
    ///
    /// The disabled [`Stage`] keeps its place in the set, so it can be used for ordering with
    /// [`StageSetBuilder::add_before`] or [`StageSetBuilder::add_after`], or it can be re-enabled.
    ///
    /// All stages within a [`StageSet`] are enabled by default.
    ///
    /// # Panics
    ///
    /// Panics if the stage is not in this set.
    #[track_caller]
    pub fn disable(mut self, stage_id: StageId) -> Self {
        let entry = self
            .stages
            .get_mut(&stage_id)
            .unwrap_or_else(|| panic!("Cannot disable a stage that is not in the set: {stage_id}"));
        entry.enabled = false;
        self
    }
    /// Disables all given stages. See [`disable`](Self::disable).
    ///
    /// If any of the stages is not in this set, it is ignored.
    pub fn disable_all(mut self, stages: &[StageId]) -> Self {
        for stage_id in stages {
            let Some(entry) = self.stages.get_mut(stage_id) else { continue };
            entry.enabled = false;
        }
        self
    }
    /// Disables the given stage if the given closure returns true.
    ///
    /// See [`Self::disable`]
    #[track_caller]
    pub fn disable_if<F>(self, stage_id: StageId, f: F) -> Self
    where
        F: FnOnce() -> bool,
    {
        if f() {
            return self.disable(stage_id)
        }
        self
    }
    /// Disables all given stages if the given closure returns true.
    ///
    /// See [`Self::disable`]
    #[track_caller]
    pub fn disable_all_if<F>(self, stages: &[StageId], f: F) -> Self
    where
        F: FnOnce() -> bool,
    {
        if f() {
            return self.disable_all(stages)
        }
        self
    }
    /// Consumes the builder and returns the contained [`Stage`]s in the order specified.
    ///
    /// Disabled stages are skipped.
    pub fn build(mut self) -> Vec<Box<dyn Stage<Provider>>> {
        let mut stages = Vec::new();
        for id in &self.order {
            if let Some(entry) = self.stages.remove(id) {
                if entry.enabled {
                    stages.push(entry.stage);
                }
            }
        }
        stages
    }
}
impl<Provider> StageSet<Provider> for StageSetBuilder<Provider> {
    /// A builder is already a configured set, so this is the identity.
    fn builder(self) -> Self {
        self
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/types/src/checkpoints.rs | crates/stages/types/src/checkpoints.rs | use super::StageId;
use alloc::{format, string::String, vec::Vec};
use alloy_primitives::{Address, BlockNumber, B256, U256};
use core::ops::RangeInclusive;
use reth_trie_common::{hash_builder::HashBuilderState, StoredSubNode};
/// Saves the progress of Merkle stage.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct MerkleCheckpoint {
    /// The target block number.
    pub target_block: BlockNumber,
    /// The last hashed account key processed.
    pub last_account_key: B256,
    /// Previously recorded walker stack.
    pub walker_stack: Vec<StoredSubNode>,
    /// The hash builder state.
    pub state: HashBuilderState,
    /// Optional storage root checkpoint for the last processed account.
    ///
    /// `None` when no per-account storage-root computation was in flight
    /// (this is also what [`MerkleCheckpoint::new`] produces).
    pub storage_root_checkpoint: Option<StorageRootMerkleCheckpoint>,
}
impl MerkleCheckpoint {
    /// Creates a new Merkle checkpoint.
    ///
    /// `storage_root_checkpoint` is initialized to `None`.
    pub const fn new(
        target_block: BlockNumber,
        last_account_key: B256,
        walker_stack: Vec<StoredSubNode>,
        state: HashBuilderState,
    ) -> Self {
        Self { target_block, last_account_key, walker_stack, state, storage_root_checkpoint: None }
    }
}
#[cfg(any(test, feature = "reth-codec"))]
impl reth_codecs::Compact for MerkleCheckpoint {
    // Wire format:
    //   target_block (u64) | last_account_key (32 bytes) | walker_stack_len (u16) |
    //   walker_stack entries | hash builder state |
    //   flag byte (1 = Some, 0 = None) | optional storage root checkpoint
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        let mut len = 0;
        buf.put_u64(self.target_block);
        len += 8;
        buf.put_slice(self.last_account_key.as_slice());
        len += self.last_account_key.len();
        buf.put_u16(self.walker_stack.len() as u16);
        len += 2;
        for item in &self.walker_stack {
            len += item.to_compact(buf);
        }
        len += self.state.to_compact(buf);
        // Encode the optional storage root checkpoint
        match &self.storage_root_checkpoint {
            Some(checkpoint) => {
                // one means Some
                buf.put_u8(1);
                len += 1;
                len += checkpoint.to_compact(buf);
            }
            None => {
                // zero means None
                buf.put_u8(0);
                len += 1;
            }
        }
        len
    }
    fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) {
        use bytes::Buf;
        let target_block = buf.get_u64();
        let last_account_key = B256::from_slice(&buf[..32]);
        buf.advance(32);
        let walker_stack_len = buf.get_u16() as usize;
        let mut walker_stack = Vec::with_capacity(walker_stack_len);
        for _ in 0..walker_stack_len {
            let (item, rest) = StoredSubNode::from_compact(buf, 0);
            walker_stack.push(item);
            buf = rest;
        }
        let (state, mut buf) = HashBuilderState::from_compact(buf, 0);
        // Decode the storage root checkpoint if it exists.
        // An empty buffer here means a legacy checkpoint encoded before the flag
        // byte existed; it is treated as `None` for backwards compatibility.
        let (storage_root_checkpoint, buf) = if buf.is_empty() {
            (None, buf)
        } else {
            match buf.get_u8() {
                1 => {
                    let (checkpoint, rest) = StorageRootMerkleCheckpoint::from_compact(buf, 0);
                    (Some(checkpoint), rest)
                }
                // Any other flag value (including the expected 0) decodes as `None`;
                // the flag byte has already been consumed by `get_u8`.
                _ => (None, buf),
            }
        };
        (Self { target_block, last_account_key, walker_stack, state, storage_root_checkpoint }, buf)
    }
}
/// Saves the progress of a storage root computation.
///
/// This contains the walker stack, hash builder state, and the last storage key processed.
/// The account fields are carried along so the account entry can be finalized once the
/// storage root computation resumes and completes.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct StorageRootMerkleCheckpoint {
    /// The last storage key processed.
    pub last_storage_key: B256,
    /// Previously recorded walker stack.
    pub walker_stack: Vec<StoredSubNode>,
    /// The hash builder state.
    pub state: HashBuilderState,
    /// The account nonce.
    pub account_nonce: u64,
    /// The account balance.
    pub account_balance: U256,
    /// The account bytecode hash.
    pub account_bytecode_hash: B256,
}
impl StorageRootMerkleCheckpoint {
    /// Creates a new storage root merkle checkpoint from its parts.
    pub const fn new(
        last_storage_key: B256,
        walker_stack: Vec<StoredSubNode>,
        state: HashBuilderState,
        account_nonce: u64,
        account_balance: U256,
        account_bytecode_hash: B256,
    ) -> Self {
        Self {
            last_storage_key,
            walker_stack,
            state,
            account_nonce,
            account_balance,
            account_bytecode_hash,
        }
    }
}
#[cfg(any(test, feature = "reth-codec"))]
impl reth_codecs::Compact for StorageRootMerkleCheckpoint {
    // Wire format:
    //   last_storage_key (32 bytes) | walker_stack_len (u16) | walker_stack entries |
    //   hash builder state | account_nonce (u64) | balance_len (u8) |
    //   balance (compact, `balance_len` bytes) | account_bytecode_hash (32 bytes)
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        let mut len = 0;
        buf.put_slice(self.last_storage_key.as_slice());
        len += self.last_storage_key.len();
        buf.put_u16(self.walker_stack.len() as u16);
        len += 2;
        for item in &self.walker_stack {
            len += item.to_compact(buf);
        }
        len += self.state.to_compact(buf);
        // Encode account fields
        buf.put_u64(self.account_nonce);
        len += 8;
        // The balance length prefix is required so `from_compact` can pass the exact
        // compact length to `U256::from_compact`.
        let balance_len = self.account_balance.byte_len() as u8;
        buf.put_u8(balance_len);
        len += 1;
        len += self.account_balance.to_compact(buf);
        buf.put_slice(self.account_bytecode_hash.as_slice());
        len += 32;
        len
    }
    fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) {
        use bytes::Buf;
        let last_storage_key = B256::from_slice(&buf[..32]);
        buf.advance(32);
        let walker_stack_len = buf.get_u16() as usize;
        let mut walker_stack = Vec::with_capacity(walker_stack_len);
        for _ in 0..walker_stack_len {
            let (item, rest) = StoredSubNode::from_compact(buf, 0);
            walker_stack.push(item);
            buf = rest;
        }
        let (state, mut buf) = HashBuilderState::from_compact(buf, 0);
        // Decode account fields
        let account_nonce = buf.get_u64();
        let balance_len = buf.get_u8() as usize;
        let (account_balance, mut buf) = U256::from_compact(buf, balance_len);
        let account_bytecode_hash = B256::from_slice(&buf[..32]);
        buf.advance(32);
        (
            Self {
                last_storage_key,
                walker_stack,
                state,
                account_nonce,
                account_balance,
                account_bytecode_hash,
            },
            buf,
        )
    }
}
/// Saves the progress of `AccountHashing` stage.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct AccountHashingCheckpoint {
    /// The next account to start hashing from.
    pub address: Option<Address>,
    /// Block range which this checkpoint is valid for.
    pub block_range: CheckpointBlockRange,
    /// Progress measured in accounts.
    pub progress: EntitiesCheckpoint,
}
/// Saves the progress of `StorageHashing` stage.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct StorageHashingCheckpoint {
    /// The next account to start hashing from.
    pub address: Option<Address>,
    /// The next storage slot to start hashing from.
    pub storage: Option<B256>,
    /// Block range which this checkpoint is valid for.
    pub block_range: CheckpointBlockRange,
    /// Progress measured in storage slots.
    pub progress: EntitiesCheckpoint,
}
/// Saves the progress of Execution stage.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ExecutionCheckpoint {
    /// Block range which this checkpoint is valid for.
    pub block_range: CheckpointBlockRange,
    /// Progress measured in gas.
    pub progress: EntitiesCheckpoint,
}
/// Saves the progress of Headers stage.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct HeadersCheckpoint {
    /// Block range which this checkpoint is valid for.
    pub block_range: CheckpointBlockRange,
    /// Progress measured in headers (note: this was previously documented as "gas",
    /// which appears to be a copy-paste from [`ExecutionCheckpoint`]).
    pub progress: EntitiesCheckpoint,
}
/// Saves the progress of Index History stages.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct IndexHistoryCheckpoint {
    /// Block range which this checkpoint is valid for.
    pub block_range: CheckpointBlockRange,
    /// Progress measured in changesets.
    pub progress: EntitiesCheckpoint,
}
/// Saves the progress of abstract stage iterating over or downloading entities.
///
/// The unit of "entity" depends on the stage embedding this checkpoint
/// (accounts, storage slots, gas, changesets, ...).
#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct EntitiesCheckpoint {
    /// Number of entities already processed.
    pub processed: u64,
    /// Total entities to be processed.
    pub total: u64,
}
impl EntitiesCheckpoint {
    /// Formats entities checkpoint as percentage, i.e. `processed / total`.
    ///
    /// The value is truncated (not rounded) to two decimal places, so that e.g.
    /// `99.999%` is shown as `99.99%` rather than `100.00%`.
    ///
    /// Return [None] if `total == 0`.
    pub fn fmt_percentage(&self) -> Option<String> {
        if self.total == 0 {
            return None
        }
        #[cfg(not(feature = "std"))]
        {
            // `f64::floor` is unavailable without `std`. The previous fallback
            // `format!("{:.2}%", (p * 100.0) / 100.0)` rounded to nearest instead of
            // truncating (the multiply/divide is a no-op). Use exact integer basis
            // points instead, which truncate by construction.
            let bps = self.processed as u128 * 10_000 / self.total as u128;
            Some(format!("{}.{:02}%", bps / 100, bps % 100))
        }
        #[cfg(feature = "std")]
        {
            // Calculate percentage with 2 decimal places, truncating via `floor` so
            // that 99.999% becomes 99.99% and not 100%.
            let percentage = 100.0 * self.processed as f64 / self.total as f64;
            Some(format!("{:.2}%", (percentage * 100.0).floor() / 100.0))
        }
    }
}
/// Saves the block range. Usually, it's used to check the validity of some stage checkpoint across
/// multiple executions.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct CheckpointBlockRange {
    /// The first block of the range, inclusive.
    pub from: BlockNumber,
    /// The last block of the range, inclusive.
    pub to: BlockNumber,
}
impl From<RangeInclusive<BlockNumber>> for CheckpointBlockRange {
    /// Converts an owned inclusive block range into a checkpoint block range.
    fn from(range: RangeInclusive<BlockNumber>) -> Self {
        let (from, to) = range.into_inner();
        Self { from, to }
    }
}
impl From<&RangeInclusive<BlockNumber>> for CheckpointBlockRange {
    /// Converts a borrowed inclusive block range into a checkpoint block range.
    fn from(range: &RangeInclusive<BlockNumber>) -> Self {
        Self { from: *range.start(), to: *range.end() }
    }
}
/// Saves the progress of a stage.
#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct StageCheckpoint {
    /// The maximum block processed by the stage.
    pub block_number: BlockNumber,
    /// Stage-specific checkpoint. None if stage uses only block-based checkpoints.
    pub stage_checkpoint: Option<StageUnitCheckpoint>,
}
impl StageCheckpoint {
    /// Creates a new [`StageCheckpoint`] with only `block_number` set.
    pub fn new(block_number: BlockNumber) -> Self {
        Self { block_number, ..Default::default() }
    }
    /// Sets the block number.
    pub const fn with_block_number(mut self, block_number: BlockNumber) -> Self {
        self.block_number = block_number;
        self
    }
    /// Sets the block range, if checkpoint uses block range.
    ///
    /// For stages without a block-range checkpoint variant, `self` is returned unchanged.
    pub fn with_block_range(mut self, stage_id: &StageId, from: u64, to: u64) -> Self {
        self.stage_checkpoint = Some(match stage_id {
            StageId::Execution => StageUnitCheckpoint::Execution(ExecutionCheckpoint::default()),
            StageId::AccountHashing => {
                StageUnitCheckpoint::Account(AccountHashingCheckpoint::default())
            }
            StageId::StorageHashing => {
                StageUnitCheckpoint::Storage(StorageHashingCheckpoint::default())
            }
            StageId::IndexStorageHistory | StageId::IndexAccountHistory => {
                StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint::default())
            }
            _ => return self,
        });
        // Mutate the checkpoint in place via `as_mut`.
        //
        // FIX: the previous implementation used
        // `self.stage_checkpoint.map(|mut c| c.set_block_range(from, to))`, which — because
        // `Option<StageUnitCheckpoint>` is `Copy` — mutated a *copy* of the checkpoint and
        // silently discarded the block range.
        if let Some(checkpoint) = self.stage_checkpoint.as_mut() {
            checkpoint.set_block_range(from, to);
        }
        self
    }
    /// Get the underlying [`EntitiesCheckpoint`], if any, to determine the number of entities
    /// processed, and the number of total entities to process.
    pub fn entities(&self) -> Option<EntitiesCheckpoint> {
        let stage_checkpoint = self.stage_checkpoint?;
        // Every variant carries an `EntitiesCheckpoint`, so this match is total.
        match stage_checkpoint {
            StageUnitCheckpoint::Account(AccountHashingCheckpoint {
                progress: entities, ..
            }) |
            StageUnitCheckpoint::Storage(StorageHashingCheckpoint {
                progress: entities, ..
            }) |
            StageUnitCheckpoint::Entities(entities) |
            StageUnitCheckpoint::Execution(ExecutionCheckpoint { progress: entities, .. }) |
            StageUnitCheckpoint::Headers(HeadersCheckpoint { progress: entities, .. }) |
            StageUnitCheckpoint::IndexHistory(IndexHistoryCheckpoint {
                progress: entities,
                ..
            }) => Some(entities),
        }
    }
}
// TODO(alexey): add a merkle checkpoint. Currently it's hard because [`MerkleCheckpoint`]
// is not a Copy type.
/// Stage-specific checkpoint metrics.
///
/// Stored inside [`StageCheckpoint::stage_checkpoint`].
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum StageUnitCheckpoint {
    /// Saves the progress of `AccountHashing` stage.
    Account(AccountHashingCheckpoint),
    /// Saves the progress of `StorageHashing` stage.
    Storage(StorageHashingCheckpoint),
    /// Saves the progress of abstract stage iterating over or downloading entities.
    Entities(EntitiesCheckpoint),
    /// Saves the progress of Execution stage.
    Execution(ExecutionCheckpoint),
    /// Saves the progress of Headers stage.
    Headers(HeadersCheckpoint),
    /// Saves the progress of Index History stage.
    IndexHistory(IndexHistoryCheckpoint),
}
impl StageUnitCheckpoint {
    /// Sets the block range. Returns old block range, or `None` if checkpoint doesn't use block
    /// range.
    pub const fn set_block_range(&mut self, from: u64, to: u64) -> Option<CheckpointBlockRange> {
        match self {
            Self::Account(AccountHashingCheckpoint { block_range, .. }) |
            Self::Storage(StorageHashingCheckpoint { block_range, .. }) |
            Self::Execution(ExecutionCheckpoint { block_range, .. }) |
            Self::IndexHistory(IndexHistoryCheckpoint { block_range, .. }) => {
                let old_range = *block_range;
                *block_range = CheckpointBlockRange { from, to };
                Some(old_range)
            }
            // `Entities` has no block range. NOTE(review): `Headers` also falls through
            // here even though `HeadersCheckpoint` does have a `block_range` field —
            // confirm the omission is intentional.
            _ => None,
        }
    }
}
// Arbitrary default variant; only compiled for and used by tests.
#[cfg(test)]
impl Default for StageUnitCheckpoint {
    fn default() -> Self {
        Self::Account(AccountHashingCheckpoint::default())
    }
}
/// Generates [`StageCheckpoint`] getter and builder methods.
///
/// Each tuple expands to one `Option`-returning getter and one builder-style setter for the
/// corresponding [`StageUnitCheckpoint`] variant.
///
/// NOTE: the leading `$index` metavariable is accepted but not referenced in the expansion.
macro_rules! stage_unit_checkpoints {
    ($(($index:expr,$enum_variant:tt,$checkpoint_ty:ty,#[doc = $fn_get_doc:expr]$fn_get_name:ident,#[doc = $fn_build_doc:expr]$fn_build_name:ident)),+) => {
        impl StageCheckpoint {
            $(
                #[doc = $fn_get_doc]
                pub const fn $fn_get_name(&self) -> Option<$checkpoint_ty> {
                    match self.stage_checkpoint {
                        Some(StageUnitCheckpoint::$enum_variant(checkpoint)) => Some(checkpoint),
                        _ => None,
                    }
                }
                #[doc = $fn_build_doc]
                pub const fn $fn_build_name(
                    mut self,
                    checkpoint: $checkpoint_ty,
                ) -> Self {
                    self.stage_checkpoint = Some(StageUnitCheckpoint::$enum_variant(checkpoint));
                    self
                }
            )+
        }
    };
}
// One entry per `StageUnitCheckpoint` variant.
stage_unit_checkpoints!(
    (
        0,
        Account,
        AccountHashingCheckpoint,
        /// Returns the account hashing stage checkpoint, if any.
        account_hashing_stage_checkpoint,
        /// Sets the stage checkpoint to account hashing.
        with_account_hashing_stage_checkpoint
    ),
    (
        1,
        Storage,
        StorageHashingCheckpoint,
        /// Returns the storage hashing stage checkpoint, if any.
        storage_hashing_stage_checkpoint,
        /// Sets the stage checkpoint to storage hashing.
        with_storage_hashing_stage_checkpoint
    ),
    (
        2,
        Entities,
        EntitiesCheckpoint,
        /// Returns the entities stage checkpoint, if any.
        entities_stage_checkpoint,
        /// Sets the stage checkpoint to entities.
        with_entities_stage_checkpoint
    ),
    (
        3,
        Execution,
        ExecutionCheckpoint,
        /// Returns the execution stage checkpoint, if any.
        execution_stage_checkpoint,
        /// Sets the stage checkpoint to execution.
        with_execution_stage_checkpoint
    ),
    (
        4,
        Headers,
        HeadersCheckpoint,
        /// Returns the headers stage checkpoint, if any.
        headers_stage_checkpoint,
        /// Sets the stage checkpoint to headers.
        with_headers_stage_checkpoint
    ),
    (
        5,
        IndexHistory,
        IndexHistoryCheckpoint,
        /// Returns the index history stage checkpoint, if any.
        index_history_stage_checkpoint,
        /// Sets the stage checkpoint to index history.
        with_index_history_stage_checkpoint
    )
);
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::b256;
    use rand::Rng;
    use reth_codecs::Compact;
    /// Round-trips a `MerkleCheckpoint` without a storage-root sub-checkpoint.
    #[test]
    fn merkle_checkpoint_roundtrip() {
        let mut rng = rand::rng();
        let checkpoint = MerkleCheckpoint {
            target_block: rng.random(),
            last_account_key: rng.random(),
            walker_stack: vec![StoredSubNode {
                key: B256::random_with(&mut rng).to_vec(),
                nibble: Some(rng.random()),
                node: None,
            }],
            state: HashBuilderState::default(),
            storage_root_checkpoint: None,
        };
        let mut buf = Vec::new();
        let encoded = checkpoint.to_compact(&mut buf);
        let (decoded, _) = MerkleCheckpoint::from_compact(&buf, encoded);
        assert_eq!(decoded, checkpoint);
    }
    /// Round-trips a standalone `StorageRootMerkleCheckpoint`.
    #[test]
    fn storage_root_merkle_checkpoint_roundtrip() {
        let mut rng = rand::rng();
        let checkpoint = StorageRootMerkleCheckpoint {
            last_storage_key: rng.random(),
            walker_stack: vec![StoredSubNode {
                key: B256::random_with(&mut rng).to_vec(),
                nibble: Some(rng.random()),
                node: None,
            }],
            state: HashBuilderState::default(),
            account_nonce: 0,
            account_balance: U256::ZERO,
            account_bytecode_hash: B256::ZERO,
        };
        let mut buf = Vec::new();
        let encoded = checkpoint.to_compact(&mut buf);
        let (decoded, _) = StorageRootMerkleCheckpoint::from_compact(&buf, encoded);
        assert_eq!(decoded, checkpoint);
    }
    /// Round-trips a `MerkleCheckpoint` that embeds a storage-root sub-checkpoint
    /// (exercises the `Some` branch of the flag byte).
    #[test]
    fn merkle_checkpoint_with_storage_root_roundtrip() {
        let mut rng = rand::rng();
        // Create a storage root checkpoint
        let storage_checkpoint = StorageRootMerkleCheckpoint {
            last_storage_key: rng.random(),
            walker_stack: vec![StoredSubNode {
                key: B256::random_with(&mut rng).to_vec(),
                nibble: Some(rng.random()),
                node: None,
            }],
            state: HashBuilderState::default(),
            account_nonce: 1,
            account_balance: U256::from(1),
            account_bytecode_hash: b256!(
                "0x0fffffffffffffffffffffffffffffff0fffffffffffffffffffffffffffffff"
            ),
        };
        // Create a merkle checkpoint with the storage root checkpoint
        let checkpoint = MerkleCheckpoint {
            target_block: rng.random(),
            last_account_key: rng.random(),
            walker_stack: vec![StoredSubNode {
                key: B256::random_with(&mut rng).to_vec(),
                nibble: Some(rng.random()),
                node: None,
            }],
            state: HashBuilderState::default(),
            storage_root_checkpoint: Some(storage_checkpoint),
        };
        let mut buf = Vec::new();
        let encoded = checkpoint.to_compact(&mut buf);
        let (decoded, _) = MerkleCheckpoint::from_compact(&buf, encoded);
        assert_eq!(decoded, checkpoint);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/types/src/lib.rs | crates/stages/types/src/lib.rs | //! Commonly used types for staged sync usage.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
mod id;
use alloy_primitives::{BlockHash, BlockNumber};
pub use id::StageId;
mod checkpoints;
pub use checkpoints::{
AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint,
HeadersCheckpoint, IndexHistoryCheckpoint, MerkleCheckpoint, StageCheckpoint,
StageUnitCheckpoint, StorageHashingCheckpoint, StorageRootMerkleCheckpoint,
};
mod execution;
pub use execution::*;
/// Direction and target block for pipeline operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PipelineTarget {
/// Target for forward synchronization, indicating a block hash to sync to.
Sync(BlockHash),
/// Target for backward unwinding, indicating a block number to unwind to.
Unwind(BlockNumber),
}
impl PipelineTarget {
/// Returns the target block hash for forward synchronization, if applicable.
///
/// # Returns
///
/// - `Some(BlockHash)`: The target block hash for forward synchronization.
/// - `None`: If the target is for backward unwinding.
pub const fn sync_target(self) -> Option<BlockHash> {
match self {
Self::Sync(hash) => Some(hash),
Self::Unwind(_) => None,
}
}
/// Returns the target block number for backward unwinding, if applicable.
///
/// # Returns
///
/// - `Some(BlockNumber)`: The target block number for backward unwinding.
/// - `None`: If the target is for forward synchronization.
pub const fn unwind_target(self) -> Option<BlockNumber> {
match self {
Self::Sync(_) => None,
Self::Unwind(number) => Some(number),
}
}
}
impl From<BlockHash> for PipelineTarget {
fn from(hash: BlockHash) -> Self {
Self::Sync(hash)
}
}
impl core::fmt::Display for PipelineTarget {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Sync(block) => {
write!(f, "Sync({block})")
}
Self::Unwind(block) => write!(f, "Unwind({block})"),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/types/src/id.rs | crates/stages/types/src/id.rs | use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::{collections::HashMap, sync::OnceLock};
/// Stage IDs for all known stages.
///
/// For custom stages, use [`StageId::Other`]
#[expect(missing_docs)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum StageId {
#[deprecated(
note = "Static Files are generated outside of the pipeline and do not require a separate stage"
)]
StaticFile,
Era,
Headers,
Bodies,
SenderRecovery,
Execution,
PruneSenderRecovery,
MerkleUnwind,
AccountHashing,
StorageHashing,
MerkleExecute,
TransactionLookup,
IndexStorageHistory,
IndexAccountHistory,
Prune,
Finish,
/// Other custom stage with a provided string identifier.
Other(&'static str),
}
/// One-time-allocated stage ids encoded as raw Vecs, useful for database
/// clients to reference them for queries instead of encoding anew per query
/// (sad heap allocation required).
#[cfg(feature = "std")]
static ENCODED_STAGE_IDS: OnceLock<HashMap<StageId, Vec<u8>>> = OnceLock::new();
impl StageId {
/// All supported Stages
pub const ALL: [Self; 15] = [
Self::Era,
Self::Headers,
Self::Bodies,
Self::SenderRecovery,
Self::Execution,
Self::PruneSenderRecovery,
Self::MerkleUnwind,
Self::AccountHashing,
Self::StorageHashing,
Self::MerkleExecute,
Self::TransactionLookup,
Self::IndexStorageHistory,
Self::IndexAccountHistory,
Self::Prune,
Self::Finish,
];
/// Stages that require state.
pub const STATE_REQUIRED: [Self; 9] = [
Self::Execution,
Self::PruneSenderRecovery,
Self::MerkleUnwind,
Self::AccountHashing,
Self::StorageHashing,
Self::MerkleExecute,
Self::IndexStorageHistory,
Self::IndexAccountHistory,
Self::Prune,
];
/// Return stage id formatted as string.
pub const fn as_str(&self) -> &str {
match self {
#[expect(deprecated)]
Self::StaticFile => "StaticFile",
Self::Era => "Era",
Self::Headers => "Headers",
Self::Bodies => "Bodies",
Self::SenderRecovery => "SenderRecovery",
Self::Execution => "Execution",
Self::PruneSenderRecovery => "PruneSenderRecovery",
Self::MerkleUnwind => "MerkleUnwind",
Self::AccountHashing => "AccountHashing",
Self::StorageHashing => "StorageHashing",
Self::MerkleExecute => "MerkleExecute",
Self::TransactionLookup => "TransactionLookup",
Self::IndexAccountHistory => "IndexAccountHistory",
Self::IndexStorageHistory => "IndexStorageHistory",
Self::Prune => "Prune",
Self::Finish => "Finish",
Self::Other(s) => s,
}
}
/// Returns true if it's a downloading stage [`StageId::Headers`] or [`StageId::Bodies`]
pub const fn is_downloading_stage(&self) -> bool {
matches!(self, Self::Era | Self::Headers | Self::Bodies)
}
/// Returns `true` if it's [`TransactionLookup`](StageId::TransactionLookup) stage.
pub const fn is_tx_lookup(&self) -> bool {
matches!(self, Self::TransactionLookup)
}
/// Returns true indicating if it's the finish stage [`StageId::Finish`]
pub const fn is_finish(&self) -> bool {
matches!(self, Self::Finish)
}
/// Get a pre-encoded raw Vec, for example, to be used as the DB key for
/// `tables::StageCheckpoints` and `tables::StageCheckpointProgresses`
pub fn get_pre_encoded(&self) -> Option<&Vec<u8>> {
#[cfg(not(feature = "std"))]
{
None
}
#[cfg(feature = "std")]
ENCODED_STAGE_IDS
.get_or_init(|| {
let mut map = HashMap::with_capacity(Self::ALL.len());
for stage_id in Self::ALL {
map.insert(stage_id, stage_id.to_string().into_bytes());
}
map
})
.get(self)
}
}
impl core::fmt::Display for StageId {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{}", self.as_str())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn stage_id_as_string() {
assert_eq!(StageId::Era.to_string(), "Era");
assert_eq!(StageId::Headers.to_string(), "Headers");
assert_eq!(StageId::Bodies.to_string(), "Bodies");
assert_eq!(StageId::SenderRecovery.to_string(), "SenderRecovery");
assert_eq!(StageId::Execution.to_string(), "Execution");
assert_eq!(StageId::MerkleUnwind.to_string(), "MerkleUnwind");
assert_eq!(StageId::AccountHashing.to_string(), "AccountHashing");
assert_eq!(StageId::StorageHashing.to_string(), "StorageHashing");
assert_eq!(StageId::MerkleExecute.to_string(), "MerkleExecute");
assert_eq!(StageId::IndexAccountHistory.to_string(), "IndexAccountHistory");
assert_eq!(StageId::IndexStorageHistory.to_string(), "IndexStorageHistory");
assert_eq!(StageId::TransactionLookup.to_string(), "TransactionLookup");
assert_eq!(StageId::Finish.to_string(), "Finish");
assert_eq!(StageId::Other("Foo").to_string(), "Foo");
}
#[test]
fn is_downloading_stage() {
assert!(StageId::Headers.is_downloading_stage());
assert!(StageId::Bodies.is_downloading_stage());
assert!(StageId::Era.is_downloading_stage());
assert!(!StageId::Execution.is_downloading_stage());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/types/src/execution.rs | crates/stages/types/src/execution.rs | use core::time::Duration;
/// The thresholds at which the execution stage writes state changes to the database.
///
/// If either of the thresholds (`max_blocks` and `max_changes`) are hit, then the execution stage
/// commits all pending changes to the database.
///
/// A third threshold, `max_changesets`, can be set to periodically write changesets to the
/// current database transaction, which frees up memory.
#[derive(Debug, Clone)]
pub struct ExecutionStageThresholds {
/// The maximum number of blocks to execute before the execution stage commits.
pub max_blocks: Option<u64>,
/// The maximum number of state changes to keep in memory before the execution stage commits.
pub max_changes: Option<u64>,
/// The maximum cumulative amount of gas to process before the execution stage commits.
pub max_cumulative_gas: Option<u64>,
/// The maximum spent on blocks processing before the execution stage commits.
pub max_duration: Option<Duration>,
}
impl Default for ExecutionStageThresholds {
fn default() -> Self {
Self {
max_blocks: Some(500_000),
max_changes: Some(5_000_000),
// 50k full blocks of 30M gas
max_cumulative_gas: Some(30_000_000 * 50_000),
// 10 minutes
max_duration: Some(Duration::from_secs(10 * 60)),
}
}
}
impl ExecutionStageThresholds {
/// Check if the batch thresholds have been hit.
#[inline]
pub fn is_end_of_batch(
&self,
blocks_processed: u64,
changes_processed: u64,
cumulative_gas_used: u64,
elapsed: Duration,
) -> bool {
blocks_processed >= self.max_blocks.unwrap_or(u64::MAX) ||
changes_processed >= self.max_changes.unwrap_or(u64::MAX) ||
cumulative_gas_used >= self.max_cumulative_gas.unwrap_or(u64::MAX) ||
elapsed >= self.max_duration.unwrap_or(Duration::MAX)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/src/lib.rs | crates/primitives/src/lib.rs | //! Commonly used types in Reth.
//!
//! This crate contains Ethereum primitive types and helper functions.
//!
//! ## Feature Flags
//!
//! - `arbitrary`: Adds `proptest` and `arbitrary` support for primitive types.
//! - `test-utils`: Export utilities for testing
//! - `reth-codec`: Enables db codec support for reth types including zstd compression for certain
//! types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
mod block;
mod receipt;
pub use reth_static_file_types as static_file;
pub mod transaction;
#[cfg(any(test, feature = "arbitrary"))]
pub use block::{generate_valid_header, valid_header_strategy};
pub use block::{Block, BlockBody, SealedBlock};
#[expect(deprecated)]
pub use block::{BlockWithSenders, SealedBlockFor, SealedBlockWithSenders};
pub use receipt::{gas_spent_by_transactions, Receipt};
pub use reth_primitives_traits::{
logs_bloom, Account, BlockTy, BodyTy, Bytecode, GotExpected, GotExpectedBoxed, Header,
HeaderTy, Log, LogData, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, StorageEntry,
TxTy,
};
pub use static_file::StaticFileSegment;
pub use alloy_consensus::{
transaction::{PooledTransaction, Recovered, TransactionMeta},
ReceiptWithBloom,
};
/// Recovered transaction
#[deprecated(note = "use `Recovered` instead")]
pub type RecoveredTx<T> = Recovered<T>;
pub use transaction::{
util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message},
InvalidTransactionError, Transaction, TransactionSigned, TxType,
};
#[expect(deprecated)]
pub use transaction::{PooledTransactionsElementEcRecovered, TransactionSignedEcRecovered};
// Re-exports
pub use reth_ethereum_forks::*;
#[cfg(feature = "c-kzg")]
pub use c_kzg as kzg;
/// Bincode-compatible serde implementations for commonly used types in Reth.
///
/// `bincode` crate doesn't work with optionally serializable serde fields, but some of the
/// Reth types require optional serialization for RPC compatibility. This module makes so that
/// all fields are serialized.
///
/// Read more: <https://github.com/bincode-org/bincode/issues/326>
#[cfg(feature = "serde-bincode-compat")]
pub mod serde_bincode_compat {
pub use reth_primitives_traits::serde_bincode_compat::*;
}
// Re-export of `EthPrimitives`
pub use reth_ethereum_primitives::EthPrimitives;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/src/block.rs | crates/primitives/src/block.rs | use alloy_consensus::Header;
use reth_ethereum_primitives::TransactionSigned;
#[cfg(any(test, feature = "arbitrary"))]
pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy};
/// Ethereum full block.
///
/// Withdrawals can be optionally included at the end of the RLP encoded message.
pub type Block<T = TransactionSigned, H = Header> = alloy_consensus::Block<T, H>;
/// A response to `GetBlockBodies`, containing bodies if any bodies were found.
///
/// Withdrawals can be optionally included at the end of the RLP encoded message.
pub type BlockBody<T = TransactionSigned, H = Header> = alloy_consensus::BlockBody<T, H>;
/// Ethereum sealed block type
pub type SealedBlock<B = Block> = reth_primitives_traits::block::SealedBlock<B>;
/// Helper type for constructing the block
#[deprecated(note = "Use `RecoveredBlock` instead")]
pub type SealedBlockFor<B = Block> = reth_primitives_traits::block::SealedBlock<B>;
/// Ethereum recovered block
#[deprecated(note = "Use `RecoveredBlock` instead")]
pub type BlockWithSenders<B = Block> = reth_primitives_traits::block::RecoveredBlock<B>;
/// Ethereum recovered block
#[deprecated(note = "Use `RecoveredBlock` instead")]
pub type SealedBlockWithSenders<B = Block> = reth_primitives_traits::block::RecoveredBlock<B>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/src/receipt.rs | crates/primitives/src/receipt.rs | /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used).
pub use reth_primitives_traits::receipt::gas_spent_by_transactions;
/// Receipt containing result of transaction execution.
pub use reth_ethereum_primitives::Receipt;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/src/transaction/signature.rs | crates/primitives/src/transaction/signature.rs | pub use reth_primitives_traits::crypto::secp256k1::{recover_signer, recover_signer_unchecked};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/src/transaction/util.rs | crates/primitives/src/transaction/util.rs | //! Utility functions for signature.
pub use reth_primitives_traits::crypto::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/src/transaction/mod.rs | crates/primitives/src/transaction/mod.rs | //! Transaction types.
use crate::Recovered;
pub use alloy_consensus::transaction::PooledTransaction;
use once_cell as _;
#[expect(deprecated)]
pub use pooled::PooledTransactionsElementEcRecovered;
pub use reth_primitives_traits::{
sync::{LazyLock, OnceLock},
transaction::{
error::{
InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError,
},
signed::SignedTransaction,
},
FillTxEnv, WithEncoded,
};
pub use signature::{recover_signer, recover_signer_unchecked};
pub use tx_type::TxType;
/// Handling transaction signature operations, including signature recovery,
/// applying chain IDs, and EIP-2 validation.
pub mod signature;
pub mod util;
mod pooled;
mod tx_type;
/// Signed transaction.
pub use reth_ethereum_primitives::{Transaction, TransactionSigned};
/// Type alias kept for backward compatibility.
#[deprecated(note = "Use `Recovered` instead")]
pub type TransactionSignedEcRecovered<T = TransactionSigned> = Recovered<T>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/src/transaction/pooled.rs | crates/primitives/src/transaction/pooled.rs | //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a
//! response to `GetPooledTransactions`.
use crate::Recovered;
use alloy_consensus::transaction::PooledTransaction;
/// A signed pooled transaction with recovered signer.
#[deprecated(note = "use `Recovered` instead")]
pub type PooledTransactionsElementEcRecovered<T = PooledTransaction> = Recovered<T>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/src/transaction/tx_type.rs | crates/primitives/src/transaction/tx_type.rs | /// Transaction Type
///
/// Currently being used as 2-bit type when encoding it to `reth_codecs::Compact` on
/// [`crate::TransactionSigned`]. Adding more transaction types will break the codec and
/// database format.
///
/// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files).
pub use alloy_consensus::TxType;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/benches/validate_blob_tx.rs | crates/primitives/benches/validate_blob_tx.rs | #![allow(missing_docs)]
use alloy_consensus::TxEip4844;
use alloy_eips::eip4844::{
env_settings::EnvKzgSettings, BlobTransactionSidecar, MAX_BLOBS_PER_BLOCK_DENCUN,
};
use criterion::{
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use proptest_arbitrary_interop::arb;
/// Benchmarks EIP-4844 blob validation.
fn blob_validation(c: &mut Criterion) {
let mut group = c.benchmark_group("Blob Transaction KZG validation");
for num_blobs in 1..=MAX_BLOBS_PER_BLOCK_DENCUN {
println!("Benchmarking validation for tx with {num_blobs} blobs");
validate_blob_tx(&mut group, "ValidateBlob", num_blobs as u64, EnvKzgSettings::Default);
}
}
fn validate_blob_tx(
group: &mut BenchmarkGroup<'_, WallTime>,
description: &str,
num_blobs: u64,
kzg_settings: EnvKzgSettings,
) {
let setup = || {
let mut runner = TestRunner::deterministic();
// generate tx and sidecar
let mut tx = arb::<TxEip4844>().new_tree(&mut runner).unwrap().current();
let mut blob_sidecar =
arb::<BlobTransactionSidecar>().new_tree(&mut runner).unwrap().current();
while blob_sidecar.blobs.len() < num_blobs as usize {
let blob_sidecar_ext =
arb::<BlobTransactionSidecar>().new_tree(&mut runner).unwrap().current();
// extend the sidecar with the new blobs
blob_sidecar.blobs.extend(blob_sidecar_ext.blobs);
blob_sidecar.proofs.extend(blob_sidecar_ext.proofs);
blob_sidecar.commitments.extend(blob_sidecar_ext.commitments);
if blob_sidecar.blobs.len() > num_blobs as usize {
blob_sidecar.blobs.truncate(num_blobs as usize);
blob_sidecar.proofs.truncate(num_blobs as usize);
blob_sidecar.commitments.truncate(num_blobs as usize);
}
}
tx.blob_versioned_hashes = blob_sidecar.versioned_hashes().collect();
(tx, blob_sidecar)
};
let group_id = format!("validate_blob | num blobs: {num_blobs} | {description}");
let kzg_settings = kzg_settings.get();
// for now we just use the default SubPoolLimit
group.bench_function(group_id, |b| {
b.iter_with_setup(setup, |(tx, blob_sidecar)| {
let r = tx.validate_blob(&blob_sidecar, kzg_settings);
(r, tx, blob_sidecar)
});
});
}
criterion_group!(validate_blob, blob_validation);
criterion_main!(validate_blob);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives/benches/recover_ecdsa_crit.rs | crates/primitives/benches/recover_ecdsa_crit.rs | #![allow(missing_docs)]
use alloy_consensus::transaction::SignerRecoverable;
use alloy_primitives::hex_literal::hex;
use alloy_rlp::Decodable;
use criterion::{criterion_group, criterion_main, Criterion};
use reth_ethereum_primitives::TransactionSigned;
/// Benchmarks the recovery of the public key from the ECDSA message using criterion.
pub fn criterion_benchmark(c: &mut Criterion) {
c.bench_function("recover ECDSA", |b| {
b.iter(|| {
let raw =hex!("f88b8212b085028fa6ae00830f424094aad593da0c8116ef7d2d594dd6a63241bccfc26c80a48318b64b000000000000000000000000641c5d790f862a58ec7abcfd644c0442e9c201b32aa0a6ef9e170bca5ffb7ac05433b13b7043de667fbb0b4a5e45d3b54fb2d6efcc63a0037ec2c05c3d60c5f5f78244ce0a3859e3a18a36c61efb061b383507d3ce19d2");
let mut pointer = raw.as_ref();
let tx = TransactionSigned::decode(&mut pointer).unwrap();
SignerRecoverable::recover_signer(&tx).unwrap();
}
)
});
}
criterion_group! {
name = benches;
config = Criterion::default();
targets = criterion_benchmark
}
criterion_main!(benches);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-utils/src/lib.rs | crates/era-utils/src/lib.rs | //! Utilities to store history from downloaded ERA files with storage-api
//! and export it to recreate era1 files.
//!
//! The import is downloaded using [`reth_era_downloader`] and parsed using [`reth_era`].
mod history;
/// Export block history data from the database to recreate era1 files.
mod export;
/// Export history from storage-api between 2 blocks
/// with parameters defined in [`ExportConfig`].
pub use export::{export, ExportConfig};
/// Imports history from ERA files.
pub use history::{
build_index, decode, import, open, process, process_iter, save_stage_checkpoints, ProcessIter,
};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-utils/src/history.rs | crates/era-utils/src/history.rs | use alloy_primitives::{BlockHash, BlockNumber, U256};
use futures_util::{Stream, StreamExt};
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
table::Value,
tables,
transaction::{DbTx, DbTxMut},
RawKey, RawTable, RawValue,
};
use reth_era::{
e2s_types::E2sError,
era1_file::{BlockTupleIterator, Era1Reader},
era_file_ops::StreamReader,
execution_types::BlockTuple,
DecodeCompressed,
};
use reth_era_downloader::EraMeta;
use reth_etl::Collector;
use reth_fs_util as fs;
use reth_primitives_traits::{Block, FullBlockBody, FullBlockHeader, NodePrimitives};
use reth_provider::{
providers::StaticFileProviderRWRefMut, writer::UnifiedStorageWriter, BlockWriter,
ProviderError, StaticFileProviderFactory, StaticFileSegment, StaticFileWriter,
};
use reth_stages_types::{
CheckpointBlockRange, EntitiesCheckpoint, HeadersCheckpoint, StageCheckpoint, StageId,
};
use reth_storage_api::{
errors::ProviderResult, DBProvider, DatabaseProviderFactory, HeaderProvider,
NodePrimitivesProvider, StageCheckpointWriter, StorageLocation,
};
use std::{
collections::Bound,
error::Error,
fmt::{Display, Formatter},
io::{Read, Seek},
iter::Map,
ops::RangeBounds,
sync::mpsc,
};
use tracing::info;
/// Imports blocks from `downloader` using `provider`.
///
/// Returns current block height.
pub fn import<Downloader, Era, PF, B, BB, BH>(
mut downloader: Downloader,
provider_factory: &PF,
hash_collector: &mut Collector<BlockHash, BlockNumber>,
) -> eyre::Result<BlockNumber>
where
B: Block<Header = BH, Body = BB>,
BH: FullBlockHeader + Value,
BB: FullBlockBody<
Transaction = <<<PF as DatabaseProviderFactory>::ProviderRW as NodePrimitivesProvider>::Primitives as NodePrimitives>::SignedTx,
OmmerHeader = BH,
>,
Downloader: Stream<Item = eyre::Result<Era>> + Send + 'static + Unpin,
Era: EraMeta + Send + 'static,
PF: DatabaseProviderFactory<
ProviderRW: BlockWriter<Block = B>
+ DBProvider
+ StaticFileProviderFactory<Primitives: NodePrimitives<Block = B, BlockHeader = BH, BlockBody = BB>>
+ StageCheckpointWriter,
> + StaticFileProviderFactory<Primitives = <<PF as DatabaseProviderFactory>::ProviderRW as NodePrimitivesProvider>::Primitives>,
{
let (tx, rx) = mpsc::channel();
// Handle IO-bound async download in a background tokio task
tokio::spawn(async move {
while let Some(file) = downloader.next().await {
tx.send(Some(file))?;
}
tx.send(None)
});
let static_file_provider = provider_factory.static_file_provider();
// Consistency check of expected headers in static files vs DB is done on provider::sync_gap
// when poll_execute_ready is polled.
let mut height = static_file_provider
.get_highest_static_file_block(StaticFileSegment::Headers)
.unwrap_or_default();
// Find the latest total difficulty
let mut td = static_file_provider
.header_td_by_number(height)?
.ok_or(ProviderError::TotalDifficultyNotFound(height))?;
while let Some(meta) = rx.recv()? {
let from = height;
let provider = provider_factory.database_provider_rw()?;
height = process(
&meta?,
&mut static_file_provider.latest_writer(StaticFileSegment::Headers)?,
&provider,
hash_collector,
&mut td,
height..,
)?;
save_stage_checkpoints(&provider, from, height, height, height)?;
UnifiedStorageWriter::commit(provider)?;
}
let provider = provider_factory.database_provider_rw()?;
build_index(&provider, hash_collector)?;
UnifiedStorageWriter::commit(provider)?;
Ok(height)
}
/// Saves progress of ERA import into stages sync.
///
/// Since the ERA import does the same work as `HeaderStage` and `BodyStage`, it needs to inform
/// these stages that this work has already been done. Otherwise, there might be some conflict with
/// database integrity.
pub fn save_stage_checkpoints<P>(
provider: &P,
from: BlockNumber,
to: BlockNumber,
processed: u64,
total: u64,
) -> ProviderResult<()>
where
P: StageCheckpointWriter,
{
provider.save_stage_checkpoint(
StageId::Headers,
StageCheckpoint::new(to).with_headers_stage_checkpoint(HeadersCheckpoint {
block_range: CheckpointBlockRange { from, to },
progress: EntitiesCheckpoint { processed, total },
}),
)?;
provider.save_stage_checkpoint(
StageId::Bodies,
StageCheckpoint::new(to)
.with_entities_stage_checkpoint(EntitiesCheckpoint { processed, total }),
)?;
Ok(())
}
/// Extracts block headers and bodies from `meta` and appends them using `writer` and `provider`.
///
/// Adds on to `total_difficulty` and collects hash to height using `hash_collector`.
///
/// Skips all blocks below the [`start_bound`] of `block_numbers` and stops when reaching past the
/// [`end_bound`] or the end of the file.
///
/// Returns last block height.
///
/// [`start_bound`]: RangeBounds::start_bound
/// [`end_bound`]: RangeBounds::end_bound
pub fn process<Era, P, B, BB, BH>(
meta: &Era,
writer: &mut StaticFileProviderRWRefMut<'_, <P as NodePrimitivesProvider>::Primitives>,
provider: &P,
hash_collector: &mut Collector<BlockHash, BlockNumber>,
total_difficulty: &mut U256,
block_numbers: impl RangeBounds<BlockNumber>,
) -> eyre::Result<BlockNumber>
where
B: Block<Header = BH, Body = BB>,
BH: FullBlockHeader + Value,
BB: FullBlockBody<
Transaction = <<P as NodePrimitivesProvider>::Primitives as NodePrimitives>::SignedTx,
OmmerHeader = BH,
>,
Era: EraMeta + ?Sized,
P: DBProvider<Tx: DbTxMut> + NodePrimitivesProvider + BlockWriter<Block = B>,
<P as NodePrimitivesProvider>::Primitives: NodePrimitives<BlockHeader = BH, BlockBody = BB>,
{
let reader = open(meta)?;
let iter =
reader
.iter()
.map(Box::new(decode)
as Box<dyn Fn(Result<BlockTuple, E2sError>) -> eyre::Result<(BH, BB)>>);
let iter = ProcessIter { iter, era: meta };
process_iter(iter, writer, provider, hash_collector, total_difficulty, block_numbers)
}
type ProcessInnerIter<R, BH, BB> =
Map<BlockTupleIterator<R>, Box<dyn Fn(Result<BlockTuple, E2sError>) -> eyre::Result<(BH, BB)>>>;
/// An iterator that wraps era file extraction. After the final item [`EraMeta::mark_as_processed`]
/// is called to ensure proper cleanup.
#[derive(Debug)]
pub struct ProcessIter<'a, Era: ?Sized, R: Read, BH, BB>
where
BH: FullBlockHeader + Value,
BB: FullBlockBody<OmmerHeader = BH>,
{
iter: ProcessInnerIter<R, BH, BB>,
era: &'a Era,
}
impl<'a, Era: EraMeta + ?Sized, R: Read, BH, BB> Display for ProcessIter<'a, Era, R, BH, BB>
where
BH: FullBlockHeader + Value,
BB: FullBlockBody<OmmerHeader = BH>,
{
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
Display::fmt(&self.era.path().to_string_lossy(), f)
}
}
impl<'a, Era, R, BH, BB> Iterator for ProcessIter<'a, Era, R, BH, BB>
where
R: Read + Seek,
Era: EraMeta + ?Sized,
BH: FullBlockHeader + Value,
BB: FullBlockBody<OmmerHeader = BH>,
{
type Item = eyre::Result<(BH, BB)>;
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
Some(item) => Some(item),
None => match self.era.mark_as_processed() {
Ok(..) => None,
Err(e) => Some(Err(e)),
},
}
}
}
/// Opens the era file described by `meta`.
pub fn open<Era>(meta: &Era) -> eyre::Result<Era1Reader<std::fs::File>>
where
Era: EraMeta + ?Sized,
{
let file = fs::open(meta.path())?;
let reader = Era1Reader::new(file);
Ok(reader)
}
/// Extracts a pair of [`FullBlockHeader`] and [`FullBlockBody`] from [`BlockTuple`].
pub fn decode<BH, BB, E>(block: Result<BlockTuple, E>) -> eyre::Result<(BH, BB)>
where
BH: FullBlockHeader + Value,
BB: FullBlockBody<OmmerHeader = BH>,
E: From<E2sError> + Error + Send + Sync + 'static,
{
let block = block?;
let header: BH = block.header.decode()?;
let body: BB = block.body.decode()?;
Ok((header, body))
}
/// Extracts block headers and bodies from `iter` and appends them using `writer` and `provider`.
///
/// Adds on to `total_difficulty` and collects hash to height using `hash_collector`.
///
/// Skips all blocks below the [`start_bound`] of `block_numbers` and stops when reaching past the
/// [`end_bound`] or the end of the file.
///
/// Returns last block height.
///
/// [`start_bound`]: RangeBounds::start_bound
/// [`end_bound`]: RangeBounds::end_bound
pub fn process_iter<P, B, BB, BH>(
mut iter: impl Iterator<Item = eyre::Result<(BH, BB)>>,
writer: &mut StaticFileProviderRWRefMut<'_, <P as NodePrimitivesProvider>::Primitives>,
provider: &P,
hash_collector: &mut Collector<BlockHash, BlockNumber>,
total_difficulty: &mut U256,
block_numbers: impl RangeBounds<BlockNumber>,
) -> eyre::Result<BlockNumber>
where
B: Block<Header = BH, Body = BB>,
BH: FullBlockHeader + Value,
BB: FullBlockBody<
Transaction = <<P as NodePrimitivesProvider>::Primitives as NodePrimitives>::SignedTx,
OmmerHeader = BH,
>,
P: DBProvider<Tx: DbTxMut> + NodePrimitivesProvider + BlockWriter<Block = B>,
<P as NodePrimitivesProvider>::Primitives: NodePrimitives<BlockHeader = BH, BlockBody = BB>,
{
let mut last_header_number = match block_numbers.start_bound() {
Bound::Included(&number) => number,
Bound::Excluded(&number) => number.saturating_sub(1),
Bound::Unbounded => 0,
};
let target = match block_numbers.end_bound() {
Bound::Included(&number) => Some(number),
Bound::Excluded(&number) => Some(number.saturating_add(1)),
Bound::Unbounded => None,
};
for block in &mut iter {
let (header, body) = block?;
let number = header.number();
if number <= last_header_number {
continue;
}
if let Some(target) = target {
if number > target {
break;
}
}
let hash = header.hash_slow();
last_header_number = number;
// Increase total difficulty
*total_difficulty += header.difficulty();
// Append to Headers segment
writer.append_header(&header, *total_difficulty, &hash)?;
// Write bodies to database.
provider.append_block_bodies(
vec![(header.number(), Some(body))],
// We are writing transactions directly to static files.
StorageLocation::StaticFiles,
)?;
hash_collector.insert(hash, number)?;
}
Ok(last_header_number)
}
/// Dumps the contents of `hash_collector` into [`tables::HeaderNumbers`].
pub fn build_index<P, B, BB, BH>(
    provider: &P,
    hash_collector: &mut Collector<BlockHash, BlockNumber>,
) -> eyre::Result<()>
where
    B: Block<Header = BH, Body = BB>,
    BH: FullBlockHeader + Value,
    BB: FullBlockBody<
        Transaction = <<P as NodePrimitivesProvider>::Primitives as NodePrimitives>::SignedTx,
        OmmerHeader = BH,
    >,
    P: DBProvider<Tx: DbTxMut> + NodePrimitivesProvider + BlockWriter<Block = B>,
    <P as NodePrimitivesProvider>::Primitives: NodePrimitives<BlockHeader = BH, BlockBody = BB>,
{
    let total_headers = hash_collector.len();
    info!(target: "era::history::import", total = total_headers, "Writing headers hash index");
    // Database cursor for hash to number index
    let mut cursor_header_numbers =
        provider.tx_ref().cursor_write::<RawTable<tables::HeaderNumbers>>()?;
    let mut first_sync = false;
    // If we only have the genesis block hash, then we are at first sync, and we can remove it,
    // add it to the collector and use tx.append on all hashes.
    // (`append` is the fast path — presumably valid because the collector yields
    // keys in sorted order; TODO confirm. `upsert` is the fallback once the
    // table already contains arbitrary entries.)
    if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 {
        if let Some((hash, block_number)) = cursor_header_numbers.last()? {
            if block_number.value()? == 0 {
                hash_collector.insert(hash.key()?, 0)?;
                cursor_header_numbers.delete_current()?;
                first_sync = true;
            }
        }
    }
    // Log progress roughly every 10% of entries, but no more often than every 8192.
    let interval = (total_headers / 10).max(8192);
    // Build block hash to block number index
    for (index, hash_to_number) in hash_collector.iter()?.enumerate() {
        let (hash, number) = hash_to_number?;
        if index != 0 && index.is_multiple_of(interval) {
            info!(target: "era::history::import", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers hash index");
        }
        let hash = RawKey::<BlockHash>::from_vec(hash);
        let number = RawValue::<BlockNumber>::from_vec(number);
        if first_sync {
            cursor_header_numbers.append(hash, &number)?;
        } else {
            cursor_header_numbers.upsert(hash, &number)?;
        }
    }
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-utils/src/export.rs | crates/era-utils/src/export.rs | //! Logic to export from database era1 block history
//! and injecting them into era1 files with `Era1Writer`.
use alloy_consensus::BlockHeader;
use alloy_primitives::{BlockNumber, B256, U256};
use eyre::{eyre, Result};
use reth_era::{
e2s_types::IndexEntry,
era1_file::Era1Writer,
era1_types::{BlockIndex, Era1Id},
era_file_ops::{EraFileId, StreamWriter},
execution_types::{
Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts,
TotalDifficulty, MAX_BLOCKS_PER_ERA1,
},
};
use reth_fs_util as fs;
use reth_storage_api::{BlockNumReader, BlockReader, HeaderProvider};
use std::{
path::PathBuf,
time::{Duration, Instant},
};
use tracing::{debug, info, warn};
/// How often export progress is logged, in seconds.
const REPORT_INTERVAL_SECS: u64 = 10;
/// Per-entry byte overhead added on top of each payload when computing offsets.
const ENTRY_HEADER_SIZE: usize = 8;
/// Size of the version entry written at the start of every ERA1 file
/// (header only, no payload).
const VERSION_ENTRY_SIZE: usize = ENTRY_HEADER_SIZE;
/// Configuration to export block history
/// to era1 files
#[derive(Clone, Debug)]
pub struct ExportConfig {
    /// Directory to export era1 files to
    pub dir: PathBuf,
    /// First block to export
    pub first_block_number: BlockNumber,
    /// Last block to export (inclusive)
    pub last_block_number: BlockNumber,
    /// Number of blocks per era1 file
    /// It can never be larger than `MAX_BLOCKS_PER_ERA1 = 8192`
    /// See also <`https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md`>
    pub max_blocks_per_file: u64,
    /// Network name.
    pub network: String,
}
impl Default for ExportConfig {
    /// Defaults to exporting exactly one full era of mainnet history:
    /// blocks `0..=MAX_BLOCKS_PER_ERA1 - 1`, all in a single file.
    fn default() -> Self {
        // One full ERA1 era worth of blocks.
        let era_len = MAX_BLOCKS_PER_ERA1 as u64;
        Self {
            dir: PathBuf::new(),
            first_block_number: 0,
            last_block_number: era_len - 1,
            max_blocks_per_file: era_len,
            network: "mainnet".to_string(),
        }
    }
}
impl ExportConfig {
    /// Validates the export configuration parameters.
    ///
    /// Rejects a zero chunk size, as well as chunk sizes exceeding the ERA1
    /// format limit of [`MAX_BLOCKS_PER_ERA1`] blocks per file.
    pub fn validate(&self) -> Result<()> {
        // The two failure conditions are mutually exclusive, so check order
        // does not affect which error a caller observes.
        if self.max_blocks_per_file == 0 {
            return Err(eyre!("Max blocks per file cannot be zero"));
        }
        if self.max_blocks_per_file > MAX_BLOCKS_PER_ERA1 as u64 {
            return Err(eyre!(
                "Max blocks per file ({}) exceeds ERA1 limit ({})",
                self.max_blocks_per_file,
                MAX_BLOCKS_PER_ERA1
            ));
        }
        Ok(())
    }
}
/// Fetches block history data from the provider
/// and prepares it for export to era1 files
/// for a given number of blocks then writes them to disk.
pub fn export<P>(provider: &P, config: &ExportConfig) -> Result<Vec<PathBuf>>
where
    P: BlockReader,
{
    config.validate()?;
    info!(
        "Exporting blockchain history from block {} to {} with this max of blocks per file of {}",
        config.first_block_number, config.last_block_number, config.max_blocks_per_file
    );
    // Determine the actual last block to export
    // best_block_number() might be outdated, so check actual block availability
    let last_block_number = determine_export_range(provider, config)?;
    info!(
        target: "era::history::export",
        first = config.first_block_number,
        last = last_block_number,
        max_blocks_per_file = config.max_blocks_per_file,
        "Preparing era1 export data"
    );
    if !config.dir.exists() {
        fs::create_dir_all(&config.dir)
            .map_err(|e| eyre!("Failed to create output directory: {}", e))?;
    }
    let start_time = Instant::now();
    let mut last_report_time = Instant::now();
    let report_interval = Duration::from_secs(REPORT_INTERVAL_SECS);
    let mut created_files = Vec::new();
    let mut total_blocks_processed = 0;
    // Seed the running total difficulty with the value at the block just before
    // the export range; starting at genesis means starting from zero.
    let mut total_difficulty = if config.first_block_number > 0 {
        let prev_block_number = config.first_block_number - 1;
        provider
            .header_td_by_number(prev_block_number)?
            .ok_or_else(|| eyre!("Total difficulty not found for block {prev_block_number}"))?
    } else {
        U256::ZERO
    };
    // Process blocks in chunks according to `max_blocks_per_file`
    for start_block in
        (config.first_block_number..=last_block_number).step_by(config.max_blocks_per_file as usize)
    {
        let end_block = (start_block + config.max_blocks_per_file - 1).min(last_block_number);
        let block_count = (end_block - start_block + 1) as usize;
        info!(
            target: "era::history::export",
            "Processing blocks {start_block} to {end_block} ({block_count} blocks)"
        );
        let headers = provider.headers_range(start_block..=end_block)?;
        // Extract first 4 bytes of last block's state root as historical identifier
        let historical_root = headers
            .last()
            .map(|header| {
                let state_root = header.state_root();
                [state_root[0], state_root[1], state_root[2], state_root[3]]
            })
            .unwrap_or([0u8; 4]);
        let era1_id = Era1Id::new(&config.network, start_block, block_count as u32)
            .with_hash(historical_root);
        debug!("Final file name {}", era1_id.to_file_name());
        let file_path = config.dir.join(era1_id.to_file_name());
        let file = std::fs::File::create(&file_path)?;
        let mut writer = Era1Writer::new(file);
        writer.write_version()?;
        // Byte offsets of each block entry relative to the start of the file,
        // needed for the trailing block index.
        let mut offsets = Vec::<i64>::with_capacity(block_count);
        let mut position = VERSION_ENTRY_SIZE as i64;
        let mut blocks_written = 0;
        let mut final_header_data = Vec::new();
        for (i, header) in headers.into_iter().enumerate() {
            let expected_block_number = start_block + i as u64;
            let (compressed_header, compressed_body, compressed_receipts) = compress_block_data(
                provider,
                header,
                expected_block_number,
                &mut total_difficulty,
            )?;
            // Save last block's header data for accumulator
            if expected_block_number == end_block {
                final_header_data = compressed_header.data.clone();
            }
            let difficulty = TotalDifficulty::new(total_difficulty);
            let header_size = compressed_header.data.len() + ENTRY_HEADER_SIZE;
            let body_size = compressed_body.data.len() + ENTRY_HEADER_SIZE;
            let receipts_size = compressed_receipts.data.len() + ENTRY_HEADER_SIZE;
            let difficulty_size = 32 + ENTRY_HEADER_SIZE; // U256 is 32 + 8 bytes header overhead
            let total_size = (header_size + body_size + receipts_size + difficulty_size) as i64;
            let block_tuple = BlockTuple::new(
                compressed_header,
                compressed_body,
                compressed_receipts,
                difficulty,
            );
            offsets.push(position);
            position += total_size;
            writer.write_block(&block_tuple)?;
            blocks_written += 1;
            total_blocks_processed += 1;
            // Periodic progress report, throttled to once per REPORT_INTERVAL_SECS.
            if last_report_time.elapsed() >= report_interval {
                info!(
                    target: "era::history::export",
                    "Export progress: block {expected_block_number}/{last_block_number} ({:.2}%) - elapsed: {:?}",
                    (total_blocks_processed as f64) /
                        ((last_block_number - config.first_block_number + 1) as f64) *
                        100.0,
                    start_time.elapsed()
                );
                last_report_time = Instant::now();
            }
        }
        if blocks_written > 0 {
            // NOTE(review): this uses the first 32 bytes of the *compressed*
            // last header as the accumulator value, which does not look like a
            // real accumulator root — confirm intent. Also, `B256::from_slice`
            // panics unless the slice is exactly 32 bytes, so a compressed
            // header shorter than 32 bytes would panic here via `32.min(len)`.
            let accumulator_hash =
                B256::from_slice(&final_header_data[0..32.min(final_header_data.len())]);
            let accumulator = Accumulator::new(accumulator_hash);
            let block_index = BlockIndex::new(start_block, offsets);
            writer.write_accumulator(&accumulator)?;
            writer.write_block_index(&block_index)?;
            writer.flush()?;
            created_files.push(file_path.clone());
            info!(
                target: "era::history::export",
                "Wrote ERA1 file: {file_path:?} with {blocks_written} blocks"
            );
        }
    }
    info!(
        target: "era::history::export",
        "Successfully wrote {} ERA1 files in {:?}",
        created_files.len(),
        start_time.elapsed()
    );
    Ok(created_files)
}
// Determines the actual last block number that can be exported,
// Uses `headers_range` fallback when `best_block_number` is stale due to static file storage.
fn determine_export_range<P>(provider: &P, config: &ExportConfig) -> Result<BlockNumber>
where
    P: HeaderProvider + BlockNumReader,
{
    let best_block_number = provider.best_block_number()?;
    // Head already covers the requested range: nothing to adjust.
    if best_block_number >= config.last_block_number {
        return Ok(config.last_block_number);
    }
    warn!(
        "Last block {} is beyond current head {}, setting last = head",
        config.last_block_number, best_block_number
    );
    // Check if more blocks are actually available beyond what `best_block_number()` reports
    match provider.headers_range(best_block_number..=config.last_block_number) {
        Ok(headers) => match headers.last() {
            Some(last_header) => {
                let highest_block = last_header.number();
                info!("Found highest available block {} via headers_range", highest_block);
                Ok(highest_block)
            }
            None => {
                warn!("No headers found in range, using best_block_number {}", best_block_number);
                Ok(best_block_number)
            }
        },
        Err(_) => {
            warn!("headers_range failed, using best_block_number {}", best_block_number);
            Ok(best_block_number)
        }
    }
}
// Compresses block data and returns compressed components with metadata
fn compress_block_data<P>(
    provider: &P,
    header: P::Header,
    expected_block_number: BlockNumber,
    total_difficulty: &mut U256,
) -> Result<(CompressedHeader, CompressedBody, CompressedReceipts)>
where
    P: BlockReader,
{
    let actual_block_number = header.number();
    // Guard against a mismatch between the requested range position and the
    // header the provider actually returned.
    if expected_block_number != actual_block_number {
        return Err(eyre!("Expected block {expected_block_number}, got {actual_block_number}"));
    }
    let body = provider
        .block_by_number(actual_block_number)?
        .ok_or_else(|| eyre!("Block body not found for block {}", actual_block_number))?;
    let receipts = provider
        .receipts_by_block(actual_block_number.into())?
        .ok_or_else(|| eyre!("Receipts not found for block {}", actual_block_number))?;
    // Side effect: fold this block's difficulty into the caller's running total,
    // which is threaded through successive calls during export.
    *total_difficulty += header.difficulty();
    let compressed_header = CompressedHeader::from_header(&header)?;
    let compressed_body = CompressedBody::from_body(&body)?;
    let compressed_receipts = CompressedReceipts::from_encodable_list(&receipts)
        .map_err(|e| eyre!("Failed to compress receipts: {}", e))?;
    Ok((compressed_header, compressed_body, compressed_receipts))
}
#[cfg(test)]
mod tests {
    use crate::ExportConfig;
    use reth_era::execution_types::MAX_BLOCKS_PER_ERA1;
    use tempfile::tempdir;

    /// Exercises every accept/reject branch of `ExportConfig::validate`.
    #[test]
    fn test_export_config_validation() {
        let temp_dir = tempdir().unwrap();
        // Default config should pass
        let default_config = ExportConfig::default();
        assert!(default_config.validate().is_ok(), "Default config should be valid");
        // Exactly at the limit should pass
        let limit_config =
            ExportConfig { max_blocks_per_file: MAX_BLOCKS_PER_ERA1 as u64, ..Default::default() };
        assert!(limit_config.validate().is_ok(), "Config at ERA1 limit should pass validation");
        // Valid config should pass
        let valid_config = ExportConfig {
            dir: temp_dir.path().to_path_buf(),
            max_blocks_per_file: 1000,
            ..Default::default()
        };
        assert!(valid_config.validate().is_ok(), "Valid config should pass validation");
        // Zero blocks per file should fail
        let zero_blocks_config = ExportConfig {
            max_blocks_per_file: 0, // Invalid
            ..Default::default()
        };
        let result = zero_blocks_config.validate();
        assert!(result.is_err(), "Zero blocks per file should fail validation");
        assert!(result.unwrap_err().to_string().contains("cannot be zero"));
        // Exceeding era1 limit should fail
        let oversized_config = ExportConfig {
            max_blocks_per_file: MAX_BLOCKS_PER_ERA1 as u64 + 1, // Invalid
            ..Default::default()
        };
        let result = oversized_config.validate();
        assert!(result.is_err(), "Oversized blocks per file should fail validation");
        assert!(result.unwrap_err().to_string().contains("exceeds ERA1 limit"));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-utils/tests/it/genesis.rs | crates/era-utils/tests/it/genesis.rs | use reth_db_common::init::init_genesis;
use reth_era_utils::{export, ExportConfig};
use reth_fs_util as fs;
use reth_provider::{test_utils::create_test_provider_factory, BlockReader};
use tempfile::tempdir;
#[test]
fn test_export_with_genesis_only() {
    // Fresh database containing nothing but the genesis block.
    let provider_factory = create_test_provider_factory();
    init_genesis(&provider_factory).unwrap();
    let provider = provider_factory.provider().unwrap();
    assert!(provider.block_by_number(0).unwrap().is_some(), "Genesis block should exist");
    assert!(provider.block_by_number(1).unwrap().is_none(), "Block 1 should not exist");
    let export_dir = tempdir().unwrap();
    let export_config = ExportConfig { dir: export_dir.path().to_owned(), ..Default::default() };
    // Exporting should succeed and produce a single file covering only block 0.
    let exported_files =
        export(&provider_factory.provider_rw().unwrap().0, &export_config).unwrap();
    assert_eq!(exported_files.len(), 1, "Should export exactly one file");
    let file_path = &exported_files[0];
    assert!(file_path.exists(), "Exported file should exist on disk");
    // Expected name shape: {network}-{era:05}-{count:05}-{8 hex chars}.era1
    let file_name = file_path.file_name().unwrap().to_str().unwrap();
    assert!(
        file_name.starts_with("mainnet-00000-00001-"),
        "File should have correct prefix with era format"
    );
    assert!(file_name.ends_with(".era1"), "File should have correct extension");
    let metadata = fs::metadata(file_path).unwrap();
    assert!(metadata.len() > 0, "Exported file should not be empty");
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-utils/tests/it/history.rs | crates/era-utils/tests/it/history.rs | use crate::{ClientWithFakeIndex, ITHACA_ERA_INDEX_URL};
use reqwest::{Client, Url};
use reth_db_common::init::init_genesis;
use reth_era::execution_types::MAX_BLOCKS_PER_ERA1;
use reth_era_downloader::{EraClient, EraStream, EraStreamConfig};
use reth_era_utils::{export, import, ExportConfig};
use reth_etl::Collector;
use reth_fs_util as fs;
use reth_provider::{test_utils::create_test_provider_factory, BlockNumReader, BlockReader};
use std::str::FromStr;
use tempfile::tempdir;
// Export range used by the roundtrip test: 900 blocks starting at genesis,
// written in chunks of 250 blocks per file (3 full files + one of 150).
const EXPORT_FIRST_BLOCK: u64 = 0;
const EXPORT_BLOCKS_PER_FILE: u64 = 250;
const EXPORT_TOTAL_BLOCKS: u64 = 900;
// Last exported block height (inclusive).
const EXPORT_LAST_BLOCK: u64 = EXPORT_FIRST_BLOCK + EXPORT_TOTAL_BLOCKS - 1;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_history_imports_from_fresh_state_successfully() {
    // URL where the ERA1 files are hosted
    let url = Url::from_str(ITHACA_ERA_INDEX_URL).unwrap();
    // Directory where the ERA1 files will be downloaded to
    let folder = tempdir().unwrap();
    let folder = folder.path();
    // The fake client serves an index that lists exactly one ERA1 file; the
    // file download itself still goes to the real host.
    let client = EraClient::new(ClientWithFakeIndex(Client::new()), url, folder);
    let config = EraStreamConfig::default().with_max_files(1).with_max_concurrent_downloads(1);
    let stream = EraStream::new(client, config);
    let pf = create_test_provider_factory();
    init_genesis(&pf).unwrap();
    let folder = tempdir().unwrap();
    let folder = Some(folder.path().to_owned());
    let mut hash_collector = Collector::new(4096, folder);
    // One ERA1 file covers blocks 0..=8191, so the import should end at 8191.
    let expected_block_number = 8191;
    let actual_block_number = import(stream, &pf, &mut hash_collector).unwrap();
    assert_eq!(actual_block_number, expected_block_number);
}
/// Test that verifies the complete roundtrip from importing to exporting era1 files.
/// It validates :
/// - Downloads the first era1 file from ithaca's url and import the file data, into the database
/// - Exports blocks from database back to era1 format
/// - Ensure exported files have correct structure and naming
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_roundtrip_export_after_import() {
    // URL where the ERA1 files are hosted
    let url = Url::from_str(ITHACA_ERA_INDEX_URL).unwrap();
    let download_folder = tempdir().unwrap();
    let download_folder = download_folder.path().to_owned().into_boxed_path();
    let client = EraClient::new(ClientWithFakeIndex(Client::new()), url, download_folder);
    let config = EraStreamConfig::default().with_max_files(1).with_max_concurrent_downloads(1);
    let stream = EraStream::new(client, config);
    let pf = create_test_provider_factory();
    init_genesis(&pf).unwrap();
    let folder = tempdir().unwrap();
    let folder = Some(folder.path().to_owned());
    let mut hash_collector = Collector::new(4096, folder);
    // Import blocks from one era1 file into database
    let last_imported_block_height = import(stream, &pf, &mut hash_collector).unwrap();
    assert_eq!(last_imported_block_height, 8191);
    let provider_ref = pf.provider_rw().unwrap().0;
    let best_block = provider_ref.best_block_number().unwrap();
    assert!(best_block <= 8191, "Best block {best_block} should not exceed imported count");
    // Verify some blocks exist in the database
    for &block_num in &[0, 1, 2, 10, 50, 100, 5000, 8190, 8191] {
        let block_exists = provider_ref.block_by_number(block_num).unwrap().is_some();
        assert!(block_exists, "Block {block_num} should exist after importing 8191 blocks");
    }
    // The import was verified let's start the export!
    // 900 blocks will be exported from 0 to 899
    // It should be split into 3 files of 250 blocks each, and the last file with 150 blocks
    let export_folder = tempdir().unwrap();
    let export_config = ExportConfig {
        dir: export_folder.path().to_path_buf(),
        first_block_number: EXPORT_FIRST_BLOCK, // 0
        last_block_number: EXPORT_LAST_BLOCK, // 899
        max_blocks_per_file: EXPORT_BLOCKS_PER_FILE, // 250 blocks per file
        network: "mainnet".to_string(),
    };
    // Export blocks from database to era1 files
    let exported_files = export(&provider_ref, &export_config).expect("Export should succeed");
    // Calculate how many files we expect based on the configuration
    // We expect 4 files for 900 blocks: first 3 files with 250 blocks each,
    // then 150 for the last file
    let expected_files_number = EXPORT_TOTAL_BLOCKS.div_ceil(EXPORT_BLOCKS_PER_FILE);
    assert_eq!(
        exported_files.len(),
        expected_files_number as usize,
        "Should create {expected_files_number} files for {EXPORT_TOTAL_BLOCKS} blocks with {EXPORT_BLOCKS_PER_FILE} blocks per file"
    );
    for (i, file_path) in exported_files.iter().enumerate() {
        // Verify file exists and has content
        assert!(file_path.exists(), "File {} should exist", i + 1);
        let file_size = fs::metadata(file_path).unwrap().len();
        assert!(file_size > 0, "File {} should not be empty", i + 1);
        // Calculate expected file parameters
        let file_start_block = EXPORT_FIRST_BLOCK + (i as u64 * EXPORT_BLOCKS_PER_FILE);
        let remaining_blocks = EXPORT_TOTAL_BLOCKS - (i as u64 * EXPORT_BLOCKS_PER_FILE);
        let blocks_numbers_per_file = std::cmp::min(EXPORT_BLOCKS_PER_FILE, remaining_blocks);
        // Verify chunking : first 3 files have 250 blocks, last file has 150 blocks - 900 total
        let expected_blocks = if i < 3 { 250 } else { 150 };
        assert_eq!(
            blocks_numbers_per_file,
            expected_blocks,
            "File {} should contain exactly {} blocks, got {}",
            i + 1,
            expected_blocks,
            blocks_numbers_per_file
        );
        // Verify format: mainnet-{era_number:05}-{era_count:05}-{8hexchars}.era1
        let era_number = file_start_block / MAX_BLOCKS_PER_ERA1 as u64;
        // Era count is always 1 for this test, as we are only exporting one era
        let expected_prefix = format!("mainnet-{:05}-{:05}-", era_number, 1);
        let file_name = file_path.file_name().unwrap().to_str().unwrap();
        assert!(
            file_name.starts_with(&expected_prefix),
            "File {} should start with '{expected_prefix}', got '{file_name}'",
            i + 1
        );
        // Verify the hash part is 8 characters
        let hash_start = expected_prefix.len();
        let hash_end = file_name.len() - 5; // remove ".era1"
        let hash_part = &file_name[hash_start..hash_end];
        assert_eq!(
            hash_part.len(),
            8,
            "File {} hash should be 8 characters, got {} in '{}'",
            i + 1,
            hash_part.len(),
            file_name
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-utils/tests/it/main.rs | crates/era-utils/tests/it/main.rs | //! Root module for test modules, so that the tests are built into a single binary.
use alloy_primitives::bytes::Bytes;
use futures_util::{stream, Stream, TryStreamExt};
use reqwest::{Client, IntoUrl};
use reth_era_downloader::HttpClient;
use tokio_util::either::Either;
// Url where the ERA1 files are hosted
const ITHACA_ERA_INDEX_URL: &str = "https://era.ithaca.xyz/era1/index.html";
// The response containing one file that the fake client will return when the index Url is requested
const GENESIS_ITHACA_INDEX_RESPONSE: &[u8] = b"<a href=\"https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1\">mainnet-00000-5ec1ffb8.era1</a>";
mod genesis;
mod history;
// Empty entry point: this is an integration-test binary, so all the work
// happens in the `#[test]`/`#[tokio::test]` functions of the submodules.
const fn main() {}
/// An HTTP client that fakes the file list to always show one known file
///
/// but passes all other calls including actual downloads to a real HTTP client
///
/// In that way, only one file is used but downloads are still performed from the original source.
#[derive(Debug, Clone)]
// Wraps the real `reqwest::Client` used for every non-index request.
struct ClientWithFakeIndex(Client);
impl HttpClient for ClientWithFakeIndex {
async fn get<U: IntoUrl + Send + Sync>(
&self,
url: U,
) -> eyre::Result<impl Stream<Item = eyre::Result<Bytes>> + Send + Sync + Unpin> {
let url = url.into_url()?;
match url.to_string().as_str() {
ITHACA_ERA_INDEX_URL => {
// Create a static stream without boxing
let stream =
stream::iter(vec![Ok(Bytes::from_static(GENESIS_ITHACA_INDEX_RESPONSE))]);
Ok(Either::Left(stream))
}
_ => {
let response = Client::get(&self.0, url).send().await?;
let stream = response.bytes_stream().map_err(|e| eyre::Error::new(e));
Ok(Either::Right(stream))
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/types/src/event.rs | crates/prune/types/src/event.rs | use crate::PrunedSegmentInfo;
use alloc::vec::Vec;
use alloy_primitives::BlockNumber;
use core::time::Duration;
/// An event emitted by a pruner.
///
/// A single pruner run emits `Started` followed by `Finished`.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PrunerEvent {
    /// Emitted when pruner started running.
    Started {
        /// The tip block number before pruning.
        tip_block_number: BlockNumber,
    },
    /// Emitted when pruner finished running.
    Finished {
        /// The tip block number before pruning.
        tip_block_number: BlockNumber,
        /// The elapsed time for the pruning process.
        elapsed: Duration,
        /// Collected pruning stats.
        stats: Vec<PrunedSegmentInfo>,
    },
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/types/src/lib.rs | crates/prune/types/src/lib.rs | //! Commonly used types for prune usage.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
mod checkpoint;
mod event;
mod mode;
mod pruner;
mod segment;
mod target;
use alloc::{collections::BTreeMap, vec::Vec};
use alloy_primitives::{Address, BlockNumber};
use core::ops::Deref;
pub use checkpoint::PruneCheckpoint;
pub use event::PrunerEvent;
pub use mode::PruneMode;
pub use pruner::{
PruneInterruptReason, PruneProgress, PrunedSegmentInfo, PrunerOutput, SegmentOutput,
SegmentOutputCheckpoint,
};
pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError};
pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE};
/// Configuration for pruning receipts not associated with logs emitted by the specified contracts.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
// Maps each contract address to the prune mode applied to its receipts.
pub struct ReceiptsLogPruneConfig(pub BTreeMap<Address, PruneMode>);
impl ReceiptsLogPruneConfig {
    /// Checks if the configuration is empty
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Given the `tip` block number, consolidates the structure so it can easily be queried for
    /// filtering across a range of blocks.
    ///
    /// Example:
    ///
    /// `{ addrA: Before(872), addrB: Before(500), addrC: Distance(128) }`
    ///
    /// for `tip: 1000`, gets transformed to a map such as:
    ///
    /// `{ 500: [addrB], 872: [addrA, addrC] }`
    ///
    /// The [`BlockNumber`] key of the new map should be viewed as `PruneMode::Before(block)`, which
    /// makes the previous result equivalent to
    ///
    /// `{ Before(500): [addrB], Before(872): [addrA, addrC] }`
    pub fn group_by_block(
        &self,
        tip: BlockNumber,
        pruned_block: Option<BlockNumber>,
    ) -> Result<BTreeMap<BlockNumber, Vec<&Address>>, PruneSegmentError> {
        let mut map = BTreeMap::new();
        // Lowest admissible key: one past the block that has already been pruned.
        let base_block = pruned_block.unwrap_or_default() + 1;
        for (address, mode) in &self.0 {
            // Getting `None`, means that there is nothing to prune yet, so we need it to include in
            // the BTreeMap (block = 0), otherwise it will be excluded.
            // Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all
            // other receipts.
            //
            // Reminder, that we increment because the [`BlockNumber`] key of the new map should be
            // viewed as `PruneMode::Before(block)`
            let block = base_block.max(
                mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)?
                    .map(|(block, _)| block)
                    .unwrap_or_default() +
                    1,
            );
            map.entry(block).or_insert_with(Vec::new).push(address)
        }
        Ok(map)
    }

    /// Returns the lowest block where we start filtering logs which use `PruneMode::Distance(_)`.
    pub fn lowest_block_with_distance(
        &self,
        tip: BlockNumber,
        pruned_block: Option<BlockNumber>,
    ) -> Result<Option<BlockNumber>, PruneSegmentError> {
        let pruned_block = pruned_block.unwrap_or_default();
        let mut lowest = None;
        // Only `Distance` modes participate; keep the minimum target over all of them.
        for mode in self.values() {
            if mode.is_distance() {
                if let Some((block, _)) =
                    mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)?
                {
                    lowest = Some(lowest.unwrap_or(u64::MAX).min(block));
                }
            }
        }
        // Never report a block lower than what has already been pruned.
        Ok(lowest.map(|lowest| lowest.max(pruned_block)))
    }
}
impl Deref for ReceiptsLogPruneConfig {
    type Target = BTreeMap<Address, PruneMode>;

    /// Exposes the inner address → prune-mode map directly.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_group_by_block_empty_config() {
    let config = ReceiptsLogPruneConfig(BTreeMap::new());
    let tip = 1000;
    let pruned_block = None;
    let result = config.group_by_block(tip, pruned_block).unwrap();
    assert!(result.is_empty(), "The result should be empty when the config is empty");
}
#[test]
fn test_group_by_block_single_entry() {
    let mut config_map = BTreeMap::new();
    let address = Address::new([1; 20]);
    let prune_mode = PruneMode::Before(500);
    config_map.insert(address, prune_mode);
    let config = ReceiptsLogPruneConfig(config_map);
    // Big tip to have something to prune for the target block
    let tip = 3000000;
    let pruned_block = Some(400);
    let result = config.group_by_block(tip, pruned_block).unwrap();
    // Expect one entry with block 500 and the corresponding address
    assert_eq!(result.len(), 1);
    assert_eq!(result[&500], vec![&address], "Address should be grouped under block 500");
    // Tip smaller than the target block, so that we have nothing to prune for the block
    let tip = 300;
    let pruned_block = Some(400);
    let result = config.group_by_block(tip, pruned_block).unwrap();
    // Expect one entry keyed at 401 (pruned block 400 + 1) with the corresponding address
    assert_eq!(result.len(), 1);
    assert_eq!(result[&401], vec![&address], "Address should be grouped under block 401");
}
#[test]
fn test_group_by_block_multiple_entries() {
    let mut config_map = BTreeMap::new();
    let address1 = Address::new([1; 20]);
    let address2 = Address::new([2; 20]);
    let prune_mode1 = PruneMode::Before(600);
    let prune_mode2 = PruneMode::Before(800);
    config_map.insert(address1, prune_mode1);
    config_map.insert(address2, prune_mode2);
    let config = ReceiptsLogPruneConfig(config_map);
    let tip = 900000;
    let pruned_block = Some(400);
    let result = config.group_by_block(tip, pruned_block).unwrap();
    // Expect two entries: one for block 600 and another for block 800
    assert_eq!(result.len(), 2);
    assert_eq!(result[&600], vec![&address1], "Address1 should be grouped under block 600");
    assert_eq!(result[&800], vec![&address2], "Address2 should be grouped under block 800");
}
#[test]
fn test_group_by_block_with_distance_prune_mode() {
    let mut config_map = BTreeMap::new();
    let address = Address::new([1; 20]);
    let prune_mode = PruneMode::Distance(100000);
    config_map.insert(address, prune_mode);
    let config = ReceiptsLogPruneConfig(config_map);
    let tip = 100100;
    // Pruned block is smaller than the target block
    let pruned_block = Some(50);
    let result = config.group_by_block(tip, pruned_block).unwrap();
    // Expect the entry to be keyed at 101, i.e. (tip - distance) + 1
    assert_eq!(result.len(), 1);
    assert_eq!(result[&101], vec![&address], "Address should be grouped under block 101");
    let tip = 100100;
    // Pruned block is larger than the target block
    let pruned_block = Some(800);
    let result = config.group_by_block(tip, pruned_block).unwrap();
    // Expect the entry keyed at 801 (pruned block 800 + 1), which is larger than tip - distance
    assert_eq!(result.len(), 1);
    assert_eq!(result[&801], vec![&address], "Address should be grouped under block 801");
}
#[test]
fn test_lowest_block_with_distance_empty_config() {
let config = ReceiptsLogPruneConfig(BTreeMap::new());
let tip = 1000;
let pruned_block = None;
let result = config.lowest_block_with_distance(tip, pruned_block).unwrap();
assert_eq!(result, None, "The result should be None when the config is empty");
}
#[test]
fn test_lowest_block_with_distance_no_distance_mode() {
let mut config_map = BTreeMap::new();
let address = Address::new([1; 20]);
let prune_mode = PruneMode::Before(500);
config_map.insert(address, prune_mode);
let config = ReceiptsLogPruneConfig(config_map);
let tip = 1000;
let pruned_block = None;
let result = config.lowest_block_with_distance(tip, pruned_block).unwrap();
assert_eq!(result, None, "The result should be None when there are no Distance modes");
}
#[test]
fn test_lowest_block_with_distance_single_entry() {
    let mut entries = BTreeMap::new();
    entries.insert(Address::new([1; 20]), PruneMode::Distance(100000));
    let config = ReceiptsLogPruneConfig(entries);

    // Case 1: the already-pruned block wins because 400 > 100100 - 100000 (tip - distance).
    assert_eq!(
        config.lowest_block_with_distance(100100, Some(400)).unwrap(),
        Some(400),
        "The lowest block should be 400"
    );

    // Case 2: `tip - distance` wins because 100 > 50 (pruned block).
    assert_eq!(
        config.lowest_block_with_distance(100100, Some(50)).unwrap(),
        Some(100),
        "The lowest block should be 100"
    );
}
#[test]
fn test_lowest_block_with_distance_multiple_entries_last() {
    let mut entries = BTreeMap::new();
    entries.insert(Address::new([1; 20]), PruneMode::Distance(100100));
    entries.insert(Address::new([2; 20]), PruneMode::Distance(100300));
    let config = ReceiptsLogPruneConfig(entries);

    // The largest distance yields the lowest block:
    // - entry 1: 200300 - 100100 = 100200
    // - entry 2: 200300 - 100300 = 100000 < 100200, so 100000 is the final result
    assert_eq!(config.lowest_block_with_distance(200300, Some(100)).unwrap(), Some(100000));
}
#[test]
fn test_lowest_block_with_distance_multiple_entries_first() {
    let mut entries = BTreeMap::new();
    entries.insert(Address::new([1; 20]), PruneMode::Distance(100400));
    entries.insert(Address::new([2; 20]), PruneMode::Distance(100300));
    let config = ReceiptsLogPruneConfig(entries);

    // The first entry already yields the minimum:
    // - entry 1: 200300 - 100400 = 99900
    // - entry 2: 200300 - 100300 = 100000 > 99900, so 99900 is kept
    assert_eq!(config.lowest_block_with_distance(200300, Some(100)).unwrap(), Some(99900));
}
#[test]
fn test_lowest_block_with_distance_multiple_entries_pruned_block() {
    let mut entries = BTreeMap::new();
    entries.insert(Address::new([1; 20]), PruneMode::Distance(100400));
    entries.insert(Address::new([2; 20]), PruneMode::Distance(100300));
    let config = ReceiptsLogPruneConfig(entries);

    // Distances give min(200300 - 100400, 200300 - 100300) = 99900, but the already-pruned
    // block 100000 is higher and therefore takes precedence.
    assert_eq!(config.lowest_block_with_distance(200300, Some(100000)).unwrap(), Some(100000));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/types/src/target.rs | crates/prune/types/src/target.rs | use alloy_primitives::BlockNumber;
use derive_more::Display;
use thiserror::Error;
use crate::{PruneCheckpoint, PruneMode, PruneSegment, ReceiptsLogPruneConfig};
/// Minimum distance from the tip necessary for the node to work correctly:
/// 1. Minimum 2 epochs (32 blocks per epoch) required to handle any reorg according to the
/// consensus protocol.
/// 2. Another 10k blocks to have a room for maneuver in case when things go wrong and a manual
/// unwind is required.
pub const MINIMUM_PRUNING_DISTANCE: u64 = 32 * 2 + 10_000;
/// Errors returned when an unwind target conflicts with already-pruned history.
///
/// (The previous doc comment, "Type of history that can be pruned", described [`HistoryType`]
/// below, not this error enum.)
#[derive(Debug, Error, PartialEq, Eq, Clone)]
pub enum UnwindTargetPrunedError {
    /// The target block is beyond the history limit
    #[error("Cannot unwind to block {target_block} as it is beyond the {history_type} limit. Latest block: {latest_block}, History limit: {limit}")]
    TargetBeyondHistoryLimit {
        /// The latest block number
        latest_block: BlockNumber,
        /// The target block number
        target_block: BlockNumber,
        /// The type of history that is beyond the limit
        history_type: HistoryType,
        /// The limit of the history
        limit: u64,
    },
}
/// Type of history that can be pruned.
#[derive(Debug, Display, Clone, PartialEq, Eq)]
pub enum HistoryType {
    /// Account history
    AccountHistory,
    /// Storage history
    StorageHistory,
}
/// Pruning configuration for every segment of the data that can be pruned.
///
/// All fields default to `None` (no pruning); because of `serde(default)`, keys missing from a
/// deserialized config fall back to that default.
#[derive(Debug, Clone, Default, Eq, PartialEq)]
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "serde"), serde(default))]
pub struct PruneModes {
    /// Sender Recovery pruning configuration.
    #[cfg_attr(any(test, feature = "serde"), serde(skip_serializing_if = "Option::is_none"))]
    pub sender_recovery: Option<PruneMode>,
    /// Transaction Lookup pruning configuration.
    #[cfg_attr(any(test, feature = "serde"), serde(skip_serializing_if = "Option::is_none"))]
    pub transaction_lookup: Option<PruneMode>,
    /// Receipts pruning configuration. This setting overrides `receipts_log_filter`
    /// and offers improved performance.
    // Deserialization rejects configs that would keep fewer than MINIMUM_PRUNING_DISTANCE blocks.
    #[cfg_attr(
        any(test, feature = "serde"),
        serde(
            skip_serializing_if = "Option::is_none",
            deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<MINIMUM_PRUNING_DISTANCE, _>"
        )
    )]
    pub receipts: Option<PruneMode>,
    /// Account History pruning configuration.
    #[cfg_attr(
        any(test, feature = "serde"),
        serde(
            skip_serializing_if = "Option::is_none",
            deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<MINIMUM_PRUNING_DISTANCE, _>"
        )
    )]
    pub account_history: Option<PruneMode>,
    /// Storage History pruning configuration.
    #[cfg_attr(
        any(test, feature = "serde"),
        serde(
            skip_serializing_if = "Option::is_none",
            deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<MINIMUM_PRUNING_DISTANCE, _>"
        )
    )]
    pub storage_history: Option<PruneMode>,
    /// Bodies History pruning configuration.
    #[cfg_attr(
        any(test, feature = "serde"),
        serde(
            skip_serializing_if = "Option::is_none",
            deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<MINIMUM_PRUNING_DISTANCE, _>"
        )
    )]
    pub bodies_history: Option<PruneMode>,
    /// Receipts pruning configuration by retaining only those receipts that contain logs emitted
    /// by the specified addresses, discarding others. This setting is overridden by `receipts`.
    ///
    /// The [`BlockNumber`](`crate::BlockNumber`) represents the starting block from which point
    /// onwards the receipts are preserved.
    pub receipts_log_filter: ReceiptsLogPruneConfig,
}
impl PruneModes {
    /// Sets pruning to no target.
    pub fn none() -> Self {
        Self::default()
    }

    /// Sets pruning to all targets.
    pub fn all() -> Self {
        Self {
            sender_recovery: Some(PruneMode::Full),
            transaction_lookup: Some(PruneMode::Full),
            receipts: Some(PruneMode::Full),
            account_history: Some(PruneMode::Full),
            storage_history: Some(PruneMode::Full),
            bodies_history: Some(PruneMode::Full),
            receipts_log_filter: Default::default(),
        }
    }

    /// Returns whether there is any kind of receipt pruning configuration,
    /// either the blanket `receipts` mode or a non-empty per-address log filter.
    pub fn has_receipts_pruning(&self) -> bool {
        self.receipts.is_some() || !self.receipts_log_filter.is_empty()
    }

    /// Returns true if all prune modes are set to [`None`].
    pub fn is_empty(&self) -> bool {
        self == &Self::none()
    }

    /// Returns an error if we can't unwind to the targeted block because the target block is
    /// outside the range.
    ///
    /// This is only relevant for certain tables that are required by other stages
    ///
    /// See also <https://github.com/paradigmxyz/reth/issues/16579>
    pub fn ensure_unwind_target_unpruned(
        &self,
        latest_block: u64,
        target_block: u64,
        checkpoints: &[(PruneSegment, PruneCheckpoint)],
    ) -> Result<(), UnwindTargetPrunedError> {
        // How far back from the tip the unwind target lies.
        let distance = latest_block.saturating_sub(target_block);
        // Check both distance-pruned history tables against their checkpoints.
        for (prune_mode, history_type, checkpoint) in &[
            (
                self.account_history,
                HistoryType::AccountHistory,
                checkpoints.iter().find(|(segment, _)| segment.is_account_history()),
            ),
            (
                self.storage_history,
                HistoryType::StorageHistory,
                checkpoints.iter().find(|(segment, _)| segment.is_storage_history()),
            ),
        ] {
            if let Some(PruneMode::Distance(limit)) = prune_mode {
                // check if distance exceeds the configured limit
                if distance > *limit {
                    // Only fail if the target block has already been pruned. When no checkpoint
                    // exists we conservatively assume pruning has advanced all the way to the
                    // latest block, so the unwind is rejected.
                    let pruned_height = checkpoint
                        .and_then(|checkpoint| checkpoint.1.block_number)
                        .unwrap_or(latest_block);
                    if pruned_height >= target_block {
                        // we've pruned the target block already and can't unwind past it
                        return Err(UnwindTargetPrunedError::TargetBeyondHistoryLimit {
                            latest_block,
                            target_block,
                            history_type: history_type.clone(),
                            limit: *limit,
                        })
                    }
                }
            }
        }
        Ok(())
    }
}
/// Deserializes [`Option<PruneMode>`] and validates that the value is not less than the const
/// generic parameter `MIN_BLOCKS`. This parameter represents the number of blocks that needs to be
/// left in database after the pruning.
///
/// 1. For [`PruneMode::Full`], it fails if `MIN_BLOCKS > 0`.
/// 2. For [`PruneMode::Distance`], it fails if `distance < MIN_BLOCKS`: the configured distance
///    must leave at least `MIN_BLOCKS` blocks in the database. (Note that `PruneMode::Distance(0)`
///    still keeps the latest block.)
#[cfg(any(test, feature = "serde"))]
fn deserialize_opt_prune_mode_with_min_blocks<
    'de,
    const MIN_BLOCKS: u64,
    D: serde::Deserializer<'de>,
>(
    deserializer: D,
) -> Result<Option<PruneMode>, D::Error> {
    use alloc::format;
    use serde::Deserialize;
    let prune_mode = Option::<PruneMode>::deserialize(deserializer)?;
    match prune_mode {
        Some(PruneMode::Full) if MIN_BLOCKS > 0 => {
            Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Str("full"),
                // This message should have "expected" wording
                &format!("prune mode that leaves at least {MIN_BLOCKS} blocks in the database")
                    .as_str(),
            ))
        }
        Some(PruneMode::Distance(distance)) if distance < MIN_BLOCKS => {
            Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Unsigned(distance),
                // This message should have "expected" wording
                &format!("prune mode that leaves at least {MIN_BLOCKS} blocks in the database")
                    .as_str(),
            ))
        }
        _ => Ok(prune_mode),
    }
}
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
use serde::Deserialize;
#[test]
fn test_deserialize_opt_prune_mode_with_min_blocks() {
    #[derive(Debug, Deserialize, PartialEq, Eq)]
    struct Wrapper(
        #[serde(deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<10, _>")]
        Option<PruneMode>,
    );

    // Exactly the minimum distance is accepted.
    assert!(serde_json::from_str::<Wrapper>(r#"{"distance": 10}"#).is_ok());
    // One below the minimum is rejected.
    assert_matches!(
        serde_json::from_str::<Wrapper>(r#"{"distance": 9}"#),
        Err(err) if err.to_string() == "invalid value: integer `9`, expected prune mode that leaves at least 10 blocks in the database"
    );
    // Full pruning is rejected whenever a minimum is required.
    assert_matches!(
        serde_json::from_str::<Wrapper>(r#""full""#),
        Err(err) if err.to_string() == "invalid value: string \"full\", expected prune mode that leaves at least 10 blocks in the database"
    );
}
#[test]
fn test_unwind_target_unpruned() {
    // Test case 1: No pruning configured - should always succeed
    let prune_modes = PruneModes::none();
    assert!(prune_modes.ensure_unwind_target_unpruned(1000, 500, &[]).is_ok());
    assert!(prune_modes.ensure_unwind_target_unpruned(1000, 0, &[]).is_ok());

    // Test case 2: Distance pruning within limit - should succeed
    let prune_modes = PruneModes {
        account_history: Some(PruneMode::Distance(100)),
        storage_history: Some(PruneMode::Distance(100)),
        ..Default::default()
    };
    // Distance is 50, limit is 100 - OK
    assert!(prune_modes.ensure_unwind_target_unpruned(1000, 950, &[]).is_ok());

    // Test case 3: Distance exceeds limit with no checkpoint.
    // NOTE: Current implementation assumes pruned_height = latest_block when no checkpoint
    // exists. This means it will fail because it assumes we've pruned up to block 1000 >
    // target 800.
    let prune_modes =
        PruneModes { account_history: Some(PruneMode::Distance(100)), ..Default::default() };
    // Distance is 200 > 100, no checkpoint - current impl treats as pruned up to latest_block
    let result = prune_modes.ensure_unwind_target_unpruned(1000, 800, &[]);
    assert_matches!(
        result,
        Err(UnwindTargetPrunedError::TargetBeyondHistoryLimit {
            latest_block: 1000,
            target_block: 800,
            history_type: HistoryType::AccountHistory,
            limit: 100
        })
    );

    // Test case 4: Distance exceeds limit and target is pruned - should fail
    let prune_modes =
        PruneModes { account_history: Some(PruneMode::Distance(100)), ..Default::default() };
    let checkpoints = vec![(
        PruneSegment::AccountHistory,
        PruneCheckpoint {
            block_number: Some(850),
            tx_number: None,
            prune_mode: PruneMode::Distance(100),
        },
    )];
    // Distance is 200 > 100, and checkpoint shows we've pruned up to block 850 > target 800
    let result = prune_modes.ensure_unwind_target_unpruned(1000, 800, &checkpoints);
    assert_matches!(
        result,
        Err(UnwindTargetPrunedError::TargetBeyondHistoryLimit {
            latest_block: 1000,
            target_block: 800,
            history_type: HistoryType::AccountHistory,
            limit: 100
        })
    );

    // Test case 5: Storage history exceeds limit and is pruned - should fail
    let prune_modes =
        PruneModes { storage_history: Some(PruneMode::Distance(50)), ..Default::default() };
    let checkpoints = vec![(
        PruneSegment::StorageHistory,
        PruneCheckpoint {
            block_number: Some(960),
            tx_number: None,
            prune_mode: PruneMode::Distance(50),
        },
    )];
    // Distance is 100 > 50, and checkpoint shows we've pruned up to block 960 > target 900
    let result = prune_modes.ensure_unwind_target_unpruned(1000, 900, &checkpoints);
    assert_matches!(
        result,
        Err(UnwindTargetPrunedError::TargetBeyondHistoryLimit {
            latest_block: 1000,
            target_block: 900,
            history_type: HistoryType::StorageHistory,
            limit: 50
        })
    );

    // Test case 6: Distance exceeds limit but target block not pruned yet - should succeed
    let prune_modes =
        PruneModes { account_history: Some(PruneMode::Distance(100)), ..Default::default() };
    let checkpoints = vec![(
        PruneSegment::AccountHistory,
        PruneCheckpoint {
            block_number: Some(700),
            tx_number: None,
            prune_mode: PruneMode::Distance(100),
        },
    )];
    // Distance is 200 > 100, but checkpoint shows we've only pruned up to block 700 < target
    // 800
    assert!(prune_modes.ensure_unwind_target_unpruned(1000, 800, &checkpoints).is_ok());

    // Test case 7: Both account and storage history configured, only one fails
    let prune_modes = PruneModes {
        account_history: Some(PruneMode::Distance(200)),
        storage_history: Some(PruneMode::Distance(50)),
        ..Default::default()
    };
    let checkpoints = vec![
        (
            PruneSegment::AccountHistory,
            PruneCheckpoint {
                block_number: Some(700),
                tx_number: None,
                prune_mode: PruneMode::Distance(200),
            },
        ),
        (
            PruneSegment::StorageHistory,
            PruneCheckpoint {
                block_number: Some(960),
                tx_number: None,
                prune_mode: PruneMode::Distance(50),
            },
        ),
    ];
    // For target 900: account history OK (distance 100 < 200), storage history fails (distance
    // 100 > 50, pruned at 960)
    let result = prune_modes.ensure_unwind_target_unpruned(1000, 900, &checkpoints);
    assert_matches!(
        result,
        Err(UnwindTargetPrunedError::TargetBeyondHistoryLimit {
            latest_block: 1000,
            target_block: 900,
            history_type: HistoryType::StorageHistory,
            limit: 50
        })
    );

    // Test case 8: Edge case - exact boundary: `distance > limit` is false when they are equal,
    // so the checkpoint is never consulted
    let prune_modes =
        PruneModes { account_history: Some(PruneMode::Distance(100)), ..Default::default() };
    let checkpoints = vec![(
        PruneSegment::AccountHistory,
        PruneCheckpoint {
            block_number: Some(900),
            tx_number: None,
            prune_mode: PruneMode::Distance(100),
        },
    )];
    // Distance is exactly 100, checkpoint at exactly the target block
    assert!(prune_modes.ensure_unwind_target_unpruned(1000, 900, &checkpoints).is_ok());

    // Test case 9: Full pruning mode - should succeed (no distance check)
    let prune_modes = PruneModes {
        account_history: Some(PruneMode::Full),
        storage_history: Some(PruneMode::Full),
        ..Default::default()
    };
    assert!(prune_modes.ensure_unwind_target_unpruned(1000, 0, &[]).is_ok());

    // Test case 10: Edge case - saturating subtraction (target > latest)
    let prune_modes =
        PruneModes { account_history: Some(PruneMode::Distance(100)), ..Default::default() };
    // Target block (1500) > latest block (1000) - distance should be 0
    assert!(prune_modes.ensure_unwind_target_unpruned(1000, 1500, &[]).is_ok());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/types/src/checkpoint.rs | crates/prune/types/src/checkpoint.rs | use crate::PruneMode;
use alloy_primitives::{BlockNumber, TxNumber};
/// Saves the pruning progress of a stage.
// NOTE(review): the `Compact` derive makes the field layout part of the on-disk codec —
// confirm compatibility before reordering or changing fields.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(any(test, feature = "test-utils"), derive(Default, arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
pub struct PruneCheckpoint {
    /// Highest pruned block number. If it's [None], the pruning for block `0` is not finished yet.
    pub block_number: Option<BlockNumber>,
    /// Highest pruned transaction number, if applicable.
    pub tx_number: Option<TxNumber>,
    /// Prune mode.
    pub prune_mode: PruneMode,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/types/src/mode.rs | crates/prune/types/src/mode.rs | use crate::{segment::PrunePurpose, PruneSegment, PruneSegmentError};
use alloy_primitives::BlockNumber;
/// Prune mode.
///
/// Serialized in lowercase, e.g. `"full"`, `{ "distance": 10 }`, `{ "before": 20 }`.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "serde"), serde(rename_all = "lowercase"))]
pub enum PruneMode {
    /// Prune all blocks.
    Full,
    /// Prune blocks before the `head-N` block number. In other words, keep last N + 1 blocks.
    Distance(u64),
    /// Prune blocks before the specified block number. The specified block number is not pruned.
    Before(BlockNumber),
}
#[cfg(any(test, feature = "test-utils"))]
#[allow(clippy::derivable_impls)]
impl Default for PruneMode {
    // Written manually (not derived) so the impl can be gated to test builds only.
    fn default() -> Self {
        Self::Full
    }
}
impl PruneMode {
    /// Prune blocks up to the specified block number. The specified block number is also pruned.
    ///
    /// This acts as `PruneMode::Before(block_number + 1)`.
    ///
    /// NOTE(review): `block_number + 1` overflows for `u64::MAX`; callers are expected to pass
    /// real block numbers well below that bound — confirm if that invariant holds everywhere.
    pub const fn before_inclusive(block_number: BlockNumber) -> Self {
        Self::Before(block_number + 1)
    }

    /// Returns block up to which variant pruning needs to be done, inclusive, according to the
    /// provided tip.
    ///
    /// Returns `Ok(None)` when there is nothing to prune yet, and an error when the mode is
    /// incompatible with the segment's minimum-blocks requirement.
    pub fn prune_target_block(
        &self,
        tip: BlockNumber,
        segment: PruneSegment,
        purpose: PrunePurpose,
    ) -> Result<Option<(BlockNumber, Self)>, PruneSegmentError> {
        let result = match self {
            // Full pruning is only allowed for segments that require no trailing blocks.
            Self::Full if segment.min_blocks(purpose) == 0 => Some((tip, *self)),
            Self::Distance(distance) if *distance > tip => None, // Nothing to prune yet
            // `distance <= tip` here (previous arm), so `tip - distance` cannot underflow.
            Self::Distance(distance) if *distance >= segment.min_blocks(purpose) => {
                Some((tip - distance, *self))
            }
            // Static files may prune right up to the tip.
            Self::Before(n) if *n == tip + 1 && purpose.is_static_file() => Some((tip, *self)),
            Self::Before(n) if *n > tip => None, // Nothing to prune yet
            // `n <= tip` here (previous arms), so `tip - n` cannot underflow.
            Self::Before(n) => {
                (tip - n >= segment.min_blocks(purpose)).then(|| ((*n).saturating_sub(1), *self))
            }
            // E.g. `Full` on a segment with a minimum distance, or a too-small `Distance`.
            _ => return Err(PruneSegmentError::Configuration(segment)),
        };
        Ok(result)
    }

    /// Check if target block should be pruned according to the provided prune mode and tip.
    pub const fn should_prune(&self, block: BlockNumber, tip: BlockNumber) -> bool {
        match self {
            Self::Full => true,
            Self::Distance(distance) => {
                if *distance > tip {
                    return false
                }
                // Prune strictly below `tip - distance`, i.e. keep the last `distance + 1` blocks.
                block < tip - *distance
            }
            Self::Before(n) => *n > block,
        }
    }

    /// Returns true if the prune mode is [`PruneMode::Full`].
    pub const fn is_full(&self) -> bool {
        matches!(self, Self::Full)
    }

    /// Returns true if the prune mode is [`PruneMode::Distance`].
    pub const fn is_distance(&self) -> bool {
        matches!(self, Self::Distance(_))
    }
}
#[cfg(test)]
mod tests {
use crate::{
PruneMode, PrunePurpose, PruneSegment, PruneSegmentError, MINIMUM_PRUNING_DISTANCE,
};
use assert_matches::assert_matches;
use serde::Deserialize;
#[test]
fn test_prune_target_block() {
    let tip = 20000;
    let segment = PruneSegment::Receipts;

    let tests = vec![
        // MINIMUM_PRUNING_DISTANCE makes this impossible
        (PruneMode::Full, Err(PruneSegmentError::Configuration(segment))),
        // Nothing to prune
        (PruneMode::Distance(tip + 1), Ok(None)),
        (
            PruneMode::Distance(segment.min_blocks(PrunePurpose::User) + 1),
            Ok(Some(tip - (segment.min_blocks(PrunePurpose::User) + 1))),
        ),
        // Nothing to prune
        (PruneMode::Before(tip + 1), Ok(None)),
        (
            PruneMode::Before(tip - MINIMUM_PRUNING_DISTANCE),
            Ok(Some(tip - MINIMUM_PRUNING_DISTANCE - 1)),
        ),
        (
            PruneMode::Before(tip - MINIMUM_PRUNING_DISTANCE - 1),
            Ok(Some(tip - MINIMUM_PRUNING_DISTANCE - 2)),
        ),
        // Not enough distance from the tip for this segment's minimum, so nothing is pruned
        (PruneMode::Before(tip - 1), Ok(None)),
    ];

    for (index, (mode, expected_result)) in tests.into_iter().enumerate() {
        assert_eq!(
            mode.prune_target_block(tip, segment, PrunePurpose::User),
            expected_result.map(|r| r.map(|b| (b, mode))),
            "Test {} failed",
            index + 1,
        );
    }

    // Test for a scenario where there are no minimum blocks and Full can be used
    assert_eq!(
        PruneMode::Full.prune_target_block(tip, PruneSegment::Transactions, PrunePurpose::User),
        Ok(Some((tip, PruneMode::Full))),
    );
}
#[test]
fn test_should_prune() {
    let tip = 20000;

    // (mode, block, expected outcome of `should_prune`)
    let cases = vec![
        (PruneMode::Distance(tip + 1), 1, false),
        (
            PruneMode::Distance(MINIMUM_PRUNING_DISTANCE + 1),
            tip - MINIMUM_PRUNING_DISTANCE - 1,
            false,
        ),
        (
            PruneMode::Distance(MINIMUM_PRUNING_DISTANCE + 1),
            tip - MINIMUM_PRUNING_DISTANCE - 2,
            true,
        ),
        (PruneMode::Before(tip + 1), 1, true),
        (PruneMode::Before(tip + 1), tip + 1, false),
    ];

    for (index, (mode, block, expected)) in cases.into_iter().enumerate() {
        assert_eq!(mode.should_prune(block, tip), expected, "Test {} failed", index + 1,);
    }
}
#[test]
fn prune_mode_deserialize() {
    #[derive(Debug, Deserialize)]
    struct Config {
        a: Option<PruneMode>,
        b: Option<PruneMode>,
        c: Option<PruneMode>,
        d: Option<PruneMode>,
    }

    // Exercises all three serialized forms; `d` is absent and must deserialize to `None`.
    let toml_str = r#"
        a = "full"
        b = { distance = 10 }
        c = { before = 20 }
    "#;

    assert_matches!(
        toml::from_str(toml_str),
        Ok(Config {
            a: Some(PruneMode::Full),
            b: Some(PruneMode::Distance(10)),
            c: Some(PruneMode::Before(20)),
            d: None
        })
    );
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/types/src/pruner.rs | crates/prune/types/src/pruner.rs | use crate::{PruneCheckpoint, PruneMode, PruneSegment};
use alloc::vec::Vec;
use alloy_primitives::{BlockNumber, TxNumber};
use derive_more::Display;
/// Pruner run output: the overall progress plus the per-segment results.
#[derive(Debug)]
pub struct PrunerOutput {
    /// Pruning progress.
    pub progress: PruneProgress,
    /// Pruning output for each segment.
    pub segments: Vec<(PruneSegment, SegmentOutput)>,
}
impl From<PruneProgress> for PrunerOutput {
    /// Wraps a bare progress report into an output with no per-segment results.
    fn from(progress: PruneProgress) -> Self {
        let segments = Vec::new();
        Self { progress, segments }
    }
}
/// Represents information of a pruner run for a segment.
///
/// `Display` renders as `(table=…, pruned=…, status=…)`, suitable for logging.
#[derive(Debug, Clone, PartialEq, Eq, Display)]
#[display("(table={segment}, pruned={pruned}, status={progress})")]
pub struct PrunedSegmentInfo {
    /// The pruned segment
    pub segment: PruneSegment,
    /// Number of pruned entries
    pub pruned: usize,
    /// Prune progress
    pub progress: PruneProgress,
}
/// Segment pruning output.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct SegmentOutput {
    /// Segment pruning progress.
    pub progress: PruneProgress,
    /// Number of entries pruned, i.e. deleted from the database.
    pub pruned: usize,
    /// Pruning checkpoint to save to database, if any.
    pub checkpoint: Option<SegmentOutputCheckpoint>,
}
impl SegmentOutput {
    /// Returns a [`SegmentOutput`] with `progress = PruneProgress::Finished`, `pruned = 0` and
    /// `checkpoint = None`. Use when no pruning is needed.
    pub const fn done() -> Self {
        Self { progress: PruneProgress::Finished, pruned: 0, checkpoint: None }
    }

    /// Returns a [`SegmentOutput`] with `progress = PruneProgress::HasMoreData(reason)`,
    /// `pruned = 0` and the given checkpoint. Use when pruning is needed but cannot be done.
    pub const fn not_done(
        reason: PruneInterruptReason,
        checkpoint: Option<SegmentOutputCheckpoint>,
    ) -> Self {
        Self { progress: PruneProgress::HasMoreData(reason), pruned: 0, checkpoint }
    }
}
/// Segment pruning checkpoint.
///
/// Like [`PruneCheckpoint`] but without the prune mode; convertible via the methods below.
#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)]
pub struct SegmentOutputCheckpoint {
    /// Highest pruned block number. If it's [None], the pruning for block `0` is not finished yet.
    pub block_number: Option<BlockNumber>,
    /// Highest pruned transaction number, if applicable.
    pub tx_number: Option<TxNumber>,
}
impl SegmentOutputCheckpoint {
    /// Converts [`PruneCheckpoint`] to [`SegmentOutputCheckpoint`].
    ///
    /// The prune mode is dropped, as it is not part of the segment checkpoint.
    pub const fn from_prune_checkpoint(checkpoint: PruneCheckpoint) -> Self {
        Self { block_number: checkpoint.block_number, tx_number: checkpoint.tx_number }
    }

    /// Converts [`SegmentOutputCheckpoint`] to [`PruneCheckpoint`] with the provided [`PruneMode`]
    pub const fn as_prune_checkpoint(&self, prune_mode: PruneMode) -> PruneCheckpoint {
        PruneCheckpoint { block_number: self.block_number, tx_number: self.tx_number, prune_mode }
    }
}
/// Progress of pruning.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)]
pub enum PruneProgress {
    /// There is more data to prune.
    // Displays as `HasMoreData(<reason>)`, e.g. `HasMoreData(Timeout)`.
    #[display("HasMoreData({_0})")]
    HasMoreData(PruneInterruptReason),
    /// Pruning has been finished.
    #[display("Finished")]
    Finished,
}
/// Reason for interrupting a prune run.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)]
pub enum PruneInterruptReason {
    /// Prune run timed out.
    Timeout,
    /// Limit on the number of deleted entries (rows in the database) per prune run was reached.
    DeletedEntriesLimitReached,
    /// Unknown reason for stopping prune run.
    Unknown,
}
impl PruneInterruptReason {
    /// Returns `true` if the reason is timeout.
    pub const fn is_timeout(&self) -> bool {
        match self {
            Self::Timeout => true,
            Self::DeletedEntriesLimitReached | Self::Unknown => false,
        }
    }

    /// Returns `true` if the reason is reaching the limit on deleted entries.
    pub const fn is_entries_limit_reached(&self) -> bool {
        match self {
            Self::DeletedEntriesLimitReached => true,
            Self::Timeout | Self::Unknown => false,
        }
    }
}
impl PruneProgress {
    /// Returns `true` if prune run is finished.
    pub const fn is_finished(&self) -> bool {
        match self {
            Self::Finished => true,
            Self::HasMoreData(_) => false,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/types/src/segment.rs | crates/prune/types/src/segment.rs | use crate::MINIMUM_PRUNING_DISTANCE;
use derive_more::Display;
use thiserror::Error;
/// Segment of the data that can be pruned.
#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(test, derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
pub enum PruneSegment {
    /// Prune segment responsible for the `TransactionSenders` table.
    SenderRecovery,
    /// Prune segment responsible for the `TransactionHashNumbers` table.
    TransactionLookup,
    /// Prune segment responsible for all rows in `Receipts` table.
    Receipts,
    /// Prune segment responsible for some rows in `Receipts` table filtered by logs.
    ContractLogs,
    /// Prune segment responsible for the `AccountChangeSets` and `AccountsHistory` tables.
    AccountHistory,
    /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables.
    StorageHistory,
    /// Prune segment responsible for the `CanonicalHeaders`, `Headers` and
    /// `HeaderTerminalDifficulties` tables.
    Headers,
    /// Prune segment responsible for the `Transactions` table.
    Transactions,
}
#[cfg(test)]
#[allow(clippy::derivable_impls)]
impl Default for PruneSegment {
    // Written manually (not derived) so the impl exists in test builds only.
    fn default() -> Self {
        Self::SenderRecovery
    }
}
impl PruneSegment {
    /// Returns minimum number of blocks to keep in the database for this segment.
    pub const fn min_blocks(&self, purpose: PrunePurpose) -> u64 {
        match self {
            // Receipts being moved to static files need no lag behind the tip.
            Self::Receipts if purpose.is_static_file() => 0,
            // These segments can always be pruned right up to the tip.
            Self::SenderRecovery | Self::TransactionLookup | Self::Headers | Self::Transactions => {
                0
            }
            // Everything else keeps a safety margin behind the tip.
            Self::Receipts | Self::ContractLogs | Self::AccountHistory | Self::StorageHistory => {
                MINIMUM_PRUNING_DISTANCE
            }
        }
    }

    /// Returns true if this is [`Self::AccountHistory`].
    pub const fn is_account_history(&self) -> bool {
        matches!(self, Self::AccountHistory)
    }

    /// Returns true if this is [`Self::StorageHistory`].
    pub const fn is_storage_history(&self) -> bool {
        matches!(self, Self::StorageHistory)
    }
}
/// Prune purpose.
#[derive(Debug, Clone, Copy)]
pub enum PrunePurpose {
    /// Prune data according to user configuration.
    User,
    /// Prune data according to highest `static_files` to delete the data from database.
    StaticFile,
}
impl PrunePurpose {
    /// Returns true if the purpose is [`PrunePurpose::User`].
    pub const fn is_user(self) -> bool {
        match self {
            Self::User => true,
            Self::StaticFile => false,
        }
    }

    /// Returns true if the purpose is [`PrunePurpose::StaticFile`].
    pub const fn is_static_file(self) -> bool {
        match self {
            Self::StaticFile => true,
            Self::User => false,
        }
    }
}
/// `PruneSegment` error type.
#[derive(Debug, Error, PartialEq, Eq, Clone)]
pub enum PruneSegmentError {
    /// Invalid configuration of a prune segment, e.g. a prune mode that would keep fewer
    /// blocks than the segment's minimum.
    #[error("the configuration provided for {0} is invalid")]
    Configuration(PruneSegment),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/builder.rs | crates/prune/prune/src/builder.rs | use crate::{segments::SegmentSet, Pruner};
use alloy_eips::eip2718::Encodable2718;
use reth_chainspec::MAINNET_PRUNE_DELETE_LIMIT;
use reth_config::PruneConfig;
use reth_db_api::{table::Value, transaction::DbTxMut};
use reth_exex_types::FinishedExExHeight;
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory,
NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory,
};
use reth_prune_types::PruneModes;
use std::time::Duration;
use tokio::sync::watch;
/// Contains the information required to build a pruner.
#[derive(Debug, Clone)]
pub struct PrunerBuilder {
    /// Minimum pruning interval measured in blocks.
    block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    segments: PruneModes,
    /// The delete limit for pruner, per run.
    delete_limit: usize,
    /// Time a pruner job can run before timing out. Defaults to `None` (never set unless
    /// [`PrunerBuilder::timeout`] is called).
    timeout: Option<Duration>,
    /// The finished height of all `ExEx`'s.
    finished_exex_height: watch::Receiver<FinishedExExHeight>,
}
impl PrunerBuilder {
    /// Default timeout for a prune run.
    pub const DEFAULT_TIMEOUT: Duration = Duration::from_millis(100);

    /// Creates a new [`PrunerBuilder`] from the given [`PruneConfig`].
    pub fn new(pruner_config: PruneConfig) -> Self {
        Self::default()
            .block_interval(pruner_config.block_interval)
            .segments(pruner_config.segments)
    }

    /// Sets the minimum pruning interval measured in blocks.
    pub const fn block_interval(mut self, block_interval: usize) -> Self {
        self.block_interval = block_interval;
        self
    }

    /// Sets the configuration for every part of the data that can be pruned.
    pub fn segments(mut self, segments: PruneModes) -> Self {
        self.segments = segments;
        self
    }

    /// Sets the delete limit for pruner, per run.
    pub const fn delete_limit(mut self, prune_delete_limit: usize) -> Self {
        self.delete_limit = prune_delete_limit;
        self
    }

    /// Sets the timeout for pruner, per run.
    ///
    /// CAUTION: Account and Storage History prune segments treat this timeout as a soft limit,
    /// meaning they can go beyond it.
    pub const fn timeout(mut self, timeout: Duration) -> Self {
        self.timeout = Some(timeout);
        self
    }

    /// Sets the receiver for the finished height of all `ExEx`'s.
    pub fn finished_exex_height(
        mut self,
        finished_exex_height: watch::Receiver<FinishedExExHeight>,
    ) -> Self {
        self.finished_exex_height = finished_exex_height;
        self
    }

    /// Builds a [Pruner] from the current configuration with the given provider factory.
    pub fn build_with_provider_factory<PF>(self, provider_factory: PF) -> Pruner<PF::ProviderRW, PF>
    where
        PF: DatabaseProviderFactory<
            ProviderRW: PruneCheckpointWriter
                + BlockReader<Transaction: Encodable2718>
                + StaticFileProviderFactory<
                    Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
                >,
        > + StaticFileProviderFactory<
            Primitives = <PF::ProviderRW as NodePrimitivesProvider>::Primitives,
        >,
    {
        // Derive the concrete segment list from the configured prune modes.
        // NOTE(review): the static file provider is presumably needed by segments that read from
        // static files — confirm in `SegmentSet::from_components`.
        let segments =
            SegmentSet::from_components(provider_factory.static_file_provider(), self.segments);

        Pruner::new_with_factory(
            provider_factory,
            segments.into_vec(),
            self.block_interval,
            self.delete_limit,
            self.timeout,
            self.finished_exex_height,
        )
    }

    /// Builds a [Pruner] from the current configuration with the given static file provider.
    pub fn build<Provider>(
        self,
        static_file_provider: StaticFileProvider<Provider::Primitives>,
    ) -> Pruner<Provider, ()>
    where
        Provider: StaticFileProviderFactory<
                Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
            > + DBProvider<Tx: DbTxMut>
            + BlockReader<Transaction: Encodable2718>
            + PruneCheckpointWriter,
    {
        let segments = SegmentSet::<Provider>::from_components(static_file_provider, self.segments);

        Pruner::new(
            segments.into_vec(),
            self.block_interval,
            self.delete_limit,
            self.timeout,
            self.finished_exex_height,
        )
    }
}
impl Default for PrunerBuilder {
    /// Default configuration: prune every 5 blocks, prune nothing (no segments enabled),
    /// mainnet delete limit, no timeout, and no `ExEx` constraint.
    fn default() -> Self {
        // A channel whose sender is dropped immediately: the receiver reports
        // `FinishedExExHeight::NoExExs` forever, i.e. pruning is not height-constrained.
        let (_sender, finished_exex_height) = watch::channel(FinishedExExHeight::NoExExs);
        Self {
            block_interval: 5,
            segments: PruneModes::none(),
            delete_limit: MAINNET_PRUNE_DELETE_LIMIT,
            timeout: None,
            finished_exex_height,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/lib.rs | crates/prune/prune/src/lib.rs | //! Pruning implementation.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![allow(missing_docs)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod builder;
mod db_ext;
mod error;
mod limiter;
mod metrics;
mod pruner;
pub mod segments;
use crate::metrics::Metrics;
pub use builder::PrunerBuilder;
pub use error::PrunerError;
pub use limiter::PruneLimiter;
pub use pruner::{Pruner, PrunerResult, PrunerWithFactory, PrunerWithResult};
// Re-export prune types
#[doc(inline)]
pub use reth_prune_types::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/error.rs | crates/prune/prune/src/error.rs | use reth_errors::{DatabaseError, RethError};
use reth_provider::ProviderError;
use reth_prune_types::PruneSegmentError;
use thiserror::Error;
/// Errors that can occur during pruning.
#[derive(Error, Debug)]
pub enum PrunerError {
    /// Error originating from a prune segment.
    #[error(transparent)]
    PruneSegment(#[from] PruneSegmentError),
    /// Data encountered during pruning did not match the pruner's expectations.
    #[error("inconsistent data: {0}")]
    InconsistentData(&'static str),
    /// Low-level database error.
    #[error(transparent)]
    Database(#[from] DatabaseError),
    /// Error returned by a data provider.
    #[error(transparent)]
    Provider(#[from] ProviderError),
}
impl From<PrunerError> for RethError {
fn from(err: PrunerError) -> Self {
match err {
PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => Self::other(err),
PrunerError::Database(err) => Self::Database(err),
PrunerError::Provider(err) => Self::Provider(err),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/limiter.rs | crates/prune/prune/src/limiter.rs | use reth_prune_types::{PruneInterruptReason, PruneProgress};
use std::{
num::NonZeroUsize,
time::{Duration, Instant},
};
/// Limits a pruner run by either the number of entries (rows in the database) that can be deleted
/// or the time it can run.
///
/// Both limits are optional; with neither set, the limiter never interrupts a run.
#[derive(Debug, Clone, Default)]
pub struct PruneLimiter {
    /// Maximum entries (rows in the database) to delete from the database per run.
    deleted_entries_limit: Option<PruneDeletedEntriesLimit>,
    /// Maximum duration of one prune run.
    time_limit: Option<PruneTimeLimit>,
}
/// Tracks how many rows have been deleted against a fixed maximum.
#[derive(Debug, Clone)]
struct PruneDeletedEntriesLimit {
    /// Maximum entries (rows in the database) to delete from the database.
    limit: usize,
    /// Current number of entries (rows in the database) that have been deleted.
    /// May exceed `limit`, since increments are not clamped.
    deleted: usize,
}
impl PruneDeletedEntriesLimit {
    /// Creates a tracker with the given maximum and a zeroed deletion counter.
    const fn new(limit: usize) -> Self {
        Self { deleted: 0, limit }
    }

    /// Whether the number of deleted rows has met or exceeded the maximum.
    const fn is_limit_reached(&self) -> bool {
        self.limit <= self.deleted
    }
}
/// Tracks elapsed wall-clock time against a fixed maximum duration.
#[derive(Debug, Clone)]
struct PruneTimeLimit {
    /// Maximum duration of one prune run.
    limit: Duration,
    /// Time when the prune run has started.
    start: Instant,
}
impl PruneTimeLimit {
    /// Starts the clock now with the given maximum duration.
    fn new(limit: Duration) -> Self {
        Self { start: Instant::now(), limit }
    }

    /// Whether strictly more time than the maximum has elapsed since the start.
    fn is_limit_reached(&self) -> bool {
        Instant::now().duration_since(self.start) > self.limit
    }
}
impl PruneLimiter {
    /// Sets the limit on the number of deleted entries (rows in the database).
    /// If the limit was already set, it will be overwritten; the count of already-deleted
    /// entries is kept as-is.
    pub const fn set_deleted_entries_limit(mut self, limit: usize) -> Self {
        if let Some(deleted_entries_limit) = self.deleted_entries_limit.as_mut() {
            deleted_entries_limit.limit = limit;
        } else {
            self.deleted_entries_limit = Some(PruneDeletedEntriesLimit::new(limit));
        }
        self
    }

    /// Sets the limit on the number of deleted entries (rows in the database) to a biggest
    /// multiple of the given denominator that is smaller than the existing limit.
    ///
    /// If the limit wasn't set, does nothing.
    pub fn floor_deleted_entries_limit_to_multiple_of(mut self, denominator: NonZeroUsize) -> Self {
        if let Some(deleted_entries_limit) = self.deleted_entries_limit.as_mut() {
            // Integer division rounds down, so this floors to the nearest multiple.
            deleted_entries_limit.limit =
                (deleted_entries_limit.limit / denominator) * denominator.get();
        }

        self
    }

    /// Returns `true` if the limit on the number of deleted entries (rows in the database) is
    /// reached. Returns `false` when no such limit is set.
    pub fn is_deleted_entries_limit_reached(&self) -> bool {
        self.deleted_entries_limit.as_ref().is_some_and(|limit| limit.is_limit_reached())
    }

    /// Increments the number of deleted entries by the given number.
    ///
    /// The counter is not clamped and may exceed the configured limit.
    pub const fn increment_deleted_entries_count_by(&mut self, entries: usize) {
        if let Some(limit) = self.deleted_entries_limit.as_mut() {
            limit.deleted += entries;
        }
    }

    /// Increments the number of deleted entries by one.
    pub const fn increment_deleted_entries_count(&mut self) {
        self.increment_deleted_entries_count_by(1)
    }

    /// Returns the number of deleted entries left before the limit is reached.
    ///
    /// Returns `Some(0)` if the limit has been reached or exceeded — the subtraction
    /// saturates because the deletion counter is allowed to overshoot the limit, and a
    /// plain subtraction would underflow in that case.
    pub fn deleted_entries_limit_left(&self) -> Option<usize> {
        self.deleted_entries_limit.as_ref().map(|limit| limit.limit.saturating_sub(limit.deleted))
    }

    /// Returns the limit on the number of deleted entries (rows in the database).
    pub fn deleted_entries_limit(&self) -> Option<usize> {
        self.deleted_entries_limit.as_ref().map(|limit| limit.limit)
    }

    /// Sets the time limit. The clock starts at the moment of this call.
    pub fn set_time_limit(mut self, limit: Duration) -> Self {
        self.time_limit = Some(PruneTimeLimit::new(limit));

        self
    }

    /// Returns `true` if time limit is reached. Returns `false` when no time limit is set.
    pub fn is_time_limit_reached(&self) -> bool {
        self.time_limit.as_ref().is_some_and(|limit| limit.is_limit_reached())
    }

    /// Returns `true` if any limit is reached.
    pub fn is_limit_reached(&self) -> bool {
        self.is_deleted_entries_limit_reached() || self.is_time_limit_reached()
    }

    /// Creates new [`PruneInterruptReason`] based on the limiter's state.
    ///
    /// The time limit takes precedence over the deleted-entries limit when both are reached.
    pub fn interrupt_reason(&self) -> PruneInterruptReason {
        if self.is_time_limit_reached() {
            PruneInterruptReason::Timeout
        } else if self.is_deleted_entries_limit_reached() {
            PruneInterruptReason::DeletedEntriesLimitReached
        } else {
            PruneInterruptReason::Unknown
        }
    }

    /// Creates new [`PruneProgress`].
    ///
    /// If `done == true`, returns [`PruneProgress::Finished`], otherwise
    /// [`PruneProgress::HasMoreData`] is returned with [`PruneInterruptReason`] according to the
    /// limiter's state.
    pub fn progress(&self, done: bool) -> PruneProgress {
        if done {
            PruneProgress::Finished
        } else {
            PruneProgress::HasMoreData(self.interrupt_reason())
        }
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for the limiter trackers and the public `PruneLimiter` API.
    use super::*;
    use std::thread::sleep;

    #[test]
    fn test_prune_deleted_entries_limit_initial_state() {
        let limit_tracker = PruneDeletedEntriesLimit::new(10);
        // Limit should be set properly
        assert_eq!(limit_tracker.limit, 10);
        // No entries should be deleted
        assert_eq!(limit_tracker.deleted, 0);
        assert!(!limit_tracker.is_limit_reached());
    }

    #[test]
    fn test_prune_deleted_entries_limit_is_limit_reached() {
        // Test when the deleted entries are less than the limit
        let mut limit_tracker = PruneDeletedEntriesLimit::new(5);
        limit_tracker.deleted = 3;
        assert!(!limit_tracker.is_limit_reached());

        // Test when the deleted entries are equal to the limit
        limit_tracker.deleted = 5;
        assert!(limit_tracker.is_limit_reached());

        // Test when the deleted entries exceed the limit
        limit_tracker.deleted = 6;
        assert!(limit_tracker.is_limit_reached());
    }

    #[test]
    fn test_prune_time_limit_initial_state() {
        let time_limit = PruneTimeLimit::new(Duration::from_secs(10));
        // The limit should be set correctly
        assert_eq!(time_limit.limit, Duration::from_secs(10));
        // The elapsed time should be very small right after creation
        assert!(time_limit.start.elapsed() < Duration::from_secs(1));
        // Limit should not be reached initially
        assert!(!time_limit.is_limit_reached());
    }

    #[test]
    fn test_prune_time_limit_is_limit_reached() {
        let time_limit = PruneTimeLimit::new(Duration::from_millis(50));

        // Simulate waiting for some time (less than the limit)
        std::thread::sleep(Duration::from_millis(30));
        assert!(!time_limit.is_limit_reached());

        // Simulate waiting for time greater than the limit
        std::thread::sleep(Duration::from_millis(30));
        assert!(time_limit.is_limit_reached());
    }

    #[test]
    fn test_set_deleted_entries_limit_initial_state() {
        let pruner = PruneLimiter::default().set_deleted_entries_limit(100);
        // The deleted_entries_limit should be set with the correct limit
        assert!(pruner.deleted_entries_limit.is_some());
        let deleted_entries_limit = pruner.deleted_entries_limit.unwrap();
        assert_eq!(deleted_entries_limit.limit, 100);
        // The deleted count should be initially zero
        assert_eq!(deleted_entries_limit.deleted, 0);
        // The limit should not be reached initially
        assert!(!deleted_entries_limit.is_limit_reached());
    }

    #[test]
    fn test_set_deleted_entries_limit_overwrite_existing() {
        let mut pruner = PruneLimiter::default().set_deleted_entries_limit(50);
        // Overwrite the existing limit
        pruner = pruner.set_deleted_entries_limit(200);

        assert!(pruner.deleted_entries_limit.is_some());
        let deleted_entries_limit = pruner.deleted_entries_limit.unwrap();
        // Check that the limit has been overwritten correctly
        assert_eq!(deleted_entries_limit.limit, 200);
        // Deleted count should still be zero
        assert_eq!(deleted_entries_limit.deleted, 0);
        assert!(!deleted_entries_limit.is_limit_reached());
    }

    #[test]
    fn test_set_deleted_entries_limit_when_limit_is_reached() {
        let mut pruner = PruneLimiter::default().set_deleted_entries_limit(5);
        assert!(pruner.deleted_entries_limit.is_some());
        let mut deleted_entries_limit = pruner.deleted_entries_limit.clone().unwrap();

        // Simulate deletion of entries — note this mutates a clone, not the limiter itself
        deleted_entries_limit.deleted = 5;
        assert!(deleted_entries_limit.is_limit_reached());

        // Overwrite the limit. The limiter's own deleted counter was never incremented
        // (only the clone above was), so it is still 0 here; `set_deleted_entries_limit`
        // replaces only the limit value.
        pruner = pruner.set_deleted_entries_limit(10);
        deleted_entries_limit = pruner.deleted_entries_limit.unwrap();
        assert_eq!(deleted_entries_limit.limit, 10);
        // The limiter's own deletion count is still zero
        assert_eq!(deleted_entries_limit.deleted, 0);
        assert!(!deleted_entries_limit.is_limit_reached());
    }

    #[test]
    fn test_floor_deleted_entries_limit_to_multiple_of() {
        let limiter = PruneLimiter::default().set_deleted_entries_limit(15);
        let denominator = NonZeroUsize::new(4).unwrap();

        // Floor limit to the largest multiple of 4 less than or equal to 15 (that is 12)
        let updated_limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator);
        assert_eq!(updated_limiter.deleted_entries_limit.unwrap().limit, 12);

        // Test when the limit is already a multiple of the denominator
        let limiter = PruneLimiter::default().set_deleted_entries_limit(16);
        let updated_limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator);
        assert_eq!(updated_limiter.deleted_entries_limit.unwrap().limit, 16);

        // Test when there's no limit set (should not panic)
        let limiter = PruneLimiter::default();
        let updated_limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator);
        assert!(updated_limiter.deleted_entries_limit.is_none());
    }

    #[test]
    fn test_is_deleted_entries_limit_reached() {
        // Limit is not set, should return false
        let limiter = PruneLimiter::default();
        assert!(!limiter.is_deleted_entries_limit_reached());

        // Limit is set but not reached, should return false
        let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
        limiter.deleted_entries_limit.as_mut().unwrap().deleted = 5;
        // 5 entries deleted out of 10
        assert!(!limiter.is_deleted_entries_limit_reached());

        // Limit is reached, should return true
        limiter.deleted_entries_limit.as_mut().unwrap().deleted = 10;
        // 10 entries deleted out of 10
        assert!(limiter.is_deleted_entries_limit_reached());

        // Deleted entries exceed the limit, should return true
        limiter.deleted_entries_limit.as_mut().unwrap().deleted = 12;
        // 12 entries deleted out of 10
        assert!(limiter.is_deleted_entries_limit_reached());
    }

    #[test]
    fn test_increment_deleted_entries_count_by() {
        // Increment when no limit is set
        let mut limiter = PruneLimiter::default();
        limiter.increment_deleted_entries_count_by(5);
        assert_eq!(limiter.deleted_entries_limit.as_ref().map(|l| l.deleted), None); // Still None

        // Increment when limit is set
        let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
        limiter.increment_deleted_entries_count_by(3);
        assert_eq!(limiter.deleted_entries_limit.as_ref().unwrap().deleted, 3); // Now 3 deleted

        // Increment again
        limiter.increment_deleted_entries_count_by(2);
        assert_eq!(limiter.deleted_entries_limit.as_ref().unwrap().deleted, 5); // Now 5 deleted
    }

    #[test]
    fn test_increment_deleted_entries_count() {
        let mut limiter = PruneLimiter::default().set_deleted_entries_limit(5);
        assert_eq!(limiter.deleted_entries_limit.as_ref().unwrap().deleted, 0); // Initially 0

        limiter.increment_deleted_entries_count(); // Increment by 1
        assert_eq!(limiter.deleted_entries_limit.as_ref().unwrap().deleted, 1); // Now 1
    }

    #[test]
    fn test_deleted_entries_limit_left() {
        // Test when limit is set and some entries are deleted
        let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
        limiter.increment_deleted_entries_count_by(3); // Simulate 3 deleted entries
        assert_eq!(limiter.deleted_entries_limit_left(), Some(7)); // 10 - 3 = 7

        // Test when no entries are deleted
        limiter = PruneLimiter::default().set_deleted_entries_limit(5);
        assert_eq!(limiter.deleted_entries_limit_left(), Some(5)); // 5 - 0 = 5

        // Test when limit is reached
        limiter.increment_deleted_entries_count_by(5); // Simulate deleting 5 entries
        assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); // 5 - 5 = 0

        // Test when limit is not set
        limiter = PruneLimiter::default(); // No limit set
        assert_eq!(limiter.deleted_entries_limit_left(), None); // Should be None
    }

    #[test]
    fn test_set_time_limit() {
        // Create a PruneLimiter instance with no time limit set
        let mut limiter = PruneLimiter::default();

        // Set a time limit of 5 seconds
        limiter = limiter.set_time_limit(Duration::new(5, 0));

        // Verify that the time limit is set correctly
        assert!(limiter.time_limit.is_some());
        let time_limit = limiter.time_limit.as_ref().unwrap();
        assert_eq!(time_limit.limit, Duration::new(5, 0));
        // Ensure the start time is recent
        assert!(time_limit.start.elapsed() < Duration::new(1, 0));
    }

    #[test]
    fn test_is_time_limit_reached() {
        // Create a PruneLimiter instance and set a time limit of 10 milliseconds
        let mut limiter = PruneLimiter::default();

        // Time limit should not be reached initially
        assert!(!limiter.is_time_limit_reached(), "Time limit should not be reached yet");

        limiter = limiter.set_time_limit(Duration::new(0, 10_000_000)); // 10 milliseconds

        // Sleep for 5 milliseconds (less than the time limit)
        sleep(Duration::new(0, 5_000_000)); // 5 milliseconds
        assert!(!limiter.is_time_limit_reached(), "Time limit should not be reached yet");

        // Sleep for an additional 10 milliseconds (totaling 15 milliseconds)
        sleep(Duration::new(0, 10_000_000)); // 10 milliseconds
        assert!(limiter.is_time_limit_reached(), "Time limit should be reached now");
    }

    #[test]
    fn test_is_limit_reached() {
        // Create a PruneLimiter instance
        let mut limiter = PruneLimiter::default();

        // Test when no limits are set
        assert!(!limiter.is_limit_reached(), "Limit should not be reached with no limits set");

        // Set a deleted entries limit
        limiter = limiter.set_deleted_entries_limit(5);
        assert!(
            !limiter.is_limit_reached(),
            "Limit should not be reached when deleted entries are less than limit"
        );

        // Increment deleted entries count to reach the limit
        limiter.increment_deleted_entries_count_by(5);
        assert!(
            limiter.is_limit_reached(),
            "Limit should be reached when deleted entries equal the limit"
        );

        // Reset the limiter
        limiter = PruneLimiter::default();

        // Set a time limit and check
        limiter = limiter.set_time_limit(Duration::new(0, 10_000_000)); // 10 milliseconds

        // Sleep for 5 milliseconds (less than the time limit)
        sleep(Duration::new(0, 5_000_000)); // 5 milliseconds
        assert!(
            !limiter.is_limit_reached(),
            "Limit should not be reached when time limit not reached"
        );

        // Sleep for another 10 milliseconds (totaling 15 milliseconds)
        sleep(Duration::new(0, 10_000_000)); // 10 milliseconds
        assert!(limiter.is_limit_reached(), "Limit should be reached when time limit is reached");
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/metrics.rs | crates/prune/prune/src/metrics.rs | use reth_metrics::{
metrics::{Gauge, Histogram},
Metrics,
};
use reth_prune_types::PruneSegment;
use std::collections::HashMap;
// Top-level pruner metrics, exposed under the `pruner` scope.
#[derive(Metrics)]
#[metrics(scope = "pruner")]
pub(crate) struct Metrics {
    /// Pruning duration
    pub(crate) duration_seconds: Histogram,
    // Not a metric itself (hence the skip): a lazily-populated container of per-segment
    // metrics, keyed by prune segment.
    #[metric(skip)]
    prune_segments: HashMap<PruneSegment, PrunerSegmentMetrics>,
}
impl Metrics {
    /// Returns existing or initializes a new instance of [`PrunerSegmentMetrics`] for the provided
    /// [`PruneSegment`].
    ///
    /// New instances are labeled with the segment's name.
    pub(crate) fn get_prune_segment_metrics(
        &mut self,
        segment: PruneSegment,
    ) -> &mut PrunerSegmentMetrics {
        use std::collections::hash_map::Entry;
        match self.prune_segments.entry(segment) {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(PrunerSegmentMetrics::new_with_labels(&[(
                "segment",
                segment.to_string(),
            )])),
        }
    }
}
// Per-segment pruner metrics, exposed under the `pruner.segments` scope and labeled with
// the segment name (see `Metrics::get_prune_segment_metrics`).
#[derive(Metrics)]
#[metrics(scope = "pruner.segments")]
pub(crate) struct PrunerSegmentMetrics {
    /// Pruning duration for this segment
    pub(crate) duration_seconds: Histogram,
    /// Highest pruned block per segment
    pub(crate) highest_pruned_block: Gauge,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/db_ext.rs | crates/prune/prune/src/db_ext.rs | use crate::PruneLimiter;
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW, RangeWalker},
table::{Table, TableRow},
transaction::DbTxMut,
DatabaseError,
};
use std::{fmt::Debug, ops::RangeBounds};
use tracing::debug;
/// Pruning helpers for mutable database transactions.
pub(crate) trait DbTxPruneExt: DbTxMut {
    /// Prune the table for the specified pre-sorted key iterator.
    ///
    /// Stops early if the limiter reports any limit as reached.
    ///
    /// Returns number of rows pruned and whether the key iterator was fully consumed.
    fn prune_table_with_iterator<T: Table>(
        &self,
        keys: impl IntoIterator<Item = T::Key>,
        limiter: &mut PruneLimiter,
        mut delete_callback: impl FnMut(TableRow<T>),
    ) -> Result<(usize, bool), DatabaseError> {
        let mut cursor = self.cursor_write::<T>()?;
        let mut keys = keys.into_iter();

        let mut deleted_entries = 0;

        for key in &mut keys {
            if limiter.is_limit_reached() {
                debug!(
                    target: "providers::db",
                    ?limiter,
                    deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(),
                    time_limit = %limiter.is_time_limit_reached(),
                    table = %T::NAME,
                    "Pruning limit reached"
                );
                break
            }

            // Keys with no matching row are skipped silently and still count as processed.
            let row = cursor.seek_exact(key)?;
            if let Some(row) = row {
                cursor.delete_current()?;
                limiter.increment_deleted_entries_count();
                deleted_entries += 1;
                delete_callback(row);
            }
        }

        // Done when no keys remain: either the loop consumed them all, or the limiter
        // interrupted with keys left over (in which case `next()` returns `Some`).
        let done = keys.next().is_none();
        Ok((deleted_entries, done))
    }

    /// Prune the table for the specified key range.
    ///
    /// Returns number of rows pruned.
    // NOTE(review): `deleted_entries` below is incremented once per row *walked*, including
    // rows the skip filter retained without deleting — so the returned count can exceed the
    // number of rows actually deleted. Confirm this is intentional before relying on it as
    // an exact deletion count.
    fn prune_table_with_range<T: Table>(
        &self,
        keys: impl RangeBounds<T::Key> + Clone + Debug,
        limiter: &mut PruneLimiter,
        mut skip_filter: impl FnMut(&TableRow<T>) -> bool,
        mut delete_callback: impl FnMut(TableRow<T>),
    ) -> Result<(usize, bool), DatabaseError> {
        let mut cursor = self.cursor_write::<T>()?;
        let mut walker = cursor.walk_range(keys)?;

        let mut deleted_entries = 0;

        let done = loop {
            // check for time out must be done in this scope since it's not done in
            // `prune_table_with_range_step`
            if limiter.is_limit_reached() {
                debug!(
                    target: "providers::db",
                    ?limiter,
                    deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(),
                    time_limit = %limiter.is_time_limit_reached(),
                    table = %T::NAME,
                    "Pruning limit reached"
                );
                break false
            }

            let done = self.prune_table_with_range_step(
                &mut walker,
                limiter,
                &mut skip_filter,
                &mut delete_callback,
            )?;
            if done {
                break true
            }
            deleted_entries += 1;
        };

        Ok((deleted_entries, done))
    }

    /// Steps once with the given walker and prunes the entry in the table.
    ///
    /// Returns `true` if the walker is finished, `false` if it may have more data to prune.
    ///
    /// CAUTION: Pruner limits are not checked. This allows for a clean exit of a prune run that's
    /// pruning different tables concurrently, by letting them step to the same height before
    /// timing out.
    fn prune_table_with_range_step<T: Table>(
        &self,
        walker: &mut RangeWalker<'_, T, Self::CursorMut<T>>,
        limiter: &mut PruneLimiter,
        skip_filter: &mut impl FnMut(&TableRow<T>) -> bool,
        delete_callback: &mut impl FnMut(TableRow<T>),
    ) -> Result<bool, DatabaseError> {
        // Walker exhausted: nothing left in the range.
        let Some(res) = walker.next() else { return Ok(true) };

        let row = res?;

        // Rows the filter marks to skip are kept in the table; only the rest are deleted.
        if !skip_filter(&row) {
            walker.delete_current()?;
            limiter.increment_deleted_entries_count();
            delete_callback(row);
        }

        Ok(false)
    }
}
// Blanket implementation: every mutable database transaction gets the pruning helpers.
impl<Tx: DbTxMut> DbTxPruneExt for Tx {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/pruner.rs | crates/prune/prune/src/pruner.rs | //! Support for pruning.
use crate::{
segments::{PruneInput, Segment},
Metrics, PruneLimiter, PrunerError, PrunerEvent,
};
use alloy_primitives::BlockNumber;
use reth_exex_types::FinishedExExHeight;
use reth_provider::{
DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter,
};
use reth_prune_types::{PruneProgress, PrunedSegmentInfo, PrunerOutput};
use reth_tokio_util::{EventSender, EventStream};
use std::time::{Duration, Instant};
use tokio::sync::watch;
use tracing::debug;
/// Result of [`Pruner::run`] execution.
pub type PrunerResult = Result<PrunerOutput, PrunerError>;

/// The pruner type itself with the result of [`Pruner::run`]
pub type PrunerWithResult<S, DB> = (Pruner<S, DB>, PrunerResult);

/// Pruner with preset provider factory (the factory's read-write provider as `Provider`).
pub type PrunerWithFactory<PF> = Pruner<<PF as DatabaseProviderFactory>::ProviderRW, PF>;
/// Pruning routine. Main pruning logic happens in [`Pruner::run`].
#[derive(Debug)]
pub struct Pruner<Provider, PF> {
    /// Provider factory. If pruner is initialized without it, it will be set to `()`.
    provider_factory: PF,
    /// The prune segments to run, in order, on every prune pass.
    segments: Vec<Box<dyn Segment<Provider>>>,
    /// Minimum pruning interval measured in blocks. All prune segments are checked and, if needed,
    /// pruned, when the chain advances by the specified number of blocks.
    min_block_interval: usize,
    /// Previous tip block number when the pruner was run. Even if no data was pruned, this block
    /// number is updated with the tip block number the pruner was called with. It's used in
    /// conjunction with `min_block_interval` to determine when the pruning needs to be initiated.
    previous_tip_block_number: Option<BlockNumber>,
    /// Maximum total entries to prune (delete from database) per run.
    delete_limit: usize,
    /// Maximum time for one pruner run.
    timeout: Option<Duration>,
    /// The finished height of all `ExEx`'s.
    finished_exex_height: watch::Receiver<FinishedExExHeight>,
    #[doc(hidden)]
    metrics: Metrics,
    /// Broadcasts [`PrunerEvent`]s to subscribers obtained via [`Pruner::events`].
    event_sender: EventSender<PrunerEvent>,
}
impl<Provider> Pruner<Provider, ()> {
/// Creates a new [Pruner] without a provider factory.
pub fn new(
segments: Vec<Box<dyn Segment<Provider>>>,
min_block_interval: usize,
delete_limit: usize,
timeout: Option<Duration>,
finished_exex_height: watch::Receiver<FinishedExExHeight>,
) -> Self {
Self {
provider_factory: (),
segments,
min_block_interval,
previous_tip_block_number: None,
delete_limit,
timeout,
finished_exex_height,
metrics: Metrics::default(),
event_sender: Default::default(),
}
}
}
impl<PF> Pruner<PF::ProviderRW, PF>
where
    PF: DatabaseProviderFactory,
{
    /// Creates a new pruner with the given provider factory.
    pub fn new_with_factory(
        provider_factory: PF,
        segments: Vec<Box<dyn Segment<PF::ProviderRW>>>,
        min_block_interval: usize,
        delete_limit: usize,
        timeout: Option<Duration>,
        finished_exex_height: watch::Receiver<FinishedExExHeight>,
    ) -> Self {
        Self {
            provider_factory,
            segments,
            min_block_interval,
            // No run has happened yet, so there is no previous tip to compare against.
            previous_tip_block_number: None,
            delete_limit,
            timeout,
            finished_exex_height,
            metrics: Metrics::default(),
            event_sender: Default::default(),
        }
    }
}
impl<Provider, S> Pruner<Provider, S>
where
    Provider: PruneCheckpointReader + PruneCheckpointWriter,
{
    /// Listen for events on the pruner.
    pub fn events(&self) -> EventStream<PrunerEvent> {
        self.event_sender.new_listener()
    }

    /// Run the pruner with the given provider. This will only prune data up to the highest
    /// finished `ExEx` height — or up to the given tip if there are no `ExEx`s.
    ///
    /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data
    /// to prune.
    pub fn run_with_provider(
        &mut self,
        provider: &Provider,
        tip_block_number: BlockNumber,
    ) -> PrunerResult {
        // Never prune past what ExExs have processed; bail if they aren't ready.
        let Some(tip_block_number) =
            self.adjust_tip_block_number_to_finished_exex_height(tip_block_number)
        else {
            return Ok(PruneProgress::Finished.into())
        };

        if tip_block_number == 0 {
            self.previous_tip_block_number = Some(tip_block_number);

            debug!(target: "pruner", %tip_block_number, "Nothing to prune yet");
            return Ok(PruneProgress::Finished.into())
        }

        self.event_sender.notify(PrunerEvent::Started { tip_block_number });

        debug!(target: "pruner", %tip_block_number, "Pruner started");
        let start = Instant::now();

        // Per-run limiter: bounded by the configured delete limit and optional timeout.
        let mut limiter = PruneLimiter::default().set_deleted_entries_limit(self.delete_limit);
        if let Some(timeout) = self.timeout {
            limiter = limiter.set_time_limit(timeout);
        };

        let (stats, deleted_entries, output) =
            self.prune_segments(provider, tip_block_number, &mut limiter)?;

        // Recorded even when nothing was pruned, so the next interval check uses this tip.
        self.previous_tip_block_number = Some(tip_block_number);

        let elapsed = start.elapsed();
        self.metrics.duration_seconds.record(elapsed);

        let message = match output.progress {
            PruneProgress::HasMoreData(_) => "Pruner interrupted and has more data to prune",
            PruneProgress::Finished => "Pruner finished",
        };

        debug!(
            target: "pruner",
            %tip_block_number,
            ?elapsed,
            ?deleted_entries,
            ?limiter,
            ?output,
            ?stats,
            "{message}",
        );

        self.event_sender.notify(PrunerEvent::Finished { tip_block_number, elapsed, stats });

        Ok(output)
    }

    /// Prunes the segments that the [Pruner] was initialized with, and the segments that needs to
    /// be pruned according to the highest `static_files`. Segments are parts of the database that
    /// represent one or more tables.
    ///
    /// Returns a list of stats per pruned segment, total number of entries pruned, and
    /// [`PruneProgress`].
    fn prune_segments(
        &mut self,
        provider: &Provider,
        tip_block_number: BlockNumber,
        limiter: &mut PruneLimiter,
    ) -> Result<(Vec<PrunedSegmentInfo>, usize, PrunerOutput), PrunerError> {
        let mut stats = Vec::with_capacity(self.segments.len());
        let mut pruned = 0;
        let mut output = PrunerOutput {
            progress: PruneProgress::Finished,
            segments: Vec::with_capacity(self.segments.len()),
        };

        for segment in &self.segments {
            // Stop scheduling further segments once any limit has been hit.
            if limiter.is_limit_reached() {
                break
            }

            // A segment only runs if its prune mode yields a target block for this tip.
            if let Some((to_block, prune_mode)) = segment
                .mode()
                .map(|mode| {
                    mode.prune_target_block(tip_block_number, segment.segment(), segment.purpose())
                })
                .transpose()?
                .flatten()
            {
                debug!(
                    target: "pruner",
                    segment = ?segment.segment(),
                    purpose = ?segment.purpose(),
                    %to_block,
                    ?prune_mode,
                    "Segment pruning started"
                );

                let segment_start = Instant::now();
                let previous_checkpoint = provider.get_prune_checkpoint(segment.segment())?;
                // The segment receives a clone of the limiter; its internal increments do not
                // propagate back — the shared limiter is updated below from `pruned`.
                let segment_output = segment.prune(
                    provider,
                    PruneInput { previous_checkpoint, to_block, limiter: limiter.clone() },
                )?;
                if let Some(checkpoint) = segment_output.checkpoint {
                    segment
                        .save_checkpoint(provider, checkpoint.as_prune_checkpoint(prune_mode))?;
                }
                self.metrics
                    .get_prune_segment_metrics(segment.segment())
                    .duration_seconds
                    .record(segment_start.elapsed());

                if let Some(highest_pruned_block) =
                    segment_output.checkpoint.and_then(|checkpoint| checkpoint.block_number)
                {
                    self.metrics
                        .get_prune_segment_metrics(segment.segment())
                        .highest_pruned_block
                        .set(highest_pruned_block as f64);
                }

                // The overall progress reflects the last segment that ran.
                output.progress = segment_output.progress;

                output.segments.push((segment.segment(), segment_output));

                debug!(
                    target: "pruner",
                    segment = ?segment.segment(),
                    purpose = ?segment.purpose(),
                    %to_block,
                    ?prune_mode,
                    %segment_output.pruned,
                    "Segment pruning finished"
                );

                if segment_output.pruned > 0 {
                    limiter.increment_deleted_entries_count_by(segment_output.pruned);
                    pruned += segment_output.pruned;
                    let info = PrunedSegmentInfo {
                        segment: segment.segment(),
                        pruned: segment_output.pruned,
                        progress: segment_output.progress,
                    };
                    stats.push(info);
                }
            } else {
                debug!(target: "pruner", segment = ?segment.segment(), purpose = ?segment.purpose(), "Nothing to prune for the segment");
            }
        }

        Ok((stats, pruned, output))
    }

    /// Returns `true` if the pruning is needed at the provided tip block number.
    /// This is determined by the check against minimum pruning interval and last pruned block
    /// number.
    pub fn is_pruning_needed(&self, tip_block_number: BlockNumber) -> bool {
        let Some(tip_block_number) =
            self.adjust_tip_block_number_to_finished_exex_height(tip_block_number)
        else {
            return false
        };

        // Saturating subtraction is needed for the case when the chain was reverted, meaning
        // current block number might be less than the previous tip block number.
        // If that's the case, no pruning is needed as outdated data is also reverted.
        if tip_block_number.saturating_sub(self.previous_tip_block_number.unwrap_or_default()) >=
            self.min_block_interval as u64
        {
            debug!(
                target: "pruner",
                previous_tip_block_number = ?self.previous_tip_block_number,
                %tip_block_number,
                "Minimum pruning interval reached"
            );
            true
        } else {
            false
        }
    }

    /// Adjusts the tip block number to the finished `ExEx` height. This is needed to not prune more
    /// data than `ExExs` have processed. Depending on the height:
    /// - [`FinishedExExHeight::NoExExs`] returns the tip block number as no adjustment for `ExExs`
    ///   is needed.
    /// - [`FinishedExExHeight::NotReady`] returns `None` as not all `ExExs` have emitted a
    ///   `FinishedHeight` event yet.
    /// - [`FinishedExExHeight::Height`] returns the finished `ExEx` height.
    fn adjust_tip_block_number_to_finished_exex_height(
        &self,
        tip_block_number: BlockNumber,
    ) -> Option<BlockNumber> {
        match *self.finished_exex_height.borrow() {
            FinishedExExHeight::NoExExs => Some(tip_block_number),
            FinishedExExHeight::NotReady => {
                debug!(target: "pruner", %tip_block_number, "Not all ExExs have emitted a `FinishedHeight` event yet, can't prune");
                None
            }
            FinishedExExHeight::Height(finished_exex_height) => {
                debug!(target: "pruner", %tip_block_number, %finished_exex_height, "Adjusting tip block number to the finished ExEx height");
                Some(finished_exex_height)
            }
        }
    }
}
impl<PF> Pruner<PF::ProviderRW, PF>
where
    PF: DatabaseProviderFactory<ProviderRW: PruneCheckpointWriter + PruneCheckpointReader>,
{
    /// Run the pruner. This will only prune data up to the highest finished ExEx height, if there
    /// are no ExExs.
    ///
    /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data
    /// to prune.
    pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult {
        let provider = self.provider_factory.database_provider_rw()?;
        let result = self.run_with_provider(&provider, tip_block_number);
        // NOTE: the transaction is committed even when `run_with_provider` returned an
        // error, so any segment progress made before the failure is persisted.
        provider.commit()?;
        result
    }
}
#[cfg(test)]
mod tests {
    use crate::Pruner;
    use reth_exex_types::FinishedExExHeight;
    use reth_provider::test_utils::create_test_provider_factory;
    /// Exercises `is_pruning_needed` across the minimum-interval check and every
    /// `FinishedExExHeight` adjustment variant.
    #[test]
    fn is_pruning_needed() {
        let provider_factory = create_test_provider_factory();
        let (finished_exex_height_tx, finished_exex_height_rx) =
            tokio::sync::watch::channel(FinishedExExHeight::NoExExs);
        // min_block_interval = 5; no tip has been pruned yet.
        let mut pruner =
            Pruner::new_with_factory(provider_factory, vec![], 5, 0, None, finished_exex_height_rx);
        // No last pruned block number was set before
        let first_block_number = 1;
        assert!(!pruner.is_pruning_needed(first_block_number));
        pruner.previous_tip_block_number = Some(first_block_number);
        // Tip block number delta is >= than min block interval
        let second_block_number = first_block_number + pruner.min_block_interval as u64;
        assert!(pruner.is_pruning_needed(second_block_number));
        pruner.previous_tip_block_number = Some(second_block_number);
        // Tip block number delta is < than min block interval
        assert!(!pruner.is_pruning_needed(second_block_number));
        // Tip block number delta is >= than min block interval
        let third_block_number = second_block_number + pruner.min_block_interval as u64;
        assert!(pruner.is_pruning_needed(third_block_number));
        // Not all ExExs have emitted a `FinishedHeight` event yet
        finished_exex_height_tx.send(FinishedExExHeight::NotReady).unwrap();
        assert!(!pruner.is_pruning_needed(third_block_number));
        // Adjust tip block number to the finished ExEx height that doesn't reach the threshold
        finished_exex_height_tx.send(FinishedExExHeight::Height(second_block_number)).unwrap();
        assert!(!pruner.is_pruning_needed(third_block_number));
        // Adjust tip block number to the finished ExEx height that reaches the threshold
        finished_exex_height_tx.send(FinishedExExHeight::Height(third_block_number)).unwrap();
        assert!(pruner.is_pruning_needed(third_block_number));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/mod.rs | crates/prune/prune/src/segments/mod.rs | mod receipts;
mod set;
mod static_file;
mod user;
use crate::{PruneLimiter, PrunerError};
use alloy_primitives::{BlockNumber, TxNumber};
use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter};
use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput};
pub use set::SegmentSet;
pub use static_file::{
Headers as StaticFileHeaders, Receipts as StaticFileReceipts,
Transactions as StaticFileTransactions,
};
use std::{fmt::Debug, ops::RangeInclusive};
use tracing::error;
pub use user::{
AccountHistory, Receipts as UserReceipts, ReceiptsByLogs, SenderRecovery, StorageHistory,
TransactionLookup,
};
/// A segment represents a pruning of some portion of the data.
///
/// Segments are called from [`Pruner`](crate::Pruner) with the following lifecycle:
/// 1. Call [`Segment::prune`] with `delete_limit` of [`PruneInput`].
/// 2. If [`Segment::prune`] returned a [`Some`] in `checkpoint` of [`SegmentOutput`], call
///    [`Segment::save_checkpoint`].
/// 3. Subtract `pruned` of [`SegmentOutput`] from `delete_limit` of next [`PruneInput`].
pub trait Segment<Provider>: Debug + Send + Sync {
    /// Segment of data that's pruned.
    fn segment(&self) -> PruneSegment;
    /// Prune mode with which the segment was initialized.
    ///
    /// NOTE(review): presumably `None` for segments that are not user-configured (e.g. the
    /// static-file segments) — confirm against the implementations.
    fn mode(&self) -> Option<PruneMode>;
    /// Purpose of the segment.
    fn purpose(&self) -> PrunePurpose;
    /// Prune data for [`Self::segment`] using the provided input.
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError>;
    /// Save checkpoint for [`Self::segment`] to the database.
    ///
    /// Default implementation stores the checkpoint under this segment's key.
    fn save_checkpoint(
        &self,
        provider: &Provider,
        checkpoint: PruneCheckpoint,
    ) -> ProviderResult<()>
    where
        Provider: PruneCheckpointWriter,
    {
        provider.save_prune_checkpoint(self.segment(), checkpoint)
    }
}
/// Segment pruning input, see [`Segment::prune`].
#[derive(Debug)]
#[cfg_attr(test, derive(Clone))]
pub struct PruneInput {
    /// Checkpoint saved by the previous pruner run for this segment, if any.
    pub(crate) previous_checkpoint: Option<PruneCheckpoint>,
    /// Target block up to which the pruning needs to be done, inclusive.
    pub(crate) to_block: BlockNumber,
    /// Limits pruning of a segment.
    pub(crate) limiter: PruneLimiter,
}
impl PruneInput {
    /// Get next inclusive tx number range to prune according to the checkpoint and `to_block` block
    /// number.
    ///
    /// To get the range start:
    /// 1. If checkpoint exists, resume at the transaction after the last pruned one
    ///    (`checkpoint.tx_number + 1`).
    /// 2. If checkpoint doesn't exist, return 0.
    ///
    /// To get the range end: get last tx number for `to_block`.
    ///
    /// Returns `Ok(None)` when there is nothing left to prune.
    pub(crate) fn get_next_tx_num_range<Provider: BlockReader>(
        &self,
        provider: &Provider,
    ) -> ProviderResult<Option<RangeInclusive<TxNumber>>> {
        let from_tx_number = self.previous_checkpoint
            // Checkpoint exists, prune from the next transaction after the highest pruned one
            .and_then(|checkpoint| match checkpoint.tx_number {
                Some(tx_number) => Some(tx_number + 1),
                _ => {
                    // A tx-based segment checkpoint without a tx number is an invariant
                    // violation; log it and fall back to pruning from genesis.
                    error!(target: "pruner", ?checkpoint, "Expected transaction number in prune checkpoint, found None");
                    None
                },
            })
            // No checkpoint exists, prune from genesis
            .unwrap_or_default();
        let to_tx_number = match provider.block_body_indices(self.to_block)? {
            Some(body) => {
                let last_tx = body.last_tx_num();
                // Both terms are zero only when the body reports no transactions at all.
                if last_tx + body.tx_count() == 0 {
                    // Prevents a scenario where the pruner correctly starts at a finalized block,
                    // but the first transaction (tx_num = 0) only appears on a non-finalized one.
                    // Should only happen on a test/hive scenario.
                    return Ok(None)
                }
                last_tx
            }
            None => return Ok(None),
        };
        let range = from_tx_number..=to_tx_number;
        // Empty when everything up to `to_block` was already pruned.
        if range.is_empty() {
            return Ok(None)
        }
        Ok(Some(range))
    }
    /// Get next inclusive block range to prune according to the checkpoint, `to_block` block
    /// number and `limit`.
    ///
    /// To get the range start (`from_block`):
    /// 1. If checkpoint exists, use next block.
    /// 2. If checkpoint doesn't exist, use block 0.
    ///
    /// To get the range end: use block `to_block`.
    pub(crate) fn get_next_block_range(&self) -> Option<RangeInclusive<BlockNumber>> {
        let from_block = self.get_start_next_block_range();
        let range = from_block..=self.to_block;
        // Empty when everything up to `to_block` was already pruned.
        if range.is_empty() {
            return None
        }
        Some(range)
    }
    /// Returns the start of the next block range.
    ///
    /// 1. If checkpoint exists, use next block.
    /// 2. If checkpoint doesn't exist, use block 0.
    pub(crate) fn get_start_next_block_range(&self) -> u64 {
        self.previous_checkpoint
            .and_then(|checkpoint| checkpoint.block_number)
            // Checkpoint exists, prune from the next block after the highest pruned one
            .map(|block_number| block_number + 1)
            // No checkpoint exists, prune from genesis
            .unwrap_or(0)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::B256;
use reth_provider::{
providers::BlockchainProvider,
test_utils::{create_test_provider_factory, MockEthProvider},
};
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
#[test]
fn test_prune_input_get_next_tx_num_range_no_to_block() {
let input = PruneInput {
previous_checkpoint: None,
to_block: 10,
limiter: PruneLimiter::default(),
};
// Default provider with no block corresponding to block 10
let provider = MockEthProvider::default();
// No block body for block 10, expected None
let range = input.get_next_tx_num_range(&provider).expect("Expected range");
assert!(range.is_none());
}
#[test]
fn test_prune_input_get_next_tx_num_range_no_tx() {
let input = PruneInput {
previous_checkpoint: None,
to_block: 10,
limiter: PruneLimiter::default(),
};
let mut rng = generators::rng();
let factory = create_test_provider_factory();
// Generate 10 random blocks with no transactions
let blocks = random_block_range(
&mut rng,
0..=10,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
// Insert the blocks into the database
let provider_rw = factory.provider_rw().expect("failed to get provider_rw");
for block in &blocks {
provider_rw
.insert_historical_block(
block.clone().try_recover().expect("failed to seal block with senders"),
)
.expect("failed to insert block");
}
provider_rw.commit().expect("failed to commit");
// Create a new provider
let provider = BlockchainProvider::new(factory).unwrap();
// Since there are no transactions, expected None
let range = input.get_next_tx_num_range(&provider).expect("Expected range");
assert!(range.is_none());
}
#[test]
fn test_prune_input_get_next_tx_num_range_valid() {
// Create a new prune input
let input = PruneInput {
previous_checkpoint: None,
to_block: 10,
limiter: PruneLimiter::default(),
};
let mut rng = generators::rng();
let factory = create_test_provider_factory();
// Generate 10 random blocks with some transactions
let blocks = random_block_range(
&mut rng,
0..=10,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() },
);
// Insert the blocks into the database
let provider_rw = factory.provider_rw().expect("failed to get provider_rw");
for block in &blocks {
provider_rw
.insert_historical_block(
block.clone().try_recover().expect("failed to seal block with senders"),
)
.expect("failed to insert block");
}
provider_rw.commit().expect("failed to commit");
// Create a new provider
let provider = BlockchainProvider::new(factory).unwrap();
// Get the next tx number range
let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap();
// Calculate the total number of transactions
let num_txs = blocks.iter().map(|block| block.transaction_count() as u64).sum::<u64>();
assert_eq!(range, 0..=num_txs - 1);
}
#[test]
fn test_prune_input_get_next_tx_checkpoint_without_tx_number() {
// Create a prune input with a previous checkpoint without a tx number (unexpected)
let input = PruneInput {
previous_checkpoint: Some(PruneCheckpoint {
block_number: Some(5),
tx_number: None,
prune_mode: PruneMode::Full,
}),
to_block: 10,
limiter: PruneLimiter::default(),
};
let mut rng = generators::rng();
let factory = create_test_provider_factory();
// Generate 10 random blocks
let blocks = random_block_range(
&mut rng,
0..=10,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() },
);
// Insert the blocks into the database
let provider_rw = factory.provider_rw().expect("failed to get provider_rw");
for block in &blocks {
provider_rw
.insert_historical_block(
block.clone().try_recover().expect("failed to seal block with senders"),
)
.expect("failed to insert block");
}
provider_rw.commit().expect("failed to commit");
// Create a new provider
let provider = BlockchainProvider::new(factory).unwrap();
// Fetch the range and check if it is correct
let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap();
// Calculate the total number of transactions
let num_txs = blocks.iter().map(|block| block.transaction_count() as u64).sum::<u64>();
assert_eq!(range, 0..=num_txs - 1,);
}
#[test]
fn test_prune_input_get_next_tx_empty_range() {
// Create a new provider via factory
let mut rng = generators::rng();
let factory = create_test_provider_factory();
// Generate 10 random blocks
let blocks = random_block_range(
&mut rng,
0..=10,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() },
);
// Insert the blocks into the database
let provider_rw = factory.provider_rw().expect("failed to get provider_rw");
for block in &blocks {
provider_rw
.insert_historical_block(
block.clone().try_recover().expect("failed to seal block with senders"),
)
.expect("failed to insert block");
}
provider_rw.commit().expect("failed to commit");
// Create a new provider
let provider = BlockchainProvider::new(factory).unwrap();
// Get the last tx number
// Calculate the total number of transactions
let num_txs = blocks.iter().map(|block| block.transaction_count() as u64).sum::<u64>();
let max_range = num_txs - 1;
// Create a prune input with a previous checkpoint that is the last tx number
let input = PruneInput {
previous_checkpoint: Some(PruneCheckpoint {
block_number: Some(5),
tx_number: Some(max_range),
prune_mode: PruneMode::Full,
}),
to_block: 10,
limiter: PruneLimiter::default(),
};
// We expect an empty range since the previous checkpoint is the last tx number
let range = input.get_next_tx_num_range(&provider).expect("Expected range");
assert!(range.is_none());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/receipts.rs | crates/prune/prune/src/segments/receipts.rs | //! Common receipts pruning logic shared between user and static file pruning segments.
//!
//! - [`crate::segments::user::Receipts`] is responsible for pruning receipts according to the
//! user-configured settings (for example, on a full node or with a custom prune config)
//! - [`crate::segments::static_file::Receipts`] is responsible for pruning receipts on an archive
//! node after static file producer has finished
use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError};
use reth_db_api::{table::Value, tables, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider,
PruneCheckpointWriter, TransactionsProvider,
};
use reth_prune_types::{PruneCheckpoint, PruneSegment, SegmentOutput, SegmentOutputCheckpoint};
use tracing::trace;
/// Prunes the `Receipts` table for the next transaction range up to `input.to_block`.
///
/// Shared by the user-configured and static-file receipts segments (see module docs).
/// Returns the number of deleted rows and a checkpoint pointing at the last pruned
/// transaction.
pub(crate) fn prune<Provider>(
    provider: &Provider,
    input: PruneInput,
) -> Result<SegmentOutput, PrunerError>
where
    Provider: DBProvider<Tx: DbTxMut>
        + TransactionsProvider
        + BlockReader
        + NodePrimitivesProvider<Primitives: NodePrimitives<Receipt: Value>>,
{
    let tx_range = match input.get_next_tx_num_range(provider)? {
        Some(range) => range,
        None => {
            trace!(target: "pruner", "No receipts to prune");
            return Ok(SegmentOutput::done())
        }
    };
    let tx_range_end = *tx_range.end();
    let mut limiter = input.limiter;
    // Initialized to the range end; overwritten with each deleted row's tx number.
    let mut last_pruned_transaction = tx_range_end;
    let (pruned, done) = provider.tx_ref().prune_table_with_range::<tables::Receipts<
        <Provider::Primitives as NodePrimitives>::Receipt,
    >>(
        tx_range,
        &mut limiter,
        |_| false,
        |row| last_pruned_transaction = row.0,
    )?;
    trace!(target: "pruner", %pruned, %done, "Pruned receipts");
    let last_pruned_block = provider
        .transaction_block(last_pruned_transaction)?
        .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))?
        // If there's more receipts to prune, set the checkpoint block number to previous,
        // so we could finish pruning its receipts on the next run.
        .checked_sub(if done { 0 } else { 1 });
    let progress = limiter.progress(done);
    Ok(SegmentOutput {
        progress,
        pruned,
        checkpoint: Some(SegmentOutputCheckpoint {
            block_number: last_pruned_block,
            tx_number: Some(last_pruned_transaction),
        }),
    })
}
/// Persists the receipts prune checkpoint.
///
/// `PruneSegment::Receipts` overrides `PruneSegment::ContractLogs`, so we can preemptively
/// limit their pruning start point by writing the same checkpoint for both segments.
pub(crate) fn save_checkpoint(
    provider: impl PruneCheckpointWriter,
    checkpoint: PruneCheckpoint,
) -> ProviderResult<()> {
    for segment in [PruneSegment::Receipts, PruneSegment::ContractLogs] {
        provider.save_prune_checkpoint(segment, checkpoint)?;
    }
    Ok(())
}
#[cfg(test)]
mod tests {
use crate::segments::{PruneInput, PruneLimiter, SegmentOutput};
use alloy_primitives::{BlockNumber, TxNumber, B256};
use assert_matches::assert_matches;
use itertools::{
FoldWhile::{Continue, Done},
Itertools,
};
use reth_db_api::tables;
use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader};
use reth_prune_types::{
PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment,
};
use reth_stages::test_utils::{StorageKind, TestStageDB};
use reth_testing_utils::generators::{
self, random_block_range, random_receipt, BlockRangeParams,
};
use std::ops::Sub;
#[test]
fn prune() {
let db = TestStageDB::default();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
1..=10,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks");
let mut receipts = Vec::new();
for block in &blocks {
receipts.reserve_exact(block.transaction_count());
for transaction in &block.body().transactions {
receipts.push((
receipts.len() as u64,
random_receipt(&mut rng, transaction, Some(0), None),
));
}
}
let receipts_len = receipts.len();
db.insert_receipts(receipts).expect("insert receipts");
assert_eq!(
db.table::<tables::Transactions>().unwrap().len(),
blocks.iter().map(|block| block.transaction_count()).sum::<usize>()
);
assert_eq!(
db.table::<tables::Transactions>().unwrap().len(),
db.table::<tables::Receipts>().unwrap().len()
);
let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| {
let prune_mode = PruneMode::Before(to_block);
let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
let input = PruneInput {
previous_checkpoint: db
.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::Receipts)
.unwrap(),
to_block,
limiter: limiter.clone(),
};
let next_tx_number_to_prune = db
.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::Receipts)
.unwrap()
.and_then(|checkpoint| checkpoint.tx_number)
.map(|tx_number| tx_number + 1)
.unwrap_or_default();
let last_pruned_tx_number = blocks
.iter()
.take(to_block as usize)
.map(|block| block.transaction_count())
.sum::<usize>()
.min(
next_tx_number_to_prune as usize +
input.limiter.deleted_entries_limit().unwrap(),
)
.sub(1);
let provider = db.factory.database_provider_rw().unwrap();
let result = super::prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
assert_matches!(
result,
SegmentOutput {progress, pruned, checkpoint: Some(_)}
if (progress, pruned) == expected_result
);
super::save_checkpoint(
&provider,
result.checkpoint.unwrap().as_prune_checkpoint(prune_mode),
)
.unwrap();
provider.commit().expect("commit");
let last_pruned_block_number = blocks
.iter()
.fold_while((0, 0), |(_, mut tx_count), block| {
tx_count += block.transaction_count();
if tx_count > last_pruned_tx_number {
Done((block.number, tx_count))
} else {
Continue((block.number, tx_count))
}
})
.into_inner()
.0
.checked_sub(if result.progress.is_finished() { 0 } else { 1 });
assert_eq!(
db.table::<tables::Receipts>().unwrap().len(),
receipts_len - (last_pruned_tx_number + 1)
);
assert_eq!(
db.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::Receipts)
.unwrap(),
Some(PruneCheckpoint {
block_number: last_pruned_block_number,
tx_number: Some(last_pruned_tx_number as TxNumber),
prune_mode
})
);
};
test_prune(
6,
(PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 10),
);
test_prune(6, (PruneProgress::Finished, 2));
test_prune(10, (PruneProgress::Finished, 8));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/set.rs | crates/prune/prune/src/segments/set.rs | use crate::segments::{
AccountHistory, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, TransactionLookup,
UserReceipts,
};
use alloy_eips::eip2718::Encodable2718;
use reth_db_api::{table::Value, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter,
StaticFileProviderFactory,
};
use reth_prune_types::PruneModes;
use super::{StaticFileHeaders, StaticFileReceipts, StaticFileTransactions};
/// Collection of [`Segment`]. Thread-safe, allocated on the heap.
#[derive(Debug)]
pub struct SegmentSet<Provider> {
inner: Vec<Box<dyn Segment<Provider>>>,
}
impl<Provider> SegmentSet<Provider> {
    /// Returns empty [`SegmentSet`] collection.
    pub fn new() -> Self {
        Self::default()
    }
    /// Adds new [`Segment`] to collection.
    pub fn segment<S: Segment<Provider> + 'static>(mut self, segment: S) -> Self {
        self.inner.push(Box::new(segment));
        self
    }
    /// Adds new [Segment] to collection if it's [Some].
    pub fn segment_opt<S: Segment<Provider> + 'static>(self, segment: Option<S>) -> Self {
        match segment {
            Some(segment) => self.segment(segment),
            None => self,
        }
    }
    /// Consumes [`SegmentSet`] and returns a [Vec].
    pub fn into_vec(self) -> Vec<Box<dyn Segment<Provider>>> {
        self.inner
    }
}
impl<Provider> SegmentSet<Provider>
where
    Provider: StaticFileProviderFactory<
        Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
    > + DBProvider<Tx: DbTxMut>
        + PruneCheckpointWriter
        + BlockReader<Transaction: Encodable2718>,
{
    /// Creates a [`SegmentSet`] from existing components, such as [`StaticFileProvider`] and
    /// [`PruneModes`].
    ///
    /// Static-file segments are always included; user segments are added only when the
    /// corresponding prune mode is configured.
    pub fn from_components(
        static_file_provider: StaticFileProvider<Provider::Primitives>,
        prune_modes: PruneModes,
    ) -> Self {
        let PruneModes {
            sender_recovery,
            transaction_lookup,
            receipts,
            account_history,
            storage_history,
            bodies_history: _,
            receipts_log_filter,
        } = prune_modes;
        Self::default()
            // Static file headers
            .segment(StaticFileHeaders::new(static_file_provider.clone()))
            // Static file transactions
            .segment(StaticFileTransactions::new(static_file_provider.clone()))
            // Static file receipts
            .segment(StaticFileReceipts::new(static_file_provider))
            // Account history
            .segment_opt(account_history.map(AccountHistory::new))
            // Storage history
            .segment_opt(storage_history.map(StorageHistory::new))
            // User receipts
            .segment_opt(receipts.map(UserReceipts::new))
            // Receipts by logs
            .segment_opt(
                // `receipts_log_filter` is not used after this point, so the closure can
                // take it by move — the previous `.clone()` was redundant.
                (!receipts_log_filter.is_empty())
                    .then(|| ReceiptsByLogs::new(receipts_log_filter)),
            )
            // Transaction lookup
            .segment_opt(transaction_lookup.map(TransactionLookup::new))
            // Sender recovery
            .segment_opt(sender_recovery.map(SenderRecovery::new))
    }
}
impl<Provider> Default for SegmentSet<Provider> {
    /// Returns a collection with no segments.
    fn default() -> Self {
        Self { inner: Vec::default() }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/user/transaction_lookup.rs | crates/prune/prune/src/segments/user/transaction_lookup.rs | use crate::{
db_ext::DbTxPruneExt,
segments::{PruneInput, Segment, SegmentOutput},
PrunerError,
};
use alloy_eips::eip2718::Encodable2718;
use rayon::prelude::*;
use reth_db_api::{tables, transaction::DbTxMut};
use reth_provider::{BlockReader, DBProvider};
use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint};
use tracing::{instrument, trace};
/// Prune segment responsible for the `TransactionHashNumbers` (hash → tx number) lookup
/// table, pruned according to the user-configured mode.
#[derive(Debug)]
pub struct TransactionLookup {
    // Prune mode configured by the user.
    mode: PruneMode,
}
impl TransactionLookup {
    /// Creates a new segment with the given prune mode.
    pub const fn new(mode: PruneMode) -> Self {
        Self { mode }
    }
}
impl<Provider> Segment<Provider> for TransactionLookup
where
    Provider: DBProvider<Tx: DbTxMut> + BlockReader<Transaction: Encodable2718>,
{
    fn segment(&self) -> PruneSegment {
        PruneSegment::TransactionLookup
    }
    fn mode(&self) -> Option<PruneMode> {
        Some(self.mode)
    }
    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::User
    }
    /// Prunes `TransactionHashNumbers` entries for the next chunk of transactions up to
    /// `input.to_block`, capped by the limiter's remaining deleted-entries budget.
    #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)]
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        let (start, end) = match input.get_next_tx_num_range(provider)? {
            Some(range) => range,
            None => {
                trace!(target: "pruner", "No transaction lookup entries to prune");
                return Ok(SegmentOutput::done())
            }
        }
        .into_inner();
        // Cap the range end by how many entries the limiter still allows us to delete.
        // NOTE(review): when `deleted_entries_limit_left()` is Some(0) this computes
        // `start - 1`, which underflows for `start == 0`; presumably the pruner never
        // invokes a segment with an exhausted budget — confirm at the call site.
        let tx_range = start..=
            Some(end)
            .min(input.limiter.deleted_entries_limit_left().map(|left| start + left as u64 - 1))
            .unwrap();
        let tx_range_end = *tx_range.end();
        // Retrieve transactions in the range and calculate their hashes in parallel
        let hashes = provider
            .transactions_by_tx_range(tx_range.clone())?
            .into_par_iter()
            .map(|transaction| transaction.trie_hash())
            .collect::<Vec<_>>();
        // Number of transactions retrieved from the database should match the tx range count
        let tx_count = tx_range.count();
        if hashes.len() != tx_count {
            return Err(PrunerError::InconsistentData(
                "Unexpected number of transaction hashes retrieved by transaction number range",
            ))
        }
        let mut limiter = input.limiter;
        // Track the highest tx number actually deleted so the checkpoint stays accurate
        // even if the limiter stops pruning mid-range.
        let mut last_pruned_transaction = None;
        let (pruned, done) =
            provider.tx_ref().prune_table_with_iterator::<tables::TransactionHashNumbers>(
                hashes,
                &mut limiter,
                |row| {
                    last_pruned_transaction =
                        Some(last_pruned_transaction.unwrap_or(row.1).max(row.1))
                },
            )?;
        // Only done if table pruning finished AND the full requested range was covered
        // (the range may have been capped by the limiter above).
        let done = done && tx_range_end == end;
        trace!(target: "pruner", %pruned, %done, "Pruned transaction lookup");
        let last_pruned_transaction = last_pruned_transaction.unwrap_or(tx_range_end);
        let last_pruned_block = provider
            .transaction_block(last_pruned_transaction)?
            .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))?
            // If there's more transaction lookup entries to prune, set the checkpoint block number
            // to previous, so we could finish pruning its transaction lookup entries on the next
            // run.
            .checked_sub(if done { 0 } else { 1 });
        let progress = limiter.progress(done);
        Ok(SegmentOutput {
            progress,
            pruned,
            checkpoint: Some(SegmentOutputCheckpoint {
                block_number: last_pruned_block,
                tx_number: Some(last_pruned_transaction),
            }),
        })
    }
}
#[cfg(test)]
mod tests {
use crate::segments::{PruneInput, PruneLimiter, Segment, SegmentOutput, TransactionLookup};
use alloy_primitives::{BlockNumber, TxNumber, B256};
use assert_matches::assert_matches;
use itertools::{
FoldWhile::{Continue, Done},
Itertools,
};
use reth_db_api::tables;
use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader};
use reth_prune_types::{
PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment,
};
use reth_stages::test_utils::{StorageKind, TestStageDB};
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use std::ops::Sub;
#[test]
fn prune() {
let db = TestStageDB::default();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
1..=10,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks");
let mut tx_hash_numbers = Vec::new();
for block in &blocks {
tx_hash_numbers.reserve_exact(block.transaction_count());
for transaction in &block.body().transactions {
tx_hash_numbers.push((*transaction.tx_hash(), tx_hash_numbers.len() as u64));
}
}
let tx_hash_numbers_len = tx_hash_numbers.len();
db.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers");
assert_eq!(
db.table::<tables::Transactions>().unwrap().len(),
blocks.iter().map(|block| block.transaction_count()).sum::<usize>()
);
assert_eq!(
db.table::<tables::Transactions>().unwrap().len(),
db.table::<tables::TransactionHashNumbers>().unwrap().len()
);
let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| {
let prune_mode = PruneMode::Before(to_block);
let segment = TransactionLookup::new(prune_mode);
let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
let input = PruneInput {
previous_checkpoint: db
.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::TransactionLookup)
.unwrap(),
to_block,
limiter: limiter.clone(),
};
let next_tx_number_to_prune = db
.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::TransactionLookup)
.unwrap()
.and_then(|checkpoint| checkpoint.tx_number)
.map(|tx_number| tx_number + 1)
.unwrap_or_default();
let last_pruned_tx_number = blocks
.iter()
.take(to_block as usize)
.map(|block| block.transaction_count())
.sum::<usize>()
.min(
next_tx_number_to_prune as usize +
input.limiter.deleted_entries_limit().unwrap(),
)
.sub(1);
let last_pruned_block_number = blocks
.iter()
.fold_while((0, 0), |(_, mut tx_count), block| {
tx_count += block.transaction_count();
if tx_count > last_pruned_tx_number {
Done((block.number, tx_count))
} else {
Continue((block.number, tx_count))
}
})
.into_inner()
.0;
let provider = db.factory.database_provider_rw().unwrap();
let result = segment.prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
assert_matches!(
result,
SegmentOutput {progress, pruned, checkpoint: Some(_)}
if (progress, pruned) == expected_result
);
segment
.save_checkpoint(
&provider,
result.checkpoint.unwrap().as_prune_checkpoint(prune_mode),
)
.unwrap();
provider.commit().expect("commit");
let last_pruned_block_number = last_pruned_block_number
.checked_sub(if result.progress.is_finished() { 0 } else { 1 });
assert_eq!(
db.table::<tables::TransactionHashNumbers>().unwrap().len(),
tx_hash_numbers_len - (last_pruned_tx_number + 1)
);
assert_eq!(
db.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::TransactionLookup)
.unwrap(),
Some(PruneCheckpoint {
block_number: last_pruned_block_number,
tx_number: Some(last_pruned_tx_number as TxNumber),
prune_mode
})
);
};
test_prune(
6,
(PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 10),
);
test_prune(6, (PruneProgress::Finished, 2));
test_prune(10, (PruneProgress::Finished, 8));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/user/sender_recovery.rs | crates/prune/prune/src/segments/user/sender_recovery.rs | use crate::{
db_ext::DbTxPruneExt,
segments::{PruneInput, Segment},
PrunerError,
};
use reth_db_api::{tables, transaction::DbTxMut};
use reth_provider::{BlockReader, DBProvider, TransactionsProvider};
use reth_prune_types::{
PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint,
};
use tracing::{instrument, trace};
/// Prune segment responsible for the `TransactionSenders` table (recovered transaction
/// signers), pruned according to the user-configured mode.
#[derive(Debug)]
pub struct SenderRecovery {
    // Prune mode configured by the user.
    mode: PruneMode,
}
impl SenderRecovery {
    /// Creates a new segment with the given prune mode.
    pub const fn new(mode: PruneMode) -> Self {
        Self { mode }
    }
}
impl<Provider> Segment<Provider> for SenderRecovery
where
    Provider: DBProvider<Tx: DbTxMut> + TransactionsProvider + BlockReader,
{
    fn segment(&self) -> PruneSegment {
        PruneSegment::SenderRecovery
    }
    fn mode(&self) -> Option<PruneMode> {
        Some(self.mode)
    }
    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::User
    }
    /// Prunes `TransactionSenders` rows for the next transaction range up to
    /// `input.to_block`, bounded by the limiter.
    #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)]
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        let tx_range = match input.get_next_tx_num_range(provider)? {
            Some(range) => range,
            None => {
                trace!(target: "pruner", "No transaction senders to prune");
                return Ok(SegmentOutput::done())
            }
        };
        let tx_range_end = *tx_range.end();
        let mut limiter = input.limiter;
        // Initialized to the range end; overwritten with each deleted row's tx number.
        let mut last_pruned_transaction = tx_range_end;
        let (pruned, done) =
            provider.tx_ref().prune_table_with_range::<tables::TransactionSenders>(
                tx_range,
                &mut limiter,
                |_| false,
                |row| last_pruned_transaction = row.0,
            )?;
        trace!(target: "pruner", %pruned, %done, "Pruned transaction senders");
        let last_pruned_block = provider
            .transaction_block(last_pruned_transaction)?
            .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))?
            // If there's more transaction senders to prune, set the checkpoint block number to
            // previous, so we could finish pruning its transaction senders on the next run.
            .checked_sub(if done { 0 } else { 1 });
        let progress = limiter.progress(done);
        Ok(SegmentOutput {
            progress,
            pruned,
            checkpoint: Some(SegmentOutputCheckpoint {
                block_number: last_pruned_block,
                tx_number: Some(last_pruned_transaction),
            }),
        })
    }
}
#[cfg(test)]
mod tests {
use crate::segments::{PruneInput, PruneLimiter, Segment, SegmentOutput, SenderRecovery};
use alloy_primitives::{BlockNumber, TxNumber, B256};
use assert_matches::assert_matches;
use itertools::{
FoldWhile::{Continue, Done},
Itertools,
};
use reth_db_api::tables;
use reth_primitives_traits::SignerRecoverable;
use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader};
use reth_prune_types::{PruneCheckpoint, PruneMode, PruneProgress, PruneSegment};
use reth_stages::test_utils::{StorageKind, TestStageDB};
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use std::ops::Sub;
#[test]
fn prune() {
let db = TestStageDB::default();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
1..=10,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks");
let mut transaction_senders = Vec::new();
for block in &blocks {
transaction_senders.reserve_exact(block.transaction_count());
for transaction in &block.body().transactions {
transaction_senders.push((
transaction_senders.len() as u64,
transaction.recover_signer().expect("recover signer"),
));
}
}
let transaction_senders_len = transaction_senders.len();
db.insert_transaction_senders(transaction_senders).expect("insert transaction senders");
assert_eq!(
db.table::<tables::Transactions>().unwrap().len(),
blocks.iter().map(|block| block.transaction_count()).sum::<usize>()
);
assert_eq!(
db.table::<tables::Transactions>().unwrap().len(),
db.table::<tables::TransactionSenders>().unwrap().len()
);
let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| {
let prune_mode = PruneMode::Before(to_block);
let segment = SenderRecovery::new(prune_mode);
let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
let input = PruneInput {
previous_checkpoint: db
.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::SenderRecovery)
.unwrap(),
to_block,
limiter: limiter.clone(),
};
let next_tx_number_to_prune = db
.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::SenderRecovery)
.unwrap()
.and_then(|checkpoint| checkpoint.tx_number)
.map(|tx_number| tx_number + 1)
.unwrap_or_default();
let last_pruned_tx_number = blocks
.iter()
.take(to_block as usize)
.map(|block| block.transaction_count())
.sum::<usize>()
.min(
next_tx_number_to_prune as usize +
input.limiter.deleted_entries_limit().unwrap(),
)
.sub(1);
let last_pruned_block_number = blocks
.iter()
.fold_while((0, 0), |(_, mut tx_count), block| {
tx_count += block.transaction_count();
if tx_count > last_pruned_tx_number {
Done((block.number, tx_count))
} else {
Continue((block.number, tx_count))
}
})
.into_inner()
.0;
let provider = db.factory.database_provider_rw().unwrap();
let result = segment.prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
assert_matches!(
result,
SegmentOutput {progress, pruned, checkpoint: Some(_)}
if (progress, pruned) == expected_result
);
segment
.save_checkpoint(
&provider,
result.checkpoint.unwrap().as_prune_checkpoint(prune_mode),
)
.unwrap();
provider.commit().expect("commit");
let last_pruned_block_number = last_pruned_block_number
.checked_sub(if result.progress.is_finished() { 0 } else { 1 });
assert_eq!(
db.table::<tables::TransactionSenders>().unwrap().len(),
transaction_senders_len - (last_pruned_tx_number + 1)
);
assert_eq!(
db.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::SenderRecovery)
.unwrap(),
Some(PruneCheckpoint {
block_number: last_pruned_block_number,
tx_number: Some(last_pruned_tx_number as TxNumber),
prune_mode
})
);
};
test_prune(
6,
(
PruneProgress::HasMoreData(
reth_prune_types::PruneInterruptReason::DeletedEntriesLimitReached,
),
10,
),
);
test_prune(6, (PruneProgress::Finished, 2));
test_prune(10, (PruneProgress::Finished, 8));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/user/account_history.rs | crates/prune/prune/src/segments/user/account_history.rs | use crate::{
db_ext::DbTxPruneExt,
segments::{user::history::prune_history_indices, PruneInput, Segment},
PrunerError,
};
use itertools::Itertools;
use reth_db_api::{models::ShardedKey, tables, transaction::DbTxMut};
use reth_provider::DBProvider;
use reth_prune_types::{
PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint,
};
use rustc_hash::FxHashMap;
use tracing::{instrument, trace};
/// Number of account history tables to prune in one step.
///
/// Account History consists of two tables: [`tables::AccountChangeSets`] and
/// [`tables::AccountsHistory`]. We want to prune them to the same block number.
const ACCOUNT_HISTORY_TABLES_TO_PRUNE: usize = 2;
#[derive(Debug)]
pub struct AccountHistory {
mode: PruneMode,
}
impl AccountHistory {
pub const fn new(mode: PruneMode) -> Self {
Self { mode }
}
}
impl<Provider> Segment<Provider> for AccountHistory
where
Provider: DBProvider<Tx: DbTxMut>,
{
fn segment(&self) -> PruneSegment {
PruneSegment::AccountHistory
}
fn mode(&self) -> Option<PruneMode> {
Some(self.mode)
}
fn purpose(&self) -> PrunePurpose {
PrunePurpose::User
}
#[instrument(level = "trace", target = "pruner", skip(self, provider), ret)]
fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
let range = match input.get_next_block_range() {
Some(range) => range,
None => {
trace!(target: "pruner", "No account history to prune");
return Ok(SegmentOutput::done())
}
};
let range_end = *range.end();
let mut limiter = if let Some(limit) = input.limiter.deleted_entries_limit() {
input.limiter.set_deleted_entries_limit(limit / ACCOUNT_HISTORY_TABLES_TO_PRUNE)
} else {
input.limiter
};
if limiter.is_limit_reached() {
return Ok(SegmentOutput::not_done(
limiter.interrupt_reason(),
input.previous_checkpoint.map(SegmentOutputCheckpoint::from_prune_checkpoint),
))
}
let mut last_changeset_pruned_block = None;
// Deleted account changeset keys (account addresses) with the highest block number deleted
// for that key.
//
// The size of this map it's limited by `prune_delete_limit * blocks_since_last_run /
// ACCOUNT_HISTORY_TABLES_TO_PRUNE`, and with current default it's usually `3500 * 5
// / 2`, so 8750 entries. Each entry is `160 bit + 256 bit + 64 bit`, so the total
// size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is
// additionally limited by the `max_reorg_depth`, so no OOM is expected here.
let mut highest_deleted_accounts = FxHashMap::default();
let (pruned_changesets, done) =
provider.tx_ref().prune_table_with_range::<tables::AccountChangeSets>(
range,
&mut limiter,
|_| false,
|(block_number, account)| {
highest_deleted_accounts.insert(account.address, block_number);
last_changeset_pruned_block = Some(block_number);
},
)?;
trace!(target: "pruner", pruned = %pruned_changesets, %done, "Pruned account history (changesets)");
let last_changeset_pruned_block = last_changeset_pruned_block
// If there's more account changesets to prune, set the checkpoint block number to
// previous, so we could finish pruning its account changesets on the next run.
.map(|block_number| if done { block_number } else { block_number.saturating_sub(1) })
.unwrap_or(range_end);
// Sort highest deleted block numbers by account address and turn them into sharded keys.
// We did not use `BTreeMap` from the beginning, because it's inefficient for hashes.
let highest_sharded_keys = highest_deleted_accounts
.into_iter()
.sorted_unstable() // Unstable is fine because no equal keys exist in the map
.map(|(address, block_number)| {
ShardedKey::new(address, block_number.min(last_changeset_pruned_block))
});
let outcomes = prune_history_indices::<Provider, tables::AccountsHistory, _>(
provider,
highest_sharded_keys,
|a, b| a.key == b.key,
)?;
trace!(target: "pruner", ?outcomes, %done, "Pruned account history (indices)");
let progress = limiter.progress(done);
Ok(SegmentOutput {
progress,
pruned: pruned_changesets + outcomes.deleted,
checkpoint: Some(SegmentOutputCheckpoint {
block_number: Some(last_changeset_pruned_block),
tx_number: None,
}),
})
}
}
#[cfg(test)]
mod tests {
use crate::segments::{
user::account_history::ACCOUNT_HISTORY_TABLES_TO_PRUNE, AccountHistory, PruneInput,
PruneLimiter, Segment, SegmentOutput,
};
use alloy_primitives::{BlockNumber, B256};
use assert_matches::assert_matches;
use reth_db_api::{tables, BlockNumberList};
use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader};
use reth_prune_types::{
PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment,
};
use reth_stages::test_utils::{StorageKind, TestStageDB};
use reth_testing_utils::generators::{
self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams,
};
use std::{collections::BTreeMap, ops::AddAssign};
#[test]
fn prune() {
let db = TestStageDB::default();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
1..=5000,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks");
let accounts = random_eoa_accounts(&mut rng, 2).into_iter().collect::<BTreeMap<_, _>>();
let (changesets, _) = random_changeset_range(
&mut rng,
blocks.iter(),
accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))),
0..0,
0..0,
);
db.insert_changesets(changesets.clone(), None).expect("insert changesets");
db.insert_history(changesets.clone(), None).expect("insert history");
let account_occurrences = db.table::<tables::AccountsHistory>().unwrap().into_iter().fold(
BTreeMap::<_, usize>::new(),
|mut map, (key, _)| {
map.entry(key.key).or_default().add_assign(1);
map
},
);
assert!(account_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1));
assert_eq!(
db.table::<tables::AccountChangeSets>().unwrap().len(),
changesets.iter().flatten().count()
);
let original_shards = db.table::<tables::AccountsHistory>().unwrap();
let test_prune =
|to_block: BlockNumber, run: usize, expected_result: (PruneProgress, usize)| {
let prune_mode = PruneMode::Before(to_block);
let deleted_entries_limit = 2000;
let mut limiter =
PruneLimiter::default().set_deleted_entries_limit(deleted_entries_limit);
let input = PruneInput {
previous_checkpoint: db
.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::AccountHistory)
.unwrap(),
to_block,
limiter: limiter.clone(),
};
let segment = AccountHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
let result = segment.prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
assert_matches!(
result,
SegmentOutput {progress, pruned, checkpoint: Some(_)}
if (progress, pruned) == expected_result
);
segment
.save_checkpoint(
&provider,
result.checkpoint.unwrap().as_prune_checkpoint(prune_mode),
)
.unwrap();
provider.commit().expect("commit");
let changesets = changesets
.iter()
.enumerate()
.flat_map(|(block_number, changeset)| {
changeset.iter().map(move |change| (block_number, change))
})
.collect::<Vec<_>>();
#[expect(clippy::skip_while_next)]
let pruned = changesets
.iter()
.enumerate()
.skip_while(|(i, (block_number, _))| {
*i < deleted_entries_limit / ACCOUNT_HISTORY_TABLES_TO_PRUNE * run &&
*block_number <= to_block as usize
})
.next()
.map(|(i, _)| i)
.unwrap_or_default();
let mut pruned_changesets = changesets
.iter()
// Skip what we've pruned so far, subtracting one to get last pruned block
// number further down
.skip(pruned.saturating_sub(1));
let last_pruned_block_number = pruned_changesets
.next()
.map(|(block_number, _)| if result.progress.is_finished() {
*block_number
} else {
block_number.saturating_sub(1)
} as BlockNumber)
.unwrap_or(to_block);
let pruned_changesets = pruned_changesets.fold(
BTreeMap::<_, Vec<_>>::new(),
|mut acc, (block_number, change)| {
acc.entry(block_number).or_default().push(change);
acc
},
);
assert_eq!(
db.table::<tables::AccountChangeSets>().unwrap().len(),
pruned_changesets.values().flatten().count()
);
let actual_shards = db.table::<tables::AccountsHistory>().unwrap();
let expected_shards = original_shards
.iter()
.filter(|(key, _)| key.highest_block_number > last_pruned_block_number)
.map(|(key, blocks)| {
let new_blocks =
blocks.iter().skip_while(|block| *block <= last_pruned_block_number);
(key.clone(), BlockNumberList::new_pre_sorted(new_blocks))
})
.collect::<Vec<_>>();
assert_eq!(actual_shards, expected_shards);
assert_eq!(
db.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::AccountHistory)
.unwrap(),
Some(PruneCheckpoint {
block_number: Some(last_pruned_block_number),
tx_number: None,
prune_mode
})
);
};
test_prune(
998,
1,
(PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 1000),
);
test_prune(998, 2, (PruneProgress::Finished, 998));
test_prune(1400, 3, (PruneProgress::Finished, 804));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/user/history.rs | crates/prune/prune/src/segments/user/history.rs | use alloy_primitives::BlockNumber;
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
models::ShardedKey,
table::Table,
transaction::DbTxMut,
BlockNumberList, DatabaseError, RawKey, RawTable, RawValue,
};
use reth_provider::DBProvider;
enum PruneShardOutcome {
Deleted,
Updated,
Unchanged,
}
#[derive(Debug, Default)]
pub(crate) struct PrunedIndices {
pub(crate) deleted: usize,
pub(crate) updated: usize,
pub(crate) unchanged: usize,
}
/// Prune history indices according to the provided list of highest sharded keys.
///
/// Returns total number of deleted, updated and unchanged entities.
pub(crate) fn prune_history_indices<Provider, T, SK>(
provider: &Provider,
highest_sharded_keys: impl IntoIterator<Item = T::Key>,
key_matches: impl Fn(&T::Key, &T::Key) -> bool,
) -> Result<PrunedIndices, DatabaseError>
where
Provider: DBProvider<Tx: DbTxMut>,
T: Table<Value = BlockNumberList>,
T::Key: AsRef<ShardedKey<SK>>,
{
let mut outcomes = PrunedIndices::default();
let mut cursor = provider.tx_ref().cursor_write::<RawTable<T>>()?;
for sharded_key in highest_sharded_keys {
// Seek to the shard that has the key >= the given sharded key
// TODO: optimize
let mut shard = cursor.seek(RawKey::new(sharded_key.clone()))?;
// Get the highest block number that needs to be deleted for this sharded key
let to_block = sharded_key.as_ref().highest_block_number;
'shard: loop {
let Some((key, block_nums)) =
shard.map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v))).transpose()?
else {
break
};
if key_matches(&key, &sharded_key) {
match prune_shard(&mut cursor, key, block_nums, to_block, &key_matches)? {
PruneShardOutcome::Deleted => outcomes.deleted += 1,
PruneShardOutcome::Updated => outcomes.updated += 1,
PruneShardOutcome::Unchanged => outcomes.unchanged += 1,
}
} else {
// If such shard doesn't exist, skip to the next sharded key
break 'shard
}
shard = cursor.next()?;
}
}
Ok(outcomes)
}
/// Prunes one shard of a history table.
///
/// 1. If the shard has `highest_block_number` less than or equal to the target block number for
/// pruning, delete the shard completely.
/// 2. If the shard has `highest_block_number` greater than the target block number for pruning,
/// filter block numbers inside the shard which are less than the target block number for
/// pruning.
fn prune_shard<C, T, SK>(
cursor: &mut C,
key: T::Key,
raw_blocks: RawValue<T::Value>,
to_block: BlockNumber,
key_matches: impl Fn(&T::Key, &T::Key) -> bool,
) -> Result<PruneShardOutcome, DatabaseError>
where
C: DbCursorRO<RawTable<T>> + DbCursorRW<RawTable<T>>,
T: Table<Value = BlockNumberList>,
T::Key: AsRef<ShardedKey<SK>>,
{
// If shard consists only of block numbers less than the target one, delete shard
// completely.
if key.as_ref().highest_block_number <= to_block {
cursor.delete_current()?;
Ok(PruneShardOutcome::Deleted)
}
// Shard contains block numbers that are higher than the target one, so we need to
// filter it. It is guaranteed that further shards for this sharded key will not
// contain the target block number, as it's in this shard.
else {
let blocks = raw_blocks.value()?;
let higher_blocks =
blocks.iter().skip_while(|block| *block <= to_block).collect::<Vec<_>>();
// If there were blocks less than or equal to the target one
// (so the shard has changed), update the shard.
if blocks.len() as usize == higher_blocks.len() {
return Ok(PruneShardOutcome::Unchanged);
}
// If there will be no more blocks in the shard after pruning blocks below target
// block, we need to remove it, as empty shards are not allowed.
if higher_blocks.is_empty() {
if key.as_ref().highest_block_number == u64::MAX {
let prev_row = cursor
.prev()?
.map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v)))
.transpose()?;
match prev_row {
// If current shard is the last shard for the sharded key that
// has previous shards, replace it with the previous shard.
Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => {
cursor.delete_current()?;
// Upsert will replace the last shard for this sharded key with
// the previous value.
cursor.upsert(RawKey::new(key), &prev_value)?;
Ok(PruneShardOutcome::Updated)
}
// If there's no previous shard for this sharded key,
// just delete last shard completely.
_ => {
// If we successfully moved the cursor to a previous row,
// jump to the original last shard.
if prev_row.is_some() {
cursor.next()?;
}
// Delete shard.
cursor.delete_current()?;
Ok(PruneShardOutcome::Deleted)
}
}
}
// If current shard is not the last shard for this sharded key,
// just delete it.
else {
cursor.delete_current()?;
Ok(PruneShardOutcome::Deleted)
}
} else {
cursor.upsert(
RawKey::new(key),
&RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)),
)?;
Ok(PruneShardOutcome::Updated)
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/user/storage_history.rs | crates/prune/prune/src/segments/user/storage_history.rs | use crate::{
db_ext::DbTxPruneExt,
segments::{user::history::prune_history_indices, PruneInput, Segment, SegmentOutput},
PrunerError,
};
use itertools::Itertools;
use reth_db_api::{
models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress},
tables,
transaction::DbTxMut,
};
use reth_provider::DBProvider;
use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint};
use rustc_hash::FxHashMap;
use tracing::{instrument, trace};
/// Number of storage history tables to prune in one step
///
/// Storage History consists of two tables: [`tables::StorageChangeSets`] and
/// [`tables::StoragesHistory`]. We want to prune them to the same block number.
const STORAGE_HISTORY_TABLES_TO_PRUNE: usize = 2;
#[derive(Debug)]
pub struct StorageHistory {
mode: PruneMode,
}
impl StorageHistory {
pub const fn new(mode: PruneMode) -> Self {
Self { mode }
}
}
impl<Provider> Segment<Provider> for StorageHistory
where
Provider: DBProvider<Tx: DbTxMut>,
{
fn segment(&self) -> PruneSegment {
PruneSegment::StorageHistory
}
fn mode(&self) -> Option<PruneMode> {
Some(self.mode)
}
fn purpose(&self) -> PrunePurpose {
PrunePurpose::User
}
#[instrument(level = "trace", target = "pruner", skip(self, provider), ret)]
fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
let range = match input.get_next_block_range() {
Some(range) => range,
None => {
trace!(target: "pruner", "No storage history to prune");
return Ok(SegmentOutput::done())
}
};
let range_end = *range.end();
let mut limiter = if let Some(limit) = input.limiter.deleted_entries_limit() {
input.limiter.set_deleted_entries_limit(limit / STORAGE_HISTORY_TABLES_TO_PRUNE)
} else {
input.limiter
};
if limiter.is_limit_reached() {
return Ok(SegmentOutput::not_done(
limiter.interrupt_reason(),
input.previous_checkpoint.map(SegmentOutputCheckpoint::from_prune_checkpoint),
))
}
let mut last_changeset_pruned_block = None;
// Deleted storage changeset keys (account addresses and storage slots) with the highest
// block number deleted for that key.
//
// The size of this map it's limited by `prune_delete_limit * blocks_since_last_run /
// ACCOUNT_HISTORY_TABLES_TO_PRUNE`, and with current default it's usually `3500 * 5
// / 2`, so 8750 entries. Each entry is `160 bit + 256 bit + 64 bit`, so the total
// size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is
// additionally limited by the `max_reorg_depth`, so no OOM is expected here.
let mut highest_deleted_storages = FxHashMap::default();
let (pruned_changesets, done) =
provider.tx_ref().prune_table_with_range::<tables::StorageChangeSets>(
BlockNumberAddress::range(range),
&mut limiter,
|_| false,
|(BlockNumberAddress((block_number, address)), entry)| {
highest_deleted_storages.insert((address, entry.key), block_number);
last_changeset_pruned_block = Some(block_number);
},
)?;
trace!(target: "pruner", deleted = %pruned_changesets, %done, "Pruned storage history (changesets)");
let last_changeset_pruned_block = last_changeset_pruned_block
// If there's more storage changesets to prune, set the checkpoint block number to
// previous, so we could finish pruning its storage changesets on the next run.
.map(|block_number| if done { block_number } else { block_number.saturating_sub(1) })
.unwrap_or(range_end);
// Sort highest deleted block numbers by account address and storage key and turn them into
// sharded keys.
// We did not use `BTreeMap` from the beginning, because it's inefficient for hashes.
let highest_sharded_keys = highest_deleted_storages
.into_iter()
.sorted_unstable() // Unstable is fine because no equal keys exist in the map
.map(|((address, storage_key), block_number)| {
StorageShardedKey::new(
address,
storage_key,
block_number.min(last_changeset_pruned_block),
)
});
let outcomes = prune_history_indices::<Provider, tables::StoragesHistory, _>(
provider,
highest_sharded_keys,
|a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key,
)?;
trace!(target: "pruner", ?outcomes, %done, "Pruned storage history (indices)");
let progress = limiter.progress(done);
Ok(SegmentOutput {
progress,
pruned: pruned_changesets + outcomes.deleted,
checkpoint: Some(SegmentOutputCheckpoint {
block_number: Some(last_changeset_pruned_block),
tx_number: None,
}),
})
}
}
#[cfg(test)]
mod tests {
use crate::segments::{
user::storage_history::STORAGE_HISTORY_TABLES_TO_PRUNE, PruneInput, PruneLimiter, Segment,
SegmentOutput, StorageHistory,
};
use alloy_primitives::{BlockNumber, B256};
use assert_matches::assert_matches;
use reth_db_api::{tables, BlockNumberList};
use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader};
use reth_prune_types::{PruneCheckpoint, PruneMode, PruneProgress, PruneSegment};
use reth_stages::test_utils::{StorageKind, TestStageDB};
use reth_testing_utils::generators::{
self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams,
};
use std::{collections::BTreeMap, ops::AddAssign};
#[test]
fn prune() {
let db = TestStageDB::default();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
0..=5000,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks");
let accounts = random_eoa_accounts(&mut rng, 2).into_iter().collect::<BTreeMap<_, _>>();
let (changesets, _) = random_changeset_range(
&mut rng,
blocks.iter(),
accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))),
1..2,
1..2,
);
db.insert_changesets(changesets.clone(), None).expect("insert changesets");
db.insert_history(changesets.clone(), None).expect("insert history");
let storage_occurrences = db.table::<tables::StoragesHistory>().unwrap().into_iter().fold(
BTreeMap::<_, usize>::new(),
|mut map, (key, _)| {
map.entry((key.address, key.sharded_key.key)).or_default().add_assign(1);
map
},
);
assert!(storage_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1));
assert_eq!(
db.table::<tables::StorageChangeSets>().unwrap().len(),
changesets.iter().flatten().flat_map(|(_, _, entries)| entries).count()
);
let original_shards = db.table::<tables::StoragesHistory>().unwrap();
let test_prune = |to_block: BlockNumber,
run: usize,
expected_result: (PruneProgress, usize)| {
let prune_mode = PruneMode::Before(to_block);
let deleted_entries_limit = 1000;
let mut limiter =
PruneLimiter::default().set_deleted_entries_limit(deleted_entries_limit);
let input = PruneInput {
previous_checkpoint: db
.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::StorageHistory)
.unwrap(),
to_block,
limiter: limiter.clone(),
};
let segment = StorageHistory::new(prune_mode);
let provider = db.factory.database_provider_rw().unwrap();
let result = segment.prune(&provider, input).unwrap();
limiter.increment_deleted_entries_count_by(result.pruned);
assert_matches!(
result,
SegmentOutput {progress, pruned, checkpoint: Some(_)}
if (progress, pruned) == expected_result
);
segment
.save_checkpoint(
&provider,
result.checkpoint.unwrap().as_prune_checkpoint(prune_mode),
)
.unwrap();
provider.commit().expect("commit");
let changesets = changesets
.iter()
.enumerate()
.flat_map(|(block_number, changeset)| {
changeset.iter().flat_map(move |(address, _, entries)| {
entries.iter().map(move |entry| (block_number, address, entry))
})
})
.collect::<Vec<_>>();
#[expect(clippy::skip_while_next)]
let pruned = changesets
.iter()
.enumerate()
.skip_while(|(i, (block_number, _, _))| {
*i < deleted_entries_limit / STORAGE_HISTORY_TABLES_TO_PRUNE * run &&
*block_number <= to_block as usize
})
.next()
.map(|(i, _)| i)
.unwrap_or_default();
let mut pruned_changesets = changesets
.iter()
// Skip what we've pruned so far, subtracting one to get last pruned block number
// further down
.skip(pruned.saturating_sub(1));
let last_pruned_block_number = pruned_changesets
.next()
.map(|(block_number, _, _)| if result.progress.is_finished() {
*block_number
} else {
block_number.saturating_sub(1)
} as BlockNumber)
.unwrap_or(to_block);
let pruned_changesets = pruned_changesets.fold(
BTreeMap::<_, Vec<_>>::new(),
|mut acc, (block_number, address, entry)| {
acc.entry((block_number, address)).or_default().push(entry);
acc
},
);
assert_eq!(
db.table::<tables::StorageChangeSets>().unwrap().len(),
pruned_changesets.values().flatten().count()
);
let actual_shards = db.table::<tables::StoragesHistory>().unwrap();
let expected_shards = original_shards
.iter()
.filter(|(key, _)| key.sharded_key.highest_block_number > last_pruned_block_number)
.map(|(key, blocks)| {
let new_blocks =
blocks.iter().skip_while(|block| *block <= last_pruned_block_number);
(key.clone(), BlockNumberList::new_pre_sorted(new_blocks))
})
.collect::<Vec<_>>();
assert_eq!(actual_shards, expected_shards);
assert_eq!(
db.factory
.provider()
.unwrap()
.get_prune_checkpoint(PruneSegment::StorageHistory)
.unwrap(),
Some(PruneCheckpoint {
block_number: Some(last_pruned_block_number),
tx_number: None,
prune_mode
})
);
};
test_prune(
998,
1,
(
PruneProgress::HasMoreData(
reth_prune_types::PruneInterruptReason::DeletedEntriesLimitReached,
),
500,
),
);
test_prune(998, 2, (PruneProgress::Finished, 499));
test_prune(1200, 3, (PruneProgress::Finished, 202));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/user/receipts_by_logs.rs | crates/prune/prune/src/segments/user/receipts_by_logs.rs | use crate::{
db_ext::DbTxPruneExt,
segments::{PruneInput, Segment},
PrunerError,
};
use alloy_consensus::TxReceipt;
use reth_db_api::{table::Value, tables, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider,
};
use reth_prune_types::{
PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput,
MINIMUM_PRUNING_DISTANCE,
};
use tracing::{instrument, trace};
#[derive(Debug)]
pub struct ReceiptsByLogs {
config: ReceiptsLogPruneConfig,
}
impl ReceiptsByLogs {
pub const fn new(config: ReceiptsLogPruneConfig) -> Self {
Self { config }
}
}
impl<Provider> Segment<Provider> for ReceiptsByLogs
where
    Provider: DBProvider<Tx: DbTxMut>
        + PruneCheckpointWriter
        + TransactionsProvider
        + BlockReader
        + NodePrimitivesProvider<Primitives: NodePrimitives<Receipt: Value>>,
{
    /// This segment prunes receipts selectively, keeping only those emitted by
    /// user-configured contract addresses.
    fn segment(&self) -> PruneSegment {
        PruneSegment::ContractLogs
    }

    /// No single prune mode applies: each contract address carries its own mode
    /// inside `self.config`.
    fn mode(&self) -> Option<PruneMode> {
        None
    }

    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::User
    }

    /// Prunes receipts up to a safe distance from the tip, skipping any receipt
    /// that contains a log from one of the configured contract addresses.
    #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)]
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        // Contract log filtering removes every receipt possible except the ones in the list. So,
        // for the other receipts it's as if they had a `PruneMode::Distance()` of
        // `MINIMUM_PRUNING_DISTANCE`.
        let to_block = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)
            .prune_target_block(input.to_block, PruneSegment::ContractLogs, PrunePurpose::User)?
            .map(|(bn, _)| bn)
            .unwrap_or_default();
        // Get status checkpoint from latest run
        let mut last_pruned_block =
            input.previous_checkpoint.and_then(|checkpoint| checkpoint.block_number);
        let initial_last_pruned_block = last_pruned_block;
        // First transaction number eligible for pruning: the one right after the last
        // transaction of the last pruned block (or 0 on a fresh start).
        let mut from_tx_number = match initial_last_pruned_block {
            Some(block) => provider
                .block_body_indices(block)?
                .map(|block| block.last_tx_num() + 1)
                .unwrap_or(0),
            None => 0,
        };
        // Figure out what receipts have already been pruned, so we can have an accurate
        // `address_filter`
        let address_filter = self.config.group_by_block(input.to_block, last_pruned_block)?;
        // Splits all transactions in different block ranges. Each block range will have its own
        // filter address list and will check it while going through the table
        //
        // Example:
        // For an `address_filter` such as:
        // { block9: [a1, a2], block20: [a3, a4, a5] }
        //
        // The following structures will be created in the exact order as showed:
        // `block_ranges`: [
        //    (block0, block8, 0 addresses),
        //    (block9, block19, 2 addresses),
        //    (block20, to_block, 5 addresses)
        //  ]
        // `filtered_addresses`: [a1, a2, a3, a4, a5]
        //
        // The first range will delete all receipts between block0 - block8
        // The second range will delete all receipts between block9 - 19, except the ones with
        //     emitter logs from these addresses: [a1, a2].
        // The third range will delete all receipts between block20 - to_block, except the ones with
        //     emitter logs from these addresses: [a1, a2, a3, a4, a5]
        let mut block_ranges = vec![];
        let mut blocks_iter = address_filter.iter().peekable();
        let mut filtered_addresses = vec![];
        while let Some((start_block, addresses)) = blocks_iter.next() {
            filtered_addresses.extend_from_slice(addresses);
            // This will clear all receipts before the first appearance of a contract log or since
            // the block after the last pruned one.
            if block_ranges.is_empty() {
                let init = last_pruned_block.map(|b| b + 1).unwrap_or_default();
                if init < *start_block {
                    block_ranges.push((init, *start_block - 1, 0));
                }
            }
            let end_block =
                blocks_iter.peek().map(|(next_block, _)| *next_block - 1).unwrap_or(to_block);
            // Addresses in lower block ranges, are still included in the inclusion list for future
            // ranges.
            block_ranges.push((*start_block, end_block, filtered_addresses.len()));
        }
        trace!(
            target: "pruner",
            ?block_ranges,
            ?filtered_addresses,
            "Calculated block ranges and filtered addresses",
        );
        let mut limiter = input.limiter;
        let mut done = true;
        let mut pruned = 0;
        let mut last_pruned_transaction = None;
        for (start_block, end_block, num_addresses) in block_ranges {
            let block_range = start_block..=end_block;
            // Calculate the transaction range from this block range
            let tx_range_end = match provider.block_body_indices(end_block)? {
                Some(body) => body.last_tx_num(),
                None => {
                    trace!(
                        target: "pruner",
                        ?block_range,
                        "No receipts to prune."
                    );
                    continue
                }
            };
            let tx_range = from_tx_number..=tx_range_end;
            // Delete receipts, except the ones in the inclusion list
            let mut last_skipped_transaction = 0;
            let deleted;
            (deleted, done) = provider.tx_ref().prune_table_with_range::<tables::Receipts<
                <Provider::Primitives as NodePrimitives>::Receipt,
            >>(
                tx_range,
                &mut limiter,
                // Skip (i.e. keep) a receipt if any of its logs was emitted by a filtered
                // address. Only the first `num_addresses` entries apply to this range.
                |(tx_num, receipt)| {
                    let skip = num_addresses > 0 &&
                        receipt.logs().iter().any(|log| {
                            filtered_addresses[..num_addresses].contains(&&log.address)
                        });
                    if skip {
                        last_skipped_transaction = *tx_num;
                    }
                    skip
                },
                |row| last_pruned_transaction = Some(row.0),
            )?;
            trace!(target: "pruner", %deleted, %done, ?block_range, "Pruned receipts");
            pruned += deleted;
            // For accurate checkpoints we need to know that we have checked every transaction.
            // Example: we reached the end of the range, and the last receipt is supposed to skip
            // its deletion.
            // `Option::insert` stores the max of the last deleted and last skipped tx and
            // returns a reference to it, so the shadowed binding below is a plain `u64`.
            let last_pruned_transaction = *last_pruned_transaction
                .insert(last_pruned_transaction.unwrap_or_default().max(last_skipped_transaction));
            last_pruned_block = Some(
                provider
                    .transaction_block(last_pruned_transaction)?
                    .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))?
                    // If there's more receipts to prune, set the checkpoint block number to
                    // previous, so we could finish pruning its receipts on the
                    // next run.
                    .saturating_sub(if done { 0 } else { 1 }),
            );
            if limiter.is_limit_reached() {
                // Only fully done if the limit was hit exactly on the final range.
                done &= end_block == to_block;
                break
            }
            from_tx_number = last_pruned_transaction + 1;
        }
        // If there are contracts using `PruneMode::Distance(_)` there will be receipts before
        // `to_block` that become eligible to be pruned in future runs. Therefore, our checkpoint is
        // not actually `to_block`, but the `lowest_block_with_distance` from any contract.
        // This ensures that in future pruner runs we can prune all these receipts between the
        // previous `lowest_block_with_distance` and the new one using
        // `get_next_tx_num_range_from_checkpoint`.
        //
        // Only applies if we were able to prune everything intended for this run, otherwise the
        // checkpoint is the `last_pruned_block`.
        let prune_mode_block = self
            .config
            .lowest_block_with_distance(input.to_block, initial_last_pruned_block)?
            .unwrap_or(to_block);
        provider.save_prune_checkpoint(
            PruneSegment::ContractLogs,
            PruneCheckpoint {
                block_number: Some(prune_mode_block.min(last_pruned_block.unwrap_or(u64::MAX))),
                tx_number: last_pruned_transaction,
                prune_mode: PruneMode::Before(prune_mode_block),
            },
        )?;
        let progress = limiter.progress(done);
        Ok(SegmentOutput { progress, pruned, checkpoint: None })
    }
}
#[cfg(test)]
mod tests {
    use crate::segments::{PruneInput, PruneLimiter, ReceiptsByLogs, Segment};
    use alloy_primitives::B256;
    use assert_matches::assert_matches;
    use reth_db_api::{cursor::DbCursorRO, tables, transaction::DbTx};
    use reth_primitives_traits::InMemorySize;
    use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, TransactionsProvider};
    use reth_prune_types::{PruneMode, PruneSegment, ReceiptsLogPruneConfig};
    use reth_stages::test_utils::{StorageKind, TestStageDB};
    use reth_testing_utils::generators::{
        self, random_block_range, random_eoa_account, random_log, random_receipt, BlockRangeParams,
    };
    use std::collections::BTreeMap;

    /// End-to-end check of [`ReceiptsByLogs::prune`]: repeatedly prunes with a small
    /// deleted-entries limit until finished, then verifies only receipts emitted by the
    /// tracked contract (or receipts too close to the tip) survive.
    #[test]
    fn prune_receipts_by_logs() {
        reth_tracing::init_test_tracing();
        let db = TestStageDB::default();
        let mut rng = generators::rng();
        let tip = 20000;
        // Dense blocks at both ends of the range, sparse (0..1 txs) in the middle.
        let blocks = [
            random_block_range(
                &mut rng,
                0..=100,
                BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..5, ..Default::default() },
            ),
            random_block_range(
                &mut rng,
                (100 + 1)..=(tip - 100),
                BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() },
            ),
            random_block_range(
                &mut rng,
                (tip - 100 + 1)..=tip,
                BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..5, ..Default::default() },
            ),
        ]
        .concat();
        db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks");
        let mut receipts = Vec::new();
        let (deposit_contract_addr, _) = random_eoa_account(&mut rng);
        for block in &blocks {
            receipts.reserve_exact(block.body().size());
            for (txi, transaction) in block.body().transactions.iter().enumerate() {
                let mut receipt = random_receipt(&mut rng, transaction, Some(1), None);
                // Only the last receipt of each block carries a log from the tracked address.
                receipt.logs.push(random_log(
                    &mut rng,
                    (txi == (block.transaction_count() - 1)).then_some(deposit_contract_addr),
                    Some(1),
                ));
                receipts.push((receipts.len() as u64, receipt));
            }
        }
        db.insert_receipts(receipts).expect("insert receipts");
        assert_eq!(
            db.table::<tables::Transactions>().unwrap().len(),
            blocks.iter().map(|block| block.transaction_count()).sum::<usize>()
        );
        assert_eq!(
            db.table::<tables::Transactions>().unwrap().len(),
            db.table::<tables::Receipts>().unwrap().len()
        );
        // One bounded prune pass; returns whether pruning has fully finished.
        let run_prune = || {
            let provider = db.factory.database_provider_rw().unwrap();
            let prune_before_block: usize = 20;
            let prune_mode = PruneMode::Before(prune_before_block as u64);
            let receipts_log_filter =
                ReceiptsLogPruneConfig(BTreeMap::from([(deposit_contract_addr, prune_mode)]));
            let limiter = PruneLimiter::default().set_deleted_entries_limit(10);
            let result = ReceiptsByLogs::new(receipts_log_filter).prune(
                &provider,
                PruneInput {
                    previous_checkpoint: db
                        .factory
                        .provider()
                        .unwrap()
                        .get_prune_checkpoint(PruneSegment::ContractLogs)
                        .unwrap(),
                    to_block: tip,
                    limiter,
                },
            );
            provider.commit().expect("commit");
            assert_matches!(result, Ok(_));
            let output = result.unwrap();
            let (pruned_block, pruned_tx) = db
                .factory
                .provider()
                .unwrap()
                .get_prune_checkpoint(PruneSegment::ContractLogs)
                .unwrap()
                .map(|checkpoint| (checkpoint.block_number.unwrap(), checkpoint.tx_number.unwrap()))
                .unwrap_or_default();
            // All receipts are in the end of the block
            let unprunable = pruned_block.saturating_sub(prune_before_block as u64 - 1);
            assert_eq!(
                db.table::<tables::Receipts>().unwrap().len(),
                blocks.iter().map(|block| block.transaction_count()).sum::<usize>() -
                    ((pruned_tx + 1) - unprunable) as usize
            );
            output.progress.is_finished()
        };
        while !run_prune() {}
        let provider = db.factory.provider().unwrap();
        let mut cursor = provider.tx_ref().cursor_read::<tables::Receipts>().unwrap();
        let walker = cursor.walk(None).unwrap();
        for receipt in walker {
            let (tx_num, receipt) = receipt.unwrap();
            // Either we only find our contract, or the receipt is part of the unprunable receipts
            // set by tip - 128
            assert!(
                receipt.logs.iter().any(|l| l.address == deposit_contract_addr) ||
                    provider.transaction_block(tx_num).unwrap().unwrap() > tip - 128,
            );
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/user/mod.rs | crates/prune/prune/src/segments/user/mod.rs | mod account_history;
mod history;
mod receipts;
mod receipts_by_logs;
mod sender_recovery;
mod storage_history;
mod transaction_lookup;
pub use account_history::AccountHistory;
pub use receipts::Receipts;
pub use receipts_by_logs::ReceiptsByLogs;
pub use sender_recovery::SenderRecovery;
pub use storage_history::StorageHistory;
pub use transaction_lookup::TransactionLookup;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/user/receipts.rs | crates/prune/prune/src/segments/user/receipts.rs | use crate::{
segments::{PruneInput, Segment},
PrunerError,
};
use reth_db_api::{table::Value, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider,
PruneCheckpointWriter, TransactionsProvider,
};
use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput};
use tracing::instrument;
/// User-configured segment that prunes receipts according to a single [`PruneMode`].
#[derive(Debug)]
pub struct Receipts {
    /// Prune mode applied to the whole receipts table.
    mode: PruneMode,
}

impl Receipts {
    /// Creates a new receipts pruning segment with the given prune mode.
    pub const fn new(mode: PruneMode) -> Self {
        Self { mode }
    }
}
impl<Provider> Segment<Provider> for Receipts
where
    Provider: DBProvider<Tx: DbTxMut>
        + PruneCheckpointWriter
        + TransactionsProvider
        + BlockReader
        + NodePrimitivesProvider<Primitives: NodePrimitives<Receipt: Value>>,
{
    fn segment(&self) -> PruneSegment {
        PruneSegment::Receipts
    }

    /// The user-configured mode drives this segment, unlike the static-file variant.
    fn mode(&self) -> Option<PruneMode> {
        Some(self.mode)
    }

    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::User
    }

    /// Delegates to the shared receipts pruning implementation.
    #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)]
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        crate::segments::receipts::prune(provider, input)
    }

    /// Delegates checkpoint persistence to the shared receipts implementation.
    fn save_checkpoint(
        &self,
        provider: &Provider,
        checkpoint: PruneCheckpoint,
    ) -> ProviderResult<()> {
        crate::segments::receipts::save_checkpoint(provider, checkpoint)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/static_file/headers.rs | crates/prune/prune/src/segments/static_file/headers.rs | use crate::{
db_ext::DbTxPruneExt,
segments::{PruneInput, Segment},
PruneLimiter, PrunerError,
};
use alloy_primitives::BlockNumber;
use itertools::Itertools;
use reth_db_api::{
cursor::{DbCursorRO, RangeWalker},
table::Value,
tables,
transaction::DbTxMut,
};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory};
use reth_prune_types::{
PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint,
};
use reth_static_file_types::StaticFileSegment;
use std::num::NonZeroUsize;
use tracing::trace;
/// Number of header tables to prune in one step
/// (`Headers`, `HeaderTerminalDifficulties`, `CanonicalHeaders` — pruned in lockstep).
const HEADER_TABLES_TO_PRUNE: usize = 3;

/// Static-file segment that prunes database header tables for blocks already
/// copied into static files.
#[derive(Debug)]
pub struct Headers<N> {
    /// Used to query the highest block already persisted to static files.
    static_file_provider: StaticFileProvider<N>,
}

impl<N> Headers<N> {
    /// Creates a new headers pruning segment backed by the given static-file provider.
    pub const fn new(static_file_provider: StaticFileProvider<N>) -> Self {
        Self { static_file_provider }
    }
}
impl<Provider> Segment<Provider> for Headers<Provider::Primitives>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Value>>
        + DBProvider<Tx: DbTxMut>,
{
    fn segment(&self) -> PruneSegment {
        PruneSegment::Headers
    }

    /// Prune everything up to (and including) the highest header block already
    /// persisted to static files; `None` when no static-file headers exist yet.
    fn mode(&self) -> Option<PruneMode> {
        self.static_file_provider
            .get_highest_static_file_block(StaticFileSegment::Headers)
            .map(PruneMode::before_inclusive)
    }

    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::StaticFile
    }

    /// Prunes the three header tables (`Headers`, `HeaderTerminalDifficulties`,
    /// `CanonicalHeaders`) in lockstep over the next eligible block range.
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        let (block_range_start, block_range_end) = match input.get_next_block_range() {
            Some(range) => (*range.start(), *range.end()),
            None => {
                trace!(target: "pruner", "No headers to prune");
                return Ok(SegmentOutput::done())
            }
        };
        // All three tables are walked over the same block range.
        let range = block_range_start..=block_range_end;
        let mut headers_cursor = provider
            .tx_ref()
            .cursor_write::<tables::Headers<<Provider::Primitives as NodePrimitives>::BlockHeader>>(
            )?;
        let mut header_tds_cursor =
            provider.tx_ref().cursor_write::<tables::HeaderTerminalDifficulties>()?;
        let mut canonical_headers_cursor =
            provider.tx_ref().cursor_write::<tables::CanonicalHeaders>()?;
        // Round the deleted-entries limit down to a multiple of the table count so the
        // three tables always end a run pruned up to the same block height.
        let mut limiter = input.limiter.floor_deleted_entries_limit_to_multiple_of(
            NonZeroUsize::new(HEADER_TABLES_TO_PRUNE).unwrap(),
        );
        let tables_iter = HeaderTablesIter::new(
            provider,
            &mut limiter,
            headers_cursor.walk_range(range.clone())?,
            header_tds_cursor.walk_range(range.clone())?,
            canonical_headers_cursor.walk_range(range)?,
        );
        let mut last_pruned_block: Option<u64> = None;
        let mut pruned = 0;
        for res in tables_iter {
            let HeaderTablesIterItem { pruned_block, entries_pruned } = res?;
            last_pruned_block = Some(pruned_block);
            pruned += entries_pruned;
        }
        let done = last_pruned_block == Some(block_range_end);
        let progress = limiter.progress(done);
        Ok(SegmentOutput {
            progress,
            pruned,
            checkpoint: Some(SegmentOutputCheckpoint {
                block_number: last_pruned_block,
                tx_number: None,
            }),
        })
    }
}
/// Shorthand for a mutable range walker over table `T` on the provider's write transaction.
type Walker<'a, Provider, T> =
    RangeWalker<'a, T, <<Provider as DBProvider>::Tx as DbTxMut>::CursorMut<T>>;

/// Iterator that prunes one block's worth of entries from the three header
/// tables per step, keeping them at the same height.
#[allow(missing_debug_implementations)]
struct HeaderTablesIter<'a, Provider>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Value>>
        + DBProvider<Tx: DbTxMut>,
{
    provider: &'a Provider,
    /// Shared limiter; each step consumes up to one entry per table.
    limiter: &'a mut PruneLimiter,
    headers_walker: Walker<
        'a,
        Provider,
        tables::Headers<<Provider::Primitives as NodePrimitives>::BlockHeader>,
    >,
    header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>,
    canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>,
}

/// Result of one [`HeaderTablesIter`] step.
struct HeaderTablesIterItem {
    /// Block number whose entries were just pruned from all three tables.
    pruned_block: BlockNumber,
    /// Number of table entries deleted in this step (one per header table).
    entries_pruned: usize,
}
impl<'a, Provider> HeaderTablesIter<'a, Provider>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Value>>
        + DBProvider<Tx: DbTxMut>,
{
    /// Creates a lockstep iterator over the three header-table walkers; all walkers
    /// are expected to cover the same block range.
    const fn new(
        provider: &'a Provider,
        limiter: &'a mut PruneLimiter,
        headers_walker: Walker<
            'a,
            Provider,
            tables::Headers<<Provider::Primitives as NodePrimitives>::BlockHeader>,
        >,
        header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>,
        canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>,
    ) -> Self {
        Self { provider, limiter, headers_walker, header_tds_walker, canonical_headers_walker }
    }
}
impl<Provider> Iterator for HeaderTablesIter<'_, Provider>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Value>>
        + DBProvider<Tx: DbTxMut>,
{
    type Item = Result<HeaderTablesIterItem, PrunerError>;

    /// Prunes one entry from each of the three header tables and verifies they all
    /// advanced to the same block number. Returns `None` once the limiter is
    /// exhausted or the walkers are drained.
    fn next(&mut self) -> Option<Self::Item> {
        if self.limiter.is_limit_reached() {
            return None
        }
        let mut pruned_block_headers = None;
        let mut pruned_block_td = None;
        let mut pruned_block_canonical = None;
        // Each step deletes at most one row per table; errors abort the iteration.
        if let Err(err) = self.provider.tx_ref().prune_table_with_range_step(
            &mut self.headers_walker,
            self.limiter,
            &mut |_| false,
            &mut |row| pruned_block_headers = Some(row.0),
        ) {
            return Some(Err(err.into()))
        }
        if let Err(err) = self.provider.tx_ref().prune_table_with_range_step(
            &mut self.header_tds_walker,
            self.limiter,
            &mut |_| false,
            &mut |row| pruned_block_td = Some(row.0),
        ) {
            return Some(Err(err.into()))
        }
        if let Err(err) = self.provider.tx_ref().prune_table_with_range_step(
            &mut self.canonical_headers_walker,
            self.limiter,
            &mut |_| false,
            &mut |row| pruned_block_canonical = Some(row.0),
        ) {
            return Some(Err(err.into()))
        }
        // The three tables must stay pruned up to the same height; anything else means
        // the database is inconsistent.
        if ![pruned_block_headers, pruned_block_td, pruned_block_canonical].iter().all_equal() {
            return Some(Err(PrunerError::InconsistentData(
                "All headers-related tables should be pruned up to the same height",
            )))
        }
        // All three are equal here, so `pruned_block_headers` stands in for the step result;
        // `None` means the walkers are exhausted and iteration ends.
        pruned_block_headers.map(move |block| {
            Ok(HeaderTablesIterItem { pruned_block: block, entries_pruned: HEADER_TABLES_TO_PRUNE })
        })
    }
}
#[cfg(test)]
mod tests {
    use crate::segments::{
        static_file::headers::HEADER_TABLES_TO_PRUNE, PruneInput, PruneLimiter, Segment,
        SegmentOutput,
    };
    use alloy_primitives::{BlockNumber, B256, U256};
    use assert_matches::assert_matches;
    use reth_db_api::{tables, transaction::DbTx};
    use reth_provider::{
        DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter,
        StaticFileProviderFactory,
    };
    use reth_prune_types::{
        PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment,
        SegmentOutputCheckpoint,
    };
    use reth_stages::test_utils::TestStageDB;
    use reth_testing_utils::{generators, generators::random_header_range};
    use tracing::trace;

    /// Verifies limited pruning over two runs: the first hits the deleted-entries limit,
    /// the second finishes; all three header tables shrink in lockstep.
    #[test]
    fn prune() {
        reth_tracing::init_test_tracing();
        let db = TestStageDB::default();
        let mut rng = generators::rng();
        let headers = random_header_range(&mut rng, 0..100, B256::ZERO);
        let tx = db.factory.provider_rw().unwrap().into_tx();
        for header in &headers {
            TestStageDB::insert_header(None, &tx, header, U256::ZERO).unwrap();
        }
        tx.commit().unwrap();
        assert_eq!(db.table::<tables::CanonicalHeaders>().unwrap().len(), headers.len());
        assert_eq!(db.table::<tables::Headers>().unwrap().len(), headers.len());
        assert_eq!(db.table::<tables::HeaderTerminalDifficulties>().unwrap().len(), headers.len());
        // Runs one prune pass and checks the output, table sizes, and saved checkpoint.
        let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| {
            let segment = super::Headers::new(db.factory.static_file_provider());
            let prune_mode = PruneMode::Before(to_block);
            let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
            let input = PruneInput {
                previous_checkpoint: db
                    .factory
                    .provider()
                    .unwrap()
                    .get_prune_checkpoint(PruneSegment::Headers)
                    .unwrap(),
                to_block,
                limiter: limiter.clone(),
            };
            let next_block_number_to_prune = db
                .factory
                .provider()
                .unwrap()
                .get_prune_checkpoint(PruneSegment::Headers)
                .unwrap()
                .and_then(|checkpoint| checkpoint.block_number)
                .map(|block_number| block_number + 1)
                .unwrap_or_default();
            let provider = db.factory.database_provider_rw().unwrap();
            let result = segment.prune(&provider, input.clone()).unwrap();
            limiter.increment_deleted_entries_count_by(result.pruned);
            trace!(target: "pruner::test",
                expected_prune_progress=?expected_result.0,
                expected_pruned=?expected_result.1,
                result=?result,
                "SegmentOutput"
            );
            assert_matches!(
                result,
                SegmentOutput {progress, pruned, checkpoint: Some(_)}
                    if (progress, pruned) == expected_result
            );
            provider
                .save_prune_checkpoint(
                    PruneSegment::Headers,
                    result.checkpoint.unwrap().as_prune_checkpoint(prune_mode),
                )
                .unwrap();
            provider.commit().expect("commit");
            // The limit is split evenly across the three header tables, so at most
            // limit / HEADER_TABLES_TO_PRUNE blocks are pruned per run.
            let last_pruned_block_number = to_block.min(
                next_block_number_to_prune +
                    (input.limiter.deleted_entries_limit().unwrap() / HEADER_TABLES_TO_PRUNE - 1)
                        as u64,
            );
            assert_eq!(
                db.table::<tables::CanonicalHeaders>().unwrap().len(),
                headers.len() - (last_pruned_block_number + 1) as usize
            );
            assert_eq!(
                db.table::<tables::Headers>().unwrap().len(),
                headers.len() - (last_pruned_block_number + 1) as usize
            );
            assert_eq!(
                db.table::<tables::HeaderTerminalDifficulties>().unwrap().len(),
                headers.len() - (last_pruned_block_number + 1) as usize
            );
            assert_eq!(
                db.factory.provider().unwrap().get_prune_checkpoint(PruneSegment::Headers).unwrap(),
                Some(PruneCheckpoint {
                    block_number: Some(last_pruned_block_number),
                    tx_number: None,
                    prune_mode
                })
            );
        };
        test_prune(
            3,
            (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 9),
        );
        test_prune(3, (PruneProgress::Finished, 3));
    }

    /// With a zero deleted-entries limit (below the per-step table count), pruning
    /// cannot make progress and must report "not done" without touching data.
    #[test]
    fn prune_cannot_be_done() {
        let db = TestStageDB::default();
        let limiter = PruneLimiter::default().set_deleted_entries_limit(0);
        let input = PruneInput {
            previous_checkpoint: None,
            to_block: 1,
            // Less than total number of tables for `Headers` segment
            limiter,
        };
        let provider = db.factory.database_provider_rw().unwrap();
        let segment = super::Headers::new(db.factory.static_file_provider());
        let result = segment.prune(&provider, input).unwrap();
        assert_eq!(
            result,
            SegmentOutput::not_done(
                PruneInterruptReason::DeletedEntriesLimitReached,
                Some(SegmentOutputCheckpoint::default())
            )
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/static_file/transactions.rs | crates/prune/prune/src/segments/static_file/transactions.rs | use crate::{
db_ext::DbTxPruneExt,
segments::{PruneInput, Segment},
PrunerError,
};
use reth_db_api::{table::Value, tables, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory,
TransactionsProvider,
};
use reth_prune_types::{
PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint,
};
use reth_static_file_types::StaticFileSegment;
use tracing::trace;
/// The type responsible for pruning transactions in the database and history expiry.
///
/// Static-file segment: database transaction rows are pruned once the
/// corresponding blocks have been persisted to static files.
#[derive(Debug)]
pub struct Transactions<N> {
    /// Used to query the highest block already persisted to static files.
    static_file_provider: StaticFileProvider<N>,
}

impl<N> Transactions<N> {
    /// Creates a new transactions pruning segment backed by the given static-file provider.
    pub const fn new(static_file_provider: StaticFileProvider<N>) -> Self {
        Self { static_file_provider }
    }
}
impl<Provider> Segment<Provider> for Transactions<Provider::Primitives>
where
    Provider: DBProvider<Tx: DbTxMut>
        + TransactionsProvider
        + BlockReader
        + StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Value>>,
{
    fn segment(&self) -> PruneSegment {
        PruneSegment::Transactions
    }

    /// Prune everything up to (and including) the highest transactions block already
    /// persisted to static files; `None` when no static-file transactions exist yet.
    fn mode(&self) -> Option<PruneMode> {
        self.static_file_provider
            .get_highest_static_file_block(StaticFileSegment::Transactions)
            .map(PruneMode::before_inclusive)
    }

    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::StaticFile
    }

    /// Prunes the next eligible transaction range from the `Transactions` table and
    /// reports a block/tx checkpoint for the following run.
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        let tx_range = match input.get_next_tx_num_range(provider)? {
            Some(range) => range,
            None => {
                trace!(target: "pruner", "No transactions to prune");
                return Ok(SegmentOutput::done())
            }
        };
        let mut limiter = input.limiter;
        let mut last_pruned_transaction = *tx_range.end();
        // No skip predicate: every transaction in the range is deleted (bounded by limiter).
        let (pruned, done) = provider.tx_ref().prune_table_with_range::<tables::Transactions<
            <Provider::Primitives as NodePrimitives>::SignedTx,
        >>(
            tx_range,
            &mut limiter,
            |_| false,
            |row| last_pruned_transaction = row.0,
        )?;
        trace!(target: "pruner", %pruned, %done, "Pruned transactions");
        let last_pruned_block = provider
            .transaction_block(last_pruned_transaction)?
            .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))?
            // If there's more transactions to prune, set the checkpoint block number to previous,
            // so we could finish pruning its transactions on the next run.
            .checked_sub(if done { 0 } else { 1 });
        let progress = limiter.progress(done);
        Ok(SegmentOutput {
            progress,
            pruned,
            checkpoint: Some(SegmentOutputCheckpoint {
                block_number: last_pruned_block,
                tx_number: Some(last_pruned_transaction),
            }),
        })
    }
}
#[cfg(test)]
mod tests {
    use crate::segments::{PruneInput, PruneLimiter, Segment};
    use alloy_primitives::{BlockNumber, TxNumber, B256};
    use assert_matches::assert_matches;
    use itertools::{
        FoldWhile::{Continue, Done},
        Itertools,
    };
    use reth_db_api::tables;
    use reth_provider::{
        DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter,
        StaticFileProviderFactory,
    };
    use reth_prune_types::{
        PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment,
        SegmentOutput,
    };
    use reth_stages::test_utils::{StorageKind, TestStageDB};
    use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
    use std::ops::Sub;

    /// Verifies limited transaction pruning over two runs: the first hits the
    /// deleted-entries limit, the second finishes; the table size and the persisted
    /// checkpoint are checked after each run.
    #[test]
    fn prune() {
        let db = TestStageDB::default();
        let mut rng = generators::rng();
        let blocks = random_block_range(
            &mut rng,
            1..=100,
            BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() },
        );
        db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks");
        let transactions =
            blocks.iter().flat_map(|block| &block.body().transactions).collect::<Vec<_>>();
        assert_eq!(db.table::<tables::Transactions>().unwrap().len(), transactions.len());
        // Runs one prune pass and checks the output, table size, and saved checkpoint.
        let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| {
            let segment = super::Transactions::new(db.factory.static_file_provider());
            let prune_mode = PruneMode::Before(to_block);
            let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
            let input = PruneInput {
                previous_checkpoint: db
                    .factory
                    .provider()
                    .unwrap()
                    .get_prune_checkpoint(PruneSegment::Transactions)
                    .unwrap(),
                to_block,
                limiter: limiter.clone(),
            };
            let next_tx_number_to_prune = db
                .factory
                .provider()
                .unwrap()
                .get_prune_checkpoint(PruneSegment::Transactions)
                .unwrap()
                .and_then(|checkpoint| checkpoint.tx_number)
                .map(|tx_number| tx_number + 1)
                .unwrap_or_default();
            let provider = db.factory.database_provider_rw().unwrap();
            let result = segment.prune(&provider, input.clone()).unwrap();
            limiter.increment_deleted_entries_count_by(result.pruned);
            assert_matches!(
                result,
                SegmentOutput {progress, pruned, checkpoint: Some(_)}
                    if (progress, pruned) == expected_result
            );
            provider
                .save_prune_checkpoint(
                    PruneSegment::Transactions,
                    result.checkpoint.unwrap().as_prune_checkpoint(prune_mode),
                )
                .unwrap();
            provider.commit().expect("commit");
            // Expected last pruned tx: bounded by both the target block and the limiter.
            let last_pruned_tx_number = blocks
                .iter()
                .take(to_block as usize)
                .map(|block| block.transaction_count())
                .sum::<usize>()
                .min(
                    next_tx_number_to_prune as usize +
                        input.limiter.deleted_entries_limit().unwrap(),
                )
                .sub(1);
            // Walk blocks until cumulative tx count passes the last pruned tx to find
            // the containing block; back off by one when the run didn't finish.
            let last_pruned_block_number = blocks
                .iter()
                .fold_while((0, 0), |(_, mut tx_count), block| {
                    tx_count += block.transaction_count();
                    if tx_count > last_pruned_tx_number {
                        Done((block.number, tx_count))
                    } else {
                        Continue((block.number, tx_count))
                    }
                })
                .into_inner()
                .0
                .checked_sub(if result.progress.is_finished() { 0 } else { 1 });
            assert_eq!(
                db.table::<tables::Transactions>().unwrap().len(),
                transactions.len() - (last_pruned_tx_number + 1)
            );
            assert_eq!(
                db.factory
                    .provider()
                    .unwrap()
                    .get_prune_checkpoint(PruneSegment::Transactions)
                    .unwrap(),
                Some(PruneCheckpoint {
                    block_number: last_pruned_block_number,
                    tx_number: Some(last_pruned_tx_number as TxNumber),
                    prune_mode
                })
            );
        };
        test_prune(
            6,
            (PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 10),
        );
        test_prune(6, (PruneProgress::Finished, 2));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/static_file/mod.rs | crates/prune/prune/src/segments/static_file/mod.rs | mod headers;
mod receipts;
mod transactions;
pub use headers::Headers;
pub use receipts::Receipts;
pub use transactions::Transactions;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/prune/prune/src/segments/static_file/receipts.rs | crates/prune/prune/src/segments/static_file/receipts.rs | use crate::{
segments::{PruneInput, Segment},
PrunerError,
};
use reth_db_api::{table::Value, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider,
PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider,
};
use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput};
use reth_static_file_types::StaticFileSegment;
/// Static-file segment that prunes database receipts for blocks already
/// persisted to static files.
#[derive(Debug)]
pub struct Receipts<N> {
    /// Used to query the highest block already persisted to static files.
    static_file_provider: StaticFileProvider<N>,
}

impl<N> Receipts<N> {
    /// Creates a new receipts pruning segment backed by the given static-file provider.
    pub const fn new(static_file_provider: StaticFileProvider<N>) -> Self {
        Self { static_file_provider }
    }
}
impl<Provider> Segment<Provider> for Receipts<Provider::Primitives>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<Receipt: Value>>
        + DBProvider<Tx: DbTxMut>
        + PruneCheckpointWriter
        + TransactionsProvider
        + BlockReader,
{
    fn segment(&self) -> PruneSegment {
        PruneSegment::Receipts
    }

    /// Prune everything up to (and including) the highest receipts block already
    /// persisted to static files; `None` when no static-file receipts exist yet.
    fn mode(&self) -> Option<PruneMode> {
        self.static_file_provider
            .get_highest_static_file_block(StaticFileSegment::Receipts)
            .map(PruneMode::before_inclusive)
    }

    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::StaticFile
    }

    /// Delegates to the shared receipts pruning implementation.
    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        crate::segments::receipts::prune(provider, input)
    }

    /// Delegates checkpoint persistence to the shared receipts implementation.
    fn save_checkpoint(
        &self,
        provider: &Provider,
        checkpoint: PruneCheckpoint,
    ) -> ProviderResult<()> {
        crate::segments::receipts::save_checkpoint(provider, checkpoint)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stateless/src/lib.rs | crates/stateless/src/lib.rs | //! Provides types and functions for stateless execution and validation of Ethereum blocks.
//!
//! This crate enables the verification of block execution without requiring access to a
//! full node's persistent database. Instead, it relies on pre-generated "witness" data
//! that proves the specific state accessed during the block's execution.
//!
//! # Key Components
//!
//! * `WitnessDatabase`: An implementation of [`reth_revm::Database`] that uses a
//! [`reth_trie_sparse::SparseStateTrie`] populated from witness data, along with provided
//! bytecode and ancestor block hashes, to serve state reads during execution.
//! * `stateless_validation`: The core function that orchestrates the stateless validation process.
//! It takes a block, its execution witness, ancestor headers, and chain specification, then
//! performs:
//! 1. Witness verification against the parent block's state root.
//! 2. Block execution using the `WitnessDatabase`.
//! 3. Post-execution consensus checks.
//! 4. Post-state root calculation and comparison against the block header.
//!
//! # Usage
//!
//! The primary entry point is typically the `validation::stateless_validation` function. Callers
//! need to provide the block to be validated along with accurately generated `ExecutionWitness`
//! data corresponding to that block's execution trace and the necessary Headers of ancestor
//! blocks.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![no_std]
extern crate alloc;
/// Sparse trie implementation for stateless validation
pub mod trie;
#[doc(inline)]
pub use trie::StatelessTrie;
#[doc(inline)]
pub use validation::stateless_validation_with_trie;
/// Implementation of stateless validation
pub mod validation;
pub(crate) mod witness_db;
#[doc(inline)]
pub use alloy_rpc_types_debug::ExecutionWitness;
use reth_ethereum_primitives::Block;
/// `StatelessInput` is a convenience structure for serializing the input needed
/// for the stateless validation function.
///
/// Bundles the block with the execution witness proving the state it touches,
/// so both can travel together over a serialization boundary.
#[serde_with::serde_as]
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct StatelessInput {
    /// The block being executed in the stateless validation function
    // bincode-compatible representation is required because the plain `Block`
    // serde impl is not bincode-safe.
    #[serde_as(
        as = "reth_primitives_traits::serde_bincode_compat::Block<reth_ethereum_primitives::TransactionSigned, alloy_consensus::Header>"
    )]
    pub block: Block,
    /// `ExecutionWitness` for the stateless validation function
    pub witness: ExecutionWitness,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stateless/src/validation.rs | crates/stateless/src/validation.rs | use crate::{
trie::{StatelessSparseTrie, StatelessTrie},
witness_db::WitnessDatabase,
ExecutionWitness,
};
use alloc::{
collections::BTreeMap,
fmt::Debug,
string::{String, ToString},
sync::Arc,
vec::Vec,
};
use alloy_consensus::{BlockHeader, Header};
use alloy_primitives::{keccak256, B256};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_consensus::{Consensus, HeaderValidator};
use reth_errors::ConsensusError;
use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus};
use reth_ethereum_primitives::{Block, EthPrimitives};
use reth_evm::{execute::Executor, ConfigureEvm};
use reth_primitives_traits::{RecoveredBlock, SealedHeader};
use reth_trie_common::{HashedPostState, KeccakKeyHasher};
/// Errors that can occur during stateless validation.
#[derive(Debug, thiserror::Error)]
pub enum StatelessValidationError {
    /// Error when the number of ancestor headers exceeds the limit.
    #[error("ancestor header count ({count}) exceeds limit ({limit})")]
    AncestorHeaderLimitExceeded {
        /// The number of headers provided.
        count: usize,
        /// The limit.
        limit: usize,
    },
    /// Error when the ancestor headers do not form a contiguous chain.
    ///
    /// Raised when either the parent hash or the block number of a supplied
    /// ancestor header does not line up with its child.
    #[error("invalid ancestor chain")]
    InvalidAncestorChain,
    /// Error when revealing the witness data failed.
    #[error("failed to reveal witness data for pre-state root {pre_state_root}")]
    WitnessRevealFailed {
        /// The pre-state root used for verification.
        pre_state_root: B256,
    },
    /// Error during stateless block execution.
    // NOTE(review): the inner `String` (the underlying execution error) is not included
    // in the `Display` output; consider `#[error("stateless block execution failed: {0}")]`.
    #[error("stateless block execution failed")]
    StatelessExecutionFailed(String),
    /// Error during consensus validation of the block.
    #[error("consensus validation failed: {0}")]
    ConsensusValidationFailed(#[from] ConsensusError),
    /// Error during stateless state root calculation.
    #[error("stateless state root calculation failed")]
    StatelessStateRootCalculationFailed,
    /// Error calculating the pre-state root from the witness data.
    #[error("stateless pre-state root calculation failed")]
    StatelessPreStateRootCalculationFailed,
    /// Error when required ancestor headers are missing (e.g., parent header for pre-state root).
    #[error("missing required ancestor headers")]
    MissingAncestorHeader,
    /// Error when deserializing ancestor headers
    #[error("could not deserialize ancestor headers")]
    HeaderDeserializationFailed,
    /// Error when the computed state root does not match the one in the block header.
    #[error("mismatched post-state root: {got}\n {expected}")]
    PostStateRootMismatch {
        /// The computed post-state root
        got: B256,
        /// The expected post-state root; in the block header
        expected: B256,
    },
    /// Error when the computed pre-state root does not match the expected one.
    #[error("mismatched pre-state root: {got} \n {expected}")]
    PreStateRootMismatch {
        /// The computed pre-state root
        got: B256,
        /// The expected pre-state root from the previous block
        expected: B256,
    },
    /// Custom error.
    ///
    /// Escape hatch for implementation-specific failure messages.
    #[error("{0}")]
    Custom(&'static str),
}
/// Performs stateless validation of a block using the provided witness data.
///
/// This function attempts to fully validate a given `current_block` statelessly, i.e. without
/// access to a persistent database.
/// It relies entirely on the `witness` data and `ancestor_headers`
/// provided alongside the block.
///
/// The witness data is validated in the following way:
///
/// 1. **Ancestor Header Verification:** Checks if the `ancestor_headers` are present, form a
///    contiguous chain back from `current_block`'s parent, and do not exceed the `BLOCKHASH`
///    opcode limit using `compute_ancestor_hashes`. We must have at least one ancestor header,
///    even if the `BLOCKHASH` opcode is not used, because we need the state root of the previous
///    block to verify the pre-state reads.
///
/// 2. **Pre-State Verification:** Retrieves the expected `pre_state_root` from the parent header
///    from `ancestor_headers`. Verifies the provided [`ExecutionWitness`] against the
///    `pre_state_root`.
///
/// 3. **Chain Verification:** The code currently does not verify the [`EthChainSpec`] and expects
///    a higher level function to assert that this is correct by, for example, asserting that it
///    is equal to the Ethereum Mainnet `ChainSpec` or asserting against the genesis hash that
///    this `ChainSpec` defines.
///
/// High Level Overview of functionality:
///
/// - Verify all state accesses against a trusted pre-state root
/// - Put all state accesses into an in-memory database
/// - Use the in-memory database to execute the block
/// - Validate the output of block execution (e.g. receipts, logs, requests)
/// - Compute the post-state root using the state-diff from block execution
/// - Check that the post-state root is the state root in the block.
///
/// If all steps succeed the function returns `Ok` containing the hash of the validated
/// `current_block`.
pub fn stateless_validation<ChainSpec, E>(
    current_block: RecoveredBlock<Block>,
    witness: ExecutionWitness,
    chain_spec: Arc<ChainSpec>,
    evm_config: E,
) -> Result<B256, StatelessValidationError>
where
    ChainSpec: Send + Sync + EthChainSpec<Header = Header> + EthereumHardforks + Debug,
    E: ConfigureEvm<Primitives = EthPrimitives> + Clone + 'static,
{
    // Delegate to the generic entry point, fixing the default sparse-trie backend.
    stateless_validation_with_trie::<StatelessSparseTrie, ChainSpec, E>(
        current_block,
        witness,
        chain_spec,
        evm_config,
    )
}
/// Performs stateless validation of a block using a custom `StatelessTrie` implementation.
///
/// This is a generic version of `stateless_validation` that allows users to provide their own
/// implementation of the `StatelessTrie` for custom trie backends or optimizations.
///
/// See `stateless_validation` for detailed documentation of the validation process.
pub fn stateless_validation_with_trie<T, ChainSpec, E>(
current_block: RecoveredBlock<Block>,
witness: ExecutionWitness,
chain_spec: Arc<ChainSpec>,
evm_config: E,
) -> Result<B256, StatelessValidationError>
where
T: StatelessTrie,
ChainSpec: Send + Sync + EthChainSpec<Header = Header> + EthereumHardforks + Debug,
E: ConfigureEvm<Primitives = EthPrimitives> + Clone + 'static,
{
let mut ancestor_headers: Vec<_> = witness
.headers
.iter()
.map(|bytes| {
let hash = keccak256(bytes);
alloy_rlp::decode_exact::<Header>(bytes)
.map(|h| SealedHeader::new(h, hash))
.map_err(|_| StatelessValidationError::HeaderDeserializationFailed)
})
.collect::<Result<_, _>>()?;
// Sort the headers by their block number to ensure that they are in
// ascending order.
ancestor_headers.sort_by_key(|header| header.number());
// Check that the ancestor headers form a contiguous chain and are not just random headers.
let ancestor_hashes = compute_ancestor_hashes(¤t_block, &ancestor_headers)?;
// There should be at least one ancestor header.
// The edge case here would be the genesis block, but we do not create proofs for the genesis
// block.
let parent = match ancestor_headers.last() {
Some(prev_header) => prev_header,
None => return Err(StatelessValidationError::MissingAncestorHeader),
};
// Validate block against pre-execution consensus rules
validate_block_consensus(chain_spec.clone(), ¤t_block, parent)?;
// First verify that the pre-state reads are correct
let (mut trie, bytecode) = T::new(&witness, parent.state_root)?;
// Create an in-memory database that will use the reads to validate the block
let db = WitnessDatabase::new(&trie, bytecode, ancestor_hashes);
// Execute the block
let executor = evm_config.executor(db);
let output = executor
.execute(¤t_block)
.map_err(|e| StatelessValidationError::StatelessExecutionFailed(e.to_string()))?;
// Post validation checks
validate_block_post_execution(¤t_block, &chain_spec, &output.receipts, &output.requests)
.map_err(StatelessValidationError::ConsensusValidationFailed)?;
// Compute and check the post state root
let hashed_state = HashedPostState::from_bundle_state::<KeccakKeyHasher>(&output.state.state);
let state_root = trie.calculate_state_root(hashed_state)?;
if state_root != current_block.state_root {
return Err(StatelessValidationError::PostStateRootMismatch {
got: state_root,
expected: current_block.state_root,
});
}
// Return block hash
Ok(current_block.hash_slow())
}
/// Performs consensus validation checks on a block without execution or state validation.
///
/// This function validates a block against Ethereum consensus rules by:
///
/// 1. **Header Validation:** Validates the sealed header against protocol specifications,
/// including:
///    - Gas limit checks
///    - Base fee validation for EIP-1559
///    - Withdrawals root validation for Shanghai fork
///    - Blob-related fields validation for Cancun fork
///
/// 2. **Pre-Execution Validation:** Validates block structure, transaction format, signature
///    validity, and other pre-execution requirements.
///
/// This function acts as a preliminary validation before executing and validating the state
/// transition function.
fn validate_block_consensus<ChainSpec>(
    chain_spec: Arc<ChainSpec>,
    block: &RecoveredBlock<Block>,
    parent: &SealedHeader<Header>,
) -> Result<(), StatelessValidationError>
where
    ChainSpec: Send + Sync + EthChainSpec<Header = Header> + EthereumHardforks + Debug,
{
    let consensus = EthBeaconConsensus::new(chain_spec);
    // Standalone header checks first, then header-vs-parent checks,
    // and finally pre-execution checks on the block body.
    consensus.validate_header(block.sealed_header())?;
    consensus.validate_header_against_parent(block.sealed_header(), parent)?;
    consensus.validate_block_pre_execution(block)?;
    Ok(())
}
/// Verifies the contiguity, number of ancestor headers and extracts their hashes.
///
/// This function is used to prepare the data required for the `BLOCKHASH`
/// opcode in a stateless execution context.
///
/// It verifies that the provided `ancestor_headers` form a valid, unbroken chain leading back from
/// the parent of the `current_block`, and that no more headers are supplied than the `BLOCKHASH`
/// opcode can observe.
///
/// Note: This function becomes obsolete if EIP-2935 is implemented.
/// Note: The headers are assumed to be in ascending order.
///
/// If both checks pass, it returns a [`BTreeMap`] mapping the block number of each
/// ancestor header to its corresponding block hash.
fn compute_ancestor_hashes(
    current_block: &RecoveredBlock<Block>,
    ancestor_headers: &[SealedHeader],
) -> Result<BTreeMap<u64, B256>, StatelessValidationError> {
    // The `BLOCKHASH` opcode can only reference the 256 most recent block hashes, so a
    // witness carrying more ancestor headers than that is malformed. (This check was
    // documented and the error variant declared, but previously never enforced.)
    const ANCESTOR_LIMIT: usize = 256;
    if ancestor_headers.len() > ANCESTOR_LIMIT {
        return Err(StatelessValidationError::AncestorHeaderLimitExceeded {
            count: ancestor_headers.len(),
            limit: ANCESTOR_LIMIT,
        });
    }
    let mut ancestor_hashes = BTreeMap::new();
    let mut child_header = current_block.sealed_header();
    // Walk from the newest ancestor backwards, verifying that each supplied header is
    // the parent of the previous one (both hash and number must be contiguous) before
    // recording its hash.
    for parent_header in ancestor_headers.iter().rev() {
        let parent_hash = child_header.parent_hash();
        if parent_hash != parent_header.hash() {
            return Err(StatelessValidationError::InvalidAncestorChain); // Blocks must be contiguous
        }
        if parent_header.number + 1 != child_header.number {
            return Err(StatelessValidationError::InvalidAncestorChain); // Header numbers must be contiguous
        }
        ancestor_hashes.insert(parent_header.number, parent_hash);
        child_header = parent_header
    }
    Ok(ancestor_hashes)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stateless/src/witness_db.rs | crates/stateless/src/witness_db.rs | //! Provides the [`WitnessDatabase`] type, an implementation of [`reth_revm::Database`]
//! specifically designed for stateless execution environments.
use crate::trie::StatelessTrie;
use alloc::{collections::btree_map::BTreeMap, format};
use alloy_primitives::{map::B256Map, Address, B256, U256};
use reth_errors::ProviderError;
use reth_revm::{bytecode::Bytecode, state::AccountInfo, Database};
/// An EVM database implementation backed by witness data.
///
/// This struct implements the [`reth_revm::Database`] trait, allowing the EVM to execute
/// transactions using:
/// - Account and storage slot data provided by a [`StatelessTrie`] implementation.
/// - Bytecode and ancestor block hashes provided by in-memory maps.
///
/// This is designed for stateless execution scenarios where direct access to a full node's
/// database is not available or desired.
#[derive(Debug)]
pub(crate) struct WitnessDatabase<'a, T>
where
    T: StatelessTrie,
{
    /// Map of block numbers to block hashes.
    /// This is used to service the `BLOCKHASH` opcode.
    // TODO: use Vec instead -- ancestors should be contiguous
    // TODO: so we can use the current_block_number and an offset to
    // TODO: get the block number of a particular ancestor
    block_hashes_by_block_number: BTreeMap<u64, B256>,
    /// Map of code hashes to bytecode.
    /// Used to fetch contract code needed during execution.
    bytecode: B256Map<Bytecode>,
    /// The sparse Merkle Patricia Trie containing account and storage state.
    /// This is used to provide account/storage values during EVM execution.
    // TODO: Ideally we do not have this trie and instead a simple map.
    // TODO: Then as a corollary we can avoid unnecessary hashing in `Database::storage`
    // TODO: and `Database::basic` without needing to cache the hashed Addresses and Keys
    trie: &'a T,
}
impl<'a, T> WitnessDatabase<'a, T>
where
    T: StatelessTrie,
{
    /// Builds a [`WitnessDatabase`] from its three read-only data sources.
    ///
    /// # Assumptions
    ///
    /// The caller is responsible for ensuring that:
    /// 1. `trie` was populated with state data consistent with a known state root (e.g. verified
    ///    against the parent block's state root using witness data).
    /// 2. `bytecode` contains an entry for every code hash reachable from account data in `trie`.
    /// 3. `ancestor_hashes` holds the block hashes of a contiguous chain of ancestor blocks (up
    ///    to 256 including the current block number); contiguity and the block limit are NOT
    ///    re-checked here.
    pub(crate) const fn new(
        trie: &'a T,
        bytecode: B256Map<Bytecode>,
        ancestor_hashes: BTreeMap<u64, B256>,
    ) -> Self {
        Self { block_hashes_by_block_number: ancestor_hashes, bytecode, trie }
    }
}
impl<T> Database for WitnessDatabase<'_, T>
where
    T: StatelessTrie,
{
    /// The database error type.
    type Error = ProviderError;

    /// Gets basic account information by querying the underlying [`StatelessTrie`]
    /// implementation (which handles address hashing internally).
    ///
    /// Returns `Ok(None)` if the account is not found in the trie.
    fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        let maybe_account = self.trie.account(address)?;
        Ok(maybe_account.map(|account| AccountInfo {
            balance: account.balance,
            nonce: account.nonce,
            code_hash: account.code_hash,
            code: None,
        }))
    }

    /// Gets the storage value of an account at a specific slot.
    ///
    /// Returns `U256::ZERO` if the slot is not found in the trie.
    fn storage(
        &mut self,
        address: Address,
        slot: U256,
    ) -> Result<alloy_primitives::FlaggedStorage, Self::Error> {
        // Delegate directly; the trie resolves the (address, slot) pair.
        self.trie.storage(address, slot)
    }

    /// Gets account code by its hash from the provided bytecode map.
    ///
    /// Returns an error if the bytecode for the given hash is not found in the map.
    fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        match self.bytecode.get(&code_hash) {
            Some(code) => Ok(code.clone()),
            None => Err(ProviderError::TrieWitnessError(format!(
                "bytecode for {code_hash} not found"
            ))),
        }
    }

    /// Gets a block hash by block number from the provided ancestor hashes map.
    ///
    /// Returns an error if the hash for the given block number is not found in the map.
    fn block_hash(&mut self, block_number: u64) -> Result<B256, Self::Error> {
        match self.block_hashes_by_block_number.get(&block_number) {
            Some(hash) => Ok(*hash),
            None => Err(ProviderError::StateForNumberNotFound(block_number)),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stateless/src/trie.rs | crates/stateless/src/trie.rs | use crate::validation::StatelessValidationError;
use alloc::{format, vec::Vec};
use alloy_primitives::{keccak256, map::B256Map, Address, B256, U256};
use alloy_rlp::{Decodable, Encodable};
use alloy_rpc_types_debug::ExecutionWitness;
use alloy_trie::{TrieAccount, EMPTY_ROOT_HASH};
use itertools::Itertools;
use reth_errors::ProviderError;
use reth_revm::state::Bytecode;
use reth_trie_common::{HashedPostState, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE};
use reth_trie_sparse::{
errors::SparseStateTrieResult,
provider::{DefaultTrieNodeProvider, DefaultTrieNodeProviderFactory},
SparseStateTrie, SparseTrie, SparseTrieInterface,
};
/// Trait for stateless trie implementations that can be used for stateless validation.
pub trait StatelessTrie: core::fmt::Debug {
    /// Initialize the stateless trie using the `ExecutionWitness`
    ///
    /// On success, returns the populated trie together with a map from code hash to the
    /// contract bytecode carried by the witness.
    fn new(
        witness: &ExecutionWitness,
        pre_state_root: B256,
    ) -> Result<(Self, B256Map<Bytecode>), StatelessValidationError>
    where
        Self: Sized;
    /// Returns the `TrieAccount` that corresponds to the `Address`
    ///
    /// This method will error if the `ExecutionWitness` is not able to guarantee
    /// that the account is missing from the Trie _and_ the witness was complete.
    fn account(&self, address: Address) -> Result<Option<TrieAccount>, ProviderError>;
    /// Returns the storage slot value that corresponds to the given (address, slot) tuple.
    ///
    /// This method will error if the `ExecutionWitness` is not able to guarantee
    /// that the storage was missing from the Trie _and_ the witness was complete.
    fn storage(
        &self,
        address: Address,
        slot: U256,
    ) -> Result<alloy_primitives::FlaggedStorage, ProviderError>;
    /// Computes the new state root from the `HashedPostState`.
    ///
    /// Consumes the post-execution state diff and returns the resulting root.
    fn calculate_state_root(
        &mut self,
        state: HashedPostState,
    ) -> Result<B256, StatelessValidationError>;
}
/// `StatelessSparseTrie` structure for usage during stateless validation
#[derive(Debug)]
pub struct StatelessSparseTrie {
    // Sparse state trie revealed from the execution witness; serves account and storage
    // reads and is mutated in place when computing the post-state root.
    inner: SparseStateTrie,
}
impl StatelessSparseTrie {
    /// Initialize the stateless trie using the `ExecutionWitness`
    ///
    /// Note: Currently this method does not check that the `ExecutionWitness`
    /// is complete for all of the preimage keys.
    pub fn new(
        witness: &ExecutionWitness,
        pre_state_root: B256,
    ) -> Result<(Self, B256Map<Bytecode>), StatelessValidationError> {
        // Reveal the witness into a sparse trie and verify it against the expected
        // pre-state root; the bytecode map is returned alongside because the trie
        // itself only stores code hashes.
        verify_execution_witness(witness, pre_state_root)
            .map(|(inner, bytecode)| (Self { inner }, bytecode))
    }
    /// Returns the `TrieAccount` that corresponds to the `Address`
    ///
    /// This method will error if the `ExecutionWitness` is not able to guarantee
    /// that the account is missing from the Trie _and_ the witness was complete.
    pub fn account(&self, address: Address) -> Result<Option<TrieAccount>, ProviderError> {
        let hashed_address = keccak256(address);
        // Fast path: the account leaf was revealed by the witness.
        if let Some(bytes) = self.inner.get_account_value(&hashed_address) {
            let account = TrieAccount::decode(&mut bytes.as_slice())?;
            return Ok(Some(account))
        }
        // The account is absent from the revealed trie. That alone does not prove
        // non-existence: require a valid exclusion proof before reporting `None`.
        if !self.inner.check_valid_account_witness(hashed_address) {
            return Err(ProviderError::TrieWitnessError(format!(
                "incomplete account witness for {hashed_address:?}"
            )));
        }
        Ok(None)
    }
    /// Returns the storage slot value that corresponds to the given (address, slot) tuple.
    ///
    /// This method will error if the `ExecutionWitness` is not able to guarantee
    /// that the storage was missing from the Trie _and_ the witness was complete.
    pub fn storage(
        &self,
        address: Address,
        slot: U256,
    ) -> Result<alloy_primitives::FlaggedStorage, ProviderError> {
        let hashed_address = keccak256(address);
        let hashed_slot = keccak256(B256::from(slot));
        // Fast path: the slot value was revealed by the witness.
        if let Some(raw) = self.inner.get_storage_slot_value(&hashed_address, &hashed_slot) {
            return Ok(alloy_primitives::FlaggedStorage::decode(&mut raw.as_slice())?)
        }
        // Storage slot value is not present in the trie, validate that the witness is complete.
        // If the account exists in the trie...
        if let Some(bytes) = self.inner.get_account_value(&hashed_address) {
            // ...check that its storage is either empty or the storage trie was sufficiently
            // revealed...
            let account = TrieAccount::decode(&mut bytes.as_slice())?;
            if account.storage_root != EMPTY_ROOT_HASH &&
                !self.inner.check_valid_storage_witness(hashed_address, hashed_slot)
            {
                return Err(ProviderError::TrieWitnessError(format!(
                    "incomplete storage witness: prover must supply exclusion proof for slot {hashed_slot:?} in account {hashed_address:?}"
                )));
            }
        } else if !self.inner.check_valid_account_witness(hashed_address) {
            // ...else if account is missing, validate that the account trie was sufficiently
            // revealed.
            return Err(ProviderError::TrieWitnessError(format!(
                "incomplete account witness for {hashed_address:?}"
            )));
        }
        // Either the slot is genuinely empty or the account does not exist;
        // both read as zero.
        Ok(alloy_primitives::FlaggedStorage::ZERO)
    }
    /// Computes the new state root from the `HashedPostState`.
    pub fn calculate_state_root(
        &mut self,
        state: HashedPostState,
    ) -> Result<B256, StatelessValidationError> {
        // Delegate to the free function; the concrete trie error is collapsed into
        // a single opaque validation error.
        calculate_state_root(&mut self.inner, state)
            .map_err(|_e| StatelessValidationError::StatelessStateRootCalculationFailed)
    }
}
impl StatelessTrie for StatelessSparseTrie {
    // Every trait method delegates to the inherent method of the same name; inherent
    // methods take precedence during resolution, so these calls do not recurse into
    // the trait impl.
    fn new(
        witness: &ExecutionWitness,
        pre_state_root: B256,
    ) -> Result<(Self, B256Map<Bytecode>), StatelessValidationError> {
        Self::new(witness, pre_state_root)
    }
    fn account(&self, address: Address) -> Result<Option<TrieAccount>, ProviderError> {
        self.account(address)
    }
    fn storage(
        &self,
        address: Address,
        slot: U256,
    ) -> Result<alloy_primitives::FlaggedStorage, ProviderError> {
        self.storage(address, slot)
    }
    fn calculate_state_root(
        &mut self,
        state: HashedPostState,
    ) -> Result<B256, StatelessValidationError> {
        self.calculate_state_root(state)
    }
}
/// Verifies execution witness [`ExecutionWitness`] against an expected pre-state root.
///
/// The RLP-encoded values carried by the [`ExecutionWitness`] (state trie nodes, storage trie
/// nodes, and contract bytecode) are used to populate a fresh [`SparseStateTrie`].
///
/// If the root of the populated trie equals `pre_state_root`, the witness is consistent with
/// that pre-state root, and the function returns the trie together with a [`B256Map`] mapping
/// code hash to [`Bytecode`].
///
/// The bytecode lives in a separate map because the [`SparseStateTrie`] does not store
/// contract bytecode, only its hash (the code hash).
///
/// If the roots do not match, the witness is invalid for the given `pre_state_root` and
/// `StatelessValidationError::PreStateRootMismatch` is returned.
// Note: This approach might be inefficient for ZKVMs requiring minimal memory operations, which
// would explain why they have for the most part re-implemented this function.
fn verify_execution_witness(
    witness: &ExecutionWitness,
    pre_state_root: B256,
) -> Result<(SparseStateTrie, B256Map<Bytecode>), StatelessValidationError> {
    let provider_factory = DefaultTrieNodeProviderFactory;
    let mut trie = SparseStateTrie::new();

    // Index the raw trie nodes by their keccak hash so the sparse trie can look them up.
    let mut state_witness = B256Map::default();
    for encoded in &witness.state {
        state_witness.insert(keccak256(encoded), encoded.clone());
    }

    // Index contract bytecode by code hash; the trie itself only tracks hashes.
    let mut bytecode = B256Map::default();
    for encoded in &witness.codes {
        bytecode.insert(keccak256(encoded), Bytecode::new_raw(encoded.clone()));
    }

    // Reveal the witness with our state root.
    // This builds the sparse trie from `state_witness` with `pre_state_root` as root.
    // Things to note:
    // - More witness data than the block execution needs may be supplied.
    // - An account that has not been seen was simply not inserted into the trie; it does
    //   NOT mean the account does not exist. Proving non-existence requires an exclusion
    //   proof.
    trie.reveal_witness(pre_state_root, &state_witness)
        .map_err(|_e| StatelessValidationError::WitnessRevealFailed { pre_state_root })?;

    // Recompute the root and require it to match the expected pre-state root.
    let computed_root = trie
        .root(&provider_factory)
        .map_err(|_e| StatelessValidationError::StatelessPreStateRootCalculationFailed)?;
    if computed_root != pre_state_root {
        return Err(StatelessValidationError::PreStateRootMismatch {
            got: computed_root,
            expected: pre_state_root,
        });
    }

    Ok((trie, bytecode))
}
// Copied and modified from ress: https://github.com/paradigmxyz/ress/blob/06bf2c4788e45b8fcbd640e38b6243e6f87c4d0e/crates/engine/src/tree/root.rs
/// Calculates the post-execution state root by applying state changes to a sparse trie.
///
/// This function takes a [`SparseStateTrie`] with the pre-state and a [`HashedPostState`]
/// containing account and storage changes resulting from block execution (state diff).
///
/// It modifies the input `trie` in place to reflect these changes and then calculates the
/// final post-execution state root.
fn calculate_state_root(
    trie: &mut SparseStateTrie,
    state: HashedPostState,
) -> SparseStateTrieResult<B256> {
    // 1. Apply storage-slot updates and compute each contract's storage root
    //
    // We walk over every (address, storage) pair in deterministic (sorted) order
    // and update the corresponding per-account storage trie in place.
    // When done, we collect (address, updated_storage_trie) in a `Vec` so that we can
    // insert them back into the outer state trie afterwards -- this avoids borrowing
    // issues.
    let mut storage_results = Vec::with_capacity(state.storages.len());
    // In `verify_execution_witness` a `DefaultTrieNodeProviderFactory` is used, so we use the same
    // again in here.
    let provider_factory = DefaultTrieNodeProviderFactory;
    let storage_provider = DefaultTrieNodeProvider;
    for (address, storage) in state.storages.into_iter().sorted_unstable_by_key(|(addr, _)| *addr) {
        // Take the existing storage trie (or create an empty, "revealed" one)
        let mut storage_trie =
            trie.take_storage_trie(&address).unwrap_or_else(SparseTrie::revealed_empty);
        // A wiped storage (e.g. account destruction) clears all slots before re-applying.
        if storage.wiped {
            storage_trie.wipe()?;
        }
        // Apply slot-level changes in sorted order; zero values are deletions.
        for (hashed_slot, value) in
            storage.storage.into_iter().sorted_unstable_by_key(|(slot, _)| *slot)
        {
            let nibbles = Nibbles::unpack(hashed_slot);
            if value.is_zero() {
                storage_trie.remove_leaf(&nibbles, &storage_provider)?;
            } else {
                storage_trie.update_leaf(
                    nibbles,
                    alloy_rlp::encode_fixed_size(&value).to_vec(),
                    value.is_private,
                    &storage_provider,
                )?;
            }
        }
        // Finalise the storage-trie root before pushing the result
        storage_trie.root();
        storage_results.push((address, storage_trie));
    }
    // Insert every updated storage trie back into the outer state trie
    for (address, storage_trie) in storage_results {
        trie.insert_storage_trie(address, storage_trie);
    }
    // 2. Apply account-level updates and (re)encode the account nodes
    // Update accounts with new values
    // TODO: upstream changes into reth so that `SparseStateTrie::update_account` handles this
    let mut account_rlp_buf = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE);
    for (hashed_address, account) in
        state.accounts.into_iter().sorted_unstable_by_key(|(addr, _)| *addr)
    {
        let nibbles = Nibbles::unpack(hashed_address);
        // Determine which storage root should be used for this account:
        // prefer a freshly-updated storage trie, fall back to the pre-state account's
        // root, and default to the empty root for brand-new accounts.
        let storage_root = if let Some(storage_trie) = trie.storage_trie_mut(&hashed_address) {
            storage_trie.root()
        } else if let Some(value) = trie.get_account_value(&hashed_address) {
            TrieAccount::decode(&mut &value[..])?.storage_root
        } else {
            EMPTY_ROOT_HASH
        };
        // Decide whether to remove or update the account leaf (`None` means destroyed).
        if let Some(account) = account {
            account_rlp_buf.clear();
            account.into_trie_account(storage_root).encode(&mut account_rlp_buf);
            trie.update_account_leaf(nibbles, account_rlp_buf.clone(), &provider_factory)?;
        } else {
            trie.remove_account_leaf(&nibbles, &provider_factory)?;
        }
    }
    // Return new state root
    trie.root(&provider_factory)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tokio-util/src/lib.rs | crates/tokio-util/src/lib.rs | //! Event listeners
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod event_sender;
mod event_stream;
pub use event_sender::EventSender;
pub use event_stream::EventStream;
#[cfg(feature = "time")]
pub mod ratelimit;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tokio-util/src/event_sender.rs | crates/tokio-util/src/event_sender.rs | use crate::EventStream;
use tokio::sync::broadcast::{self, Sender};
use tracing::trace;
/// Default capacity (number of buffered events) used by [`EventSender::default`].
const DEFAULT_SIZE_BROADCAST_CHANNEL: usize = 2000;
/// A bounded multi-producer, multi-consumer broadcast channel.
#[derive(Debug)]
pub struct EventSender<T> {
    /// The sender part of the broadcast channel
    sender: Sender<T>,
}
impl<T> Default for EventSender<T>
where
T: Clone + Send + Sync + 'static,
{
fn default() -> Self {
Self::new(DEFAULT_SIZE_BROADCAST_CHANNEL)
}
}
impl<T> Clone for EventSender<T> {
    /// Clones the sender handle; both handles feed the same channel.
    ///
    /// Implemented manually (rather than derived) so no `T: Clone` bound is
    /// required — only the inner `Sender` is cloned.
    fn clone(&self) -> Self {
        let sender = self.sender.clone();
        Self { sender }
    }
}
impl<T> EventSender<T>
where
    T: Clone + Send + Sync + 'static,
{
    /// Creates a new `EventSender` whose channel buffers up to
    /// `events_channel_size` events.
    pub fn new(events_channel_size: usize) -> Self {
        let (sender, _receiver) = broadcast::channel(events_channel_size);
        Self { sender }
    }

    /// Broadcasts an event to all listeners.
    ///
    /// If no listener is currently subscribed the event is dropped and a
    /// trace line is emitted; this is not treated as an error.
    pub fn notify(&self, event: T) {
        let delivered = self.sender.send(event);
        if delivered.is_err() {
            trace!("no receivers for broadcast events");
        }
    }

    /// Creates a new event stream with a subscriber to the sender as the
    /// receiver.
    pub fn new_listener(&self) -> EventStream<T> {
        let receiver = self.sender.subscribe();
        EventStream::new(receiver)
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for [`EventSender`].
    //!
    //! Note: `test_bounded_channel_size` relies on the broadcast channel dropping the
    //! *oldest* buffered events once capacity is exceeded, with the resulting lag
    //! error skipped by `EventStream` — see the asserts there.
    use super::*;
    use tokio::{
        task,
        time::{timeout, Duration},
    };
    use tokio_stream::StreamExt;
    #[tokio::test]
    async fn test_event_broadcast_to_listener() {
        let sender = EventSender::default();
        // Create a listener for the events
        let mut listener = sender.new_listener();
        // Broadcast an event
        sender.notify("event1");
        // Check if the listener receives the event
        let received_event = listener.next().await;
        assert_eq!(received_event, Some("event1"));
    }
    #[tokio::test]
    async fn test_event_no_listener() {
        let sender = EventSender::default();
        // Broadcast an event with no listeners
        sender.notify("event2");
        // Ensure it doesn't panic or fail when no listeners are present
        // (this test passes if it runs without errors).
    }
    #[tokio::test]
    async fn test_multiple_listeners_receive_event() {
        let sender = EventSender::default();
        // Create two listeners
        let mut listener1 = sender.new_listener();
        let mut listener2 = sender.new_listener();
        // Broadcast an event
        sender.notify("event3");
        // Both listeners should receive the same event
        let event1 = listener1.next().await;
        let event2 = listener2.next().await;
        assert_eq!(event1, Some("event3"));
        assert_eq!(event2, Some("event3"));
    }
    #[tokio::test]
    async fn test_bounded_channel_size() {
        // Create a channel with size 2
        let sender = EventSender::new(2);
        // Create a listener
        let mut listener = sender.new_listener();
        // Broadcast 3 events, which exceeds the channel size
        sender.notify("event4");
        sender.notify("event5");
        sender.notify("event6");
        // Only the last two should be received due to the size limit
        // ("event4" is evicted; the lag error it produces is skipped by EventStream).
        let received_event1 = listener.next().await;
        let received_event2 = listener.next().await;
        assert_eq!(received_event1, Some("event5"));
        assert_eq!(received_event2, Some("event6"));
    }
    #[tokio::test]
    async fn test_event_listener_timeout() {
        let sender = EventSender::default();
        let mut listener = sender.new_listener();
        // Broadcast an event asynchronously
        task::spawn(async move {
            tokio::time::sleep(Duration::from_millis(50)).await;
            sender.notify("delayed_event");
        });
        // Use a timeout to ensure that the event is received within a certain time
        let result = timeout(Duration::from_millis(100), listener.next()).await;
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), Some("delayed_event"));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tokio-util/src/event_stream.rs | crates/tokio-util/src/event_stream.rs | //! Event streams related functionality.
use std::{
pin::Pin,
task::{Context, Poll},
};
use tokio_stream::Stream;
use tracing::warn;
/// Thin wrapper around tokio's `BroadcastStream` to allow skipping broadcast errors.
#[derive(Debug)]
pub struct EventStream<T> {
    /// Underlying broadcast stream; lag errors from it are skipped when polled.
    inner: tokio_stream::wrappers::BroadcastStream<T>,
}
impl<T> EventStream<T>
where
    T: Clone + Send + 'static,
{
    /// Creates a new `EventStream`.
    pub fn new(receiver: tokio::sync::broadcast::Receiver<T>) -> Self {
        Self { inner: tokio_stream::wrappers::BroadcastStream::new(receiver) }
    }
}
impl<T> Stream for EventStream<T>
where
    T: Clone + Send + 'static,
{
    type Item = T;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Keep polling until we get something other than a lag error, so that
        // lagging never terminates the stream or surfaces to the consumer.
        loop {
            let next = Pin::new(&mut self.inner).poll_next(cx);
            match next {
                Poll::Ready(Some(Ok(item))) => break Poll::Ready(Some(item)),
                Poll::Ready(Some(Err(e))) => {
                    // Messages were dropped for this receiver: log and re-poll.
                    warn!("BroadcastStream lagged: {e:?}");
                }
                Poll::Ready(None) => break Poll::Ready(None),
                Poll::Pending => break Poll::Pending,
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::sync::broadcast;
    use tokio_stream::StreamExt;

    #[tokio::test]
    async fn test_event_stream_yields_items() {
        let (sender, _) = broadcast::channel(16);
        let stream = EventStream::new(sender.subscribe());

        for value in [1, 2, 3] {
            sender.send(value).unwrap();
        }
        // drop the sender to terminate the stream and allow collect to work.
        drop(sender);

        let collected: Vec<i32> = stream.collect().await;
        assert_eq!(collected, vec![1, 2, 3]);
    }

    #[tokio::test]
    async fn test_event_stream_skips_lag_errors() {
        let (sender, _) = broadcast::channel(2);
        let stream = EventStream::new(sender.subscribe());

        // Extra subscribers that never consume, keeping the channel occupied.
        let mut _rx2 = sender.subscribe();
        let mut _rx3 = sender.subscribe();

        for value in [1, 2, 3, 4] {
            // The later sends overflow the capacity-2 buffer and lag the first subscriber.
            sender.send(value).unwrap();
        }
        // drop the sender to terminate the stream and allow collect to work.
        drop(sender);

        // Ensure lag errors are skipped and only valid items are collected
        let collected: Vec<i32> = stream.collect().await;
        assert_eq!(collected, vec![3, 4]);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tokio-util/src/ratelimit.rs | crates/tokio-util/src/ratelimit.rs | //! A rate limit implementation to enforce a specific rate.
use std::{
future::{poll_fn, Future},
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use tokio::time::Sleep;
/// Given a [`Rate`] this type enforces a rate limit.
#[derive(Debug)]
pub struct RateLimit {
    /// The configured limit/duration ratio being enforced.
    rate: Rate,
    /// Whether calls are currently allowed or throttled.
    state: State,
    /// Timer used to wake up once a limited period has elapsed.
    sleep: Pin<Box<Sleep>>,
}
// === impl RateLimit ===

impl RateLimit {
    /// Create a new rate limiter
    pub fn new(rate: Rate) -> Self {
        let now = tokio::time::Instant::now();
        Self {
            state: State::Ready { until: now, remaining: rate.limit() },
            sleep: Box::pin(tokio::time::sleep_until(now)),
            rate,
        }
    }

    /// Returns the configured limit of the [`RateLimit`]
    pub const fn limit(&self) -> u64 {
        self.rate.limit()
    }

    /// Checks if the [`RateLimit`] is ready to handle a new call
    pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        if matches!(self.state, State::Limited) {
            // Still throttled: stay pending until the sleep timer fires.
            if Pin::new(&mut self.sleep).poll(cx).is_pending() {
                return Poll::Pending
            }
            // The limited period elapsed; open a fresh window with a full budget.
            self.state = State::Ready {
                until: tokio::time::Instant::now() + self.rate.duration(),
                remaining: self.rate.limit(),
            };
        }
        Poll::Ready(())
    }

    /// Wait until the [`RateLimit`] is ready.
    pub async fn wait(&mut self) {
        poll_fn(|cx| self.poll_ready(cx)).await
    }

    /// Updates the [`RateLimit`] when a new call was triggered
    ///
    /// # Panics
    ///
    /// Panics if [`RateLimit::poll_ready`] returned [`Poll::Pending`]
    pub fn tick(&mut self) {
        let State::Ready { mut until, mut remaining } = self.state else {
            panic!("RateLimit limited; poll_ready must be called first")
        };
        let now = tokio::time::Instant::now();
        // If the period has elapsed, reset it.
        if now >= until {
            until = now + self.rate.duration();
            remaining = self.rate.limit();
        }
        if remaining > 1 {
            // Budget left after this call: just decrement.
            self.state = State::Ready { until, remaining: remaining - 1 };
        } else {
            // This call consumed the last slot: limited until `until` elapses.
            self.sleep.as_mut().reset(until);
            self.state = State::Limited;
        }
    }
}
/// Tracks the state of the [`RateLimit`]
#[derive(Debug)]
enum State {
    /// Currently limited
    Limited,
    /// Calls are allowed; tracks the current window and its remaining budget.
    Ready {
        /// End of the current rate-limit window.
        until: tokio::time::Instant,
        /// Number of calls still allowed within the current window.
        remaining: u64,
    },
}
/// A rate of requests per time period.
#[derive(Debug, Copy, Clone)]
pub struct Rate {
    /// Number of calls allowed per `duration`.
    limit: u64,
    /// Length of one rate-limit period.
    duration: Duration,
}
impl Rate {
    /// Create a new [Rate] with the given `limit/duration` ratio.
    pub const fn new(limit: u64, duration: Duration) -> Self {
        Self { limit, duration }
    }
    /// Number of calls allowed per period.
    const fn limit(&self) -> u64 {
        self.limit
    }
    /// Length of one period.
    const fn duration(&self) -> Duration {
        self.duration
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time;
    #[tokio::test]
    async fn test_rate_limit() {
        // 2 calls allowed per 500ms window.
        let mut limit = RateLimit::new(Rate::new(2, Duration::from_millis(500)));
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_ready());
            Poll::Ready(())
        })
        .await;
        limit.tick();
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_ready());
            Poll::Ready(())
        })
        .await;
        limit.tick();
        // Third call within the window: limited.
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_pending());
            Poll::Ready(())
        })
        .await;
        tokio::time::sleep(limit.rate.duration).await;
        // Window elapsed: ready again.
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_ready());
            Poll::Ready(())
        })
        .await;
    }
    #[tokio::test]
    async fn test_rate_limit_initialization() {
        let rate = Rate::new(5, Duration::from_secs(1));
        let limit = RateLimit::new(rate);
        // Verify the limit is correctly set
        assert_eq!(limit.limit(), 5);
    }
    #[tokio::test]
    async fn test_rate_limit_allows_within_limit() {
        let mut limit = RateLimit::new(Rate::new(3, Duration::from_millis(1)));
        // Check that the rate limiter is ready initially
        for _ in 0..3 {
            poll_fn(|cx| {
                // Should be ready within the limit
                assert!(limit.poll_ready(cx).is_ready());
                Poll::Ready(())
            })
            .await;
            // Signal that a request has been made
            limit.tick();
        }
        // After 3 requests, it should be pending (rate limit hit)
        poll_fn(|cx| {
            // Exceeded limit, should now be limited
            assert!(limit.poll_ready(cx).is_pending());
            Poll::Ready(())
        })
        .await;
    }
    #[tokio::test]
    async fn test_rate_limit_enforces_wait_after_limit() {
        let mut limit = RateLimit::new(Rate::new(2, Duration::from_millis(500)));
        // Consume the limit
        for _ in 0..2 {
            poll_fn(|cx| {
                assert!(limit.poll_ready(cx).is_ready());
                Poll::Ready(())
            })
            .await;
            limit.tick();
        }
        // Should now be limited (pending)
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_pending());
            Poll::Ready(())
        })
        .await;
        // Wait until the rate period elapses
        time::sleep(limit.rate.duration()).await;
        // Now it should be ready again after the wait
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_ready());
            Poll::Ready(())
        })
        .await;
    }
    #[tokio::test]
    async fn test_wait_method_awaits_readiness() {
        let mut limit = RateLimit::new(Rate::new(1, Duration::from_millis(500)));
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_ready());
            Poll::Ready(())
        })
        .await;
        limit.tick();
        // The limit should now be exceeded
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_pending());
            Poll::Ready(())
        })
        .await;
        // The `wait` method should block until the rate period elapses
        limit.wait().await;
        // After `wait`, it should now be ready
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_ready());
            Poll::Ready(())
        })
        .await;
    }
    #[tokio::test]
    #[should_panic(expected = "RateLimit limited; poll_ready must be called first")]
    async fn test_tick_panics_when_limited() {
        let mut limit = RateLimit::new(Rate::new(1, Duration::from_secs(1)));
        poll_fn(|cx| {
            assert!(limit.poll_ready(cx).is_ready());
            Poll::Ready(())
        })
        .await;
        // Consume the limit
        limit.tick();
        // Attempting to tick again without poll_ready being ready should panic
        limit.tick();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/debug-client/src/lib.rs | crates/consensus/debug-client/src/lib.rs | //! Debug consensus client.
//!
//! This is a worker that sends FCUs and new payloads by fetching recent blocks from an external
//! provider like Etherscan or an RPC endpoint. This allows to quickly test the execution client
//! without running a consensus node.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod client;
mod providers;
pub use client::{BlockProvider, DebugConsensusClient};
pub use providers::{EtherscanBlockProvider, RpcBlockProvider};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/debug-client/src/client.rs | crates/consensus/debug-client/src/client.rs | use alloy_consensus::Sealable;
use alloy_primitives::B256;
use reth_node_api::{
BuiltPayload, ConsensusEngineHandle, EngineApiMessageVersion, ExecutionPayload, NodePrimitives,
PayloadTypes,
};
use reth_primitives_traits::{Block, SealedBlock};
use reth_tracing::tracing::warn;
use ringbuffer::{AllocRingBuffer, RingBuffer};
use std::future::Future;
use tokio::sync::mpsc;
/// Supplies consensus client with new blocks sent in `tx` and a callback to find specific blocks
/// by number to fetch past finalized and safe blocks.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait BlockProvider: Send + Sync + 'static {
    /// The block type.
    type Block: Block;

    /// Runs a block provider to send new blocks to the given sender.
    ///
    /// Note: This is expected to be spawned in a separate task, and as such it should ignore
    /// errors.
    fn subscribe_blocks(&self, tx: mpsc::Sender<Self::Block>) -> impl Future<Output = ()> + Send;

    /// Get a past block by number.
    fn get_block(
        &self,
        block_number: u64,
    ) -> impl Future<Output = eyre::Result<Self::Block>> + Send;

    /// Get previous block hash using previous block hash buffer. If it isn't available (buffer
    /// started more recently than `offset`), fetch it using `get_block`.
    fn get_or_fetch_previous_block(
        &self,
        previous_block_hashes: &AllocRingBuffer<B256>,
        current_block_number: u64,
        offset: usize,
    ) -> impl Future<Output = eyre::Result<B256>> + Send {
        async move {
            // Fast path: the hash is still present in the ring buffer.
            if let Some(index) = previous_block_hashes.len().checked_sub(offset) {
                if let Some(hash) = previous_block_hashes.get(index) {
                    return Ok(*hash);
                }
            }
            // Return zero hash if the chain isn't long enough to have the block at the offset.
            let Some(previous_block_number) = current_block_number.checked_sub(offset as u64)
            else {
                return Ok(B256::default());
            };
            // Slow path: fetch the block from the provider and hash its header.
            let previous = self.get_block(previous_block_number).await?;
            Ok(previous.header().hash_slow())
        }
    }
}
/// Debug consensus client that sends FCUs and new payloads using recent blocks from an external
/// provider like Etherscan or an RPC endpoint.
///
/// `P` supplies the blocks that drive the engine; `T` defines the payload types sent to it.
#[derive(Debug)]
pub struct DebugConsensusClient<P: BlockProvider, T: PayloadTypes> {
    /// Handle to execution client.
    engine_handle: ConsensusEngineHandle<T>,
    /// Provider to get consensus blocks from.
    block_provider: P,
}
impl<P: BlockProvider, T: PayloadTypes> DebugConsensusClient<P, T> {
    /// Create a new debug consensus client with the given handle to execution
    /// client and block provider.
    pub const fn new(engine_handle: ConsensusEngineHandle<T>, block_provider: P) -> Self {
        Self { block_provider, engine_handle }
    }
}
impl<P, T> DebugConsensusClient<P, T>
where
    P: BlockProvider + Clone,
    T: PayloadTypes<BuiltPayload: BuiltPayload<Primitives: NodePrimitives<Block = P::Block>>>,
{
    /// Spawn the client to start sending FCUs and new payloads by periodically fetching recent
    /// blocks.
    pub async fn run(self) {
        // Ring buffer of the most recent block hashes, used to resolve the safe
        // (head - 32) and finalized (head - 64) hashes without refetching.
        let mut previous_block_hashes = AllocRingBuffer::new(64);
        let mut block_stream = {
            let (tx, rx) = mpsc::channel::<P::Block>(64);
            let block_provider = self.block_provider.clone();
            tokio::spawn(async move {
                block_provider.subscribe_blocks(tx).await;
            });
            rx
        };

        while let Some(block) = block_stream.recv().await {
            let payload = T::block_to_payload(SealedBlock::new_unhashed(block));
            let block_hash = payload.block_hash();
            let block_number = payload.block_number();
            previous_block_hashes.push(block_hash);

            // Send new events to execution client
            let _ = self.engine_handle.new_payload(payload).await;

            // Load previous block hashes. We're using (head - 32) and (head - 64) as the safe and
            // finalized block hashes.
            let safe_block_hash = self.block_provider.get_or_fetch_previous_block(
                &previous_block_hashes,
                block_number,
                32,
            );
            let finalized_block_hash = self.block_provider.get_or_fetch_previous_block(
                &previous_block_hashes,
                block_number,
                64,
            );
            // Fetch both concurrently; skip the FCU for this block if either lookup failed.
            let (safe_block_hash, finalized_block_hash) =
                tokio::join!(safe_block_hash, finalized_block_hash);
            let (safe_block_hash, finalized_block_hash) = match (
                safe_block_hash,
                finalized_block_hash,
            ) {
                (Ok(safe_block_hash), Ok(finalized_block_hash)) => {
                    (safe_block_hash, finalized_block_hash)
                }
                (safe_block_hash, finalized_block_hash) => {
                    // The provider is generic (Etherscan or RPC), so the message must not
                    // name a specific backend.
                    warn!(target: "consensus::debug-client", ?safe_block_hash, ?finalized_block_hash, "failed to fetch safe or finalized hash from block provider");
                    continue;
                }
            };
            let state = alloy_rpc_types_engine::ForkchoiceState {
                head_block_hash: block_hash,
                safe_block_hash,
                finalized_block_hash,
            };
            let _ = self
                .engine_handle
                .fork_choice_updated(state, None, EngineApiMessageVersion::V3)
                .await;
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/debug-client/src/providers/etherscan.rs | crates/consensus/debug-client/src/providers/etherscan.rs | use crate::BlockProvider;
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumberOrTag;
use alloy_json_rpc::{Response, ResponsePayload};
use reqwest::Client;
use reth_tracing::tracing::{debug, warn};
use serde::{de::DeserializeOwned, Serialize};
use std::{sync::Arc, time::Duration};
use tokio::{sync::mpsc, time::interval};
/// Block provider that fetches new blocks from Etherscan API.
#[derive(derive_more::Debug, Clone)]
pub struct EtherscanBlockProvider<RpcBlock, PrimitiveBlock> {
    /// HTTP client used for all Etherscan API requests.
    http_client: Client,
    /// Base URL of the Etherscan API endpoint.
    base_url: String,
    /// Etherscan API key appended to every request.
    api_key: String,
    /// Chain id query parameter (only sent when not already part of `base_url`).
    chain_id: u64,
    /// Polling interval for fetching the latest block.
    interval: Duration,
    /// Converts the Etherscan RPC block format into the primitive block type.
    #[debug(skip)]
    convert: Arc<dyn Fn(RpcBlock) -> PrimitiveBlock + Send + Sync>,
}
impl<RpcBlock, PrimitiveBlock> EtherscanBlockProvider<RpcBlock, PrimitiveBlock>
where
    RpcBlock: Serialize + DeserializeOwned,
{
    /// Create a new Etherscan block provider with the given base URL and API key.
    pub fn new(
        base_url: String,
        api_key: String,
        chain_id: u64,
        convert: impl Fn(RpcBlock) -> PrimitiveBlock + Send + Sync + 'static,
    ) -> Self {
        Self {
            http_client: Client::new(),
            base_url,
            api_key,
            chain_id,
            // Default polling interval; override with `with_interval`.
            interval: Duration::from_secs(3),
            convert: Arc::new(convert),
        }
    }

    /// Sets the interval at which the provider fetches new blocks.
    pub const fn with_interval(mut self, interval: Duration) -> Self {
        self.interval = interval;
        self
    }

    /// Load block using Etherscan API. Note: only `BlockNumberOrTag::Latest`,
    /// `BlockNumberOrTag::Earliest`, `BlockNumberOrTag::Pending`, `BlockNumberOrTag::Number(u64)`
    /// are supported.
    pub async fn load_block(
        &self,
        block_number_or_tag: BlockNumberOrTag,
    ) -> eyre::Result<PrimitiveBlock> {
        // Numeric tags are hex-encoded with a `0x` prefix; symbolic tags pass through as-is.
        let tag = match block_number_or_tag {
            BlockNumberOrTag::Number(num) => format!("{num:#02x}"),
            other => other.to_string(),
        };
        let mut request = self.http_client.get(&self.base_url).query(&[
            ("module", "proxy"),
            ("action", "eth_getBlockByNumber"),
            ("tag", &tag),
            ("boolean", "true"),
            ("apikey", &self.api_key),
        ]);
        if !self.base_url.contains("chainid=") {
            // only append chainid if not part of the base url already
            request = request.query(&[("chainid", &self.chain_id.to_string())]);
        }
        let body = request.send().await?.text().await?;
        debug!(target: "etherscan", resp = %body, "fetched block from etherscan");
        let response: Response<RpcBlock> = serde_json::from_str(&body).inspect_err(|err| {
            warn!(target: "etherscan", "Failed to parse block response from etherscan: {}", err);
        })?;
        match response.payload {
            ResponsePayload::Success(block) => Ok((self.convert)(block)),
            ResponsePayload::Failure(err) => Err(eyre::eyre!("Failed to get block: {err}")),
        }
    }
}
impl<RpcBlock, PrimitiveBlock> BlockProvider for EtherscanBlockProvider<RpcBlock, PrimitiveBlock>
where
    RpcBlock: Serialize + DeserializeOwned + 'static,
    PrimitiveBlock: reth_primitives_traits::Block + 'static,
{
    type Block = PrimitiveBlock;

    async fn subscribe_blocks(&self, tx: mpsc::Sender<Self::Block>) {
        let mut previous_number: Option<u64> = None;
        let mut ticker = interval(self.interval);
        loop {
            ticker.tick().await;
            let block = match self.load_block(BlockNumberOrTag::Latest).await {
                Ok(block) => block,
                Err(err) => {
                    warn!(
                        target: "consensus::debug-client",
                        %err,
                        "Failed to fetch a block from Etherscan",
                    );
                    continue
                }
            };
            let number = block.header().number();
            // Only forward blocks that were not seen in a previous poll.
            if previous_number == Some(number) {
                continue;
            }
            if tx.send(block).await.is_err() {
                // Channel closed.
                break;
            }
            previous_number = Some(number);
        }
    }

    async fn get_block(&self, block_number: u64) -> eyre::Result<Self::Block> {
        self.load_block(BlockNumberOrTag::Number(block_number)).await
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/debug-client/src/providers/mod.rs | crates/consensus/debug-client/src/providers/mod.rs | mod etherscan;
mod rpc;
pub use etherscan::EtherscanBlockProvider;
pub use rpc::RpcBlockProvider;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/debug-client/src/providers/rpc.rs | crates/consensus/debug-client/src/providers/rpc.rs | use crate::BlockProvider;
use alloy_consensus::BlockHeader;
use alloy_provider::{Network, Provider, ProviderBuilder};
use futures::StreamExt;
use reth_node_api::Block;
use reth_tracing::tracing::warn;
use std::sync::Arc;
use tokio::sync::mpsc::Sender;
/// Block provider that fetches new blocks from an RPC endpoint using a connection that supports
/// RPC subscriptions.
#[derive(derive_more::Debug, Clone)]
pub struct RpcBlockProvider<N: Network, PrimitiveBlock> {
    /// Underlying RPC provider used for the block subscription and block fetches.
    #[debug(skip)]
    provider: Arc<dyn Provider<N>>,
    /// The RPC endpoint URL, kept for log messages.
    url: String,
    /// Converts the network block response into the primitive block type.
    #[debug(skip)]
    convert: Arc<dyn Fn(N::BlockResponse) -> PrimitiveBlock + Send + Sync>,
}
impl<N: Network, PrimitiveBlock> RpcBlockProvider<N, PrimitiveBlock> {
    /// Create a new RPC block provider with the given RPC URL.
    pub async fn new(
        rpc_url: &str,
        convert: impl Fn(N::BlockResponse) -> PrimitiveBlock + Send + Sync + 'static,
    ) -> eyre::Result<Self> {
        let provider = ProviderBuilder::default().connect(rpc_url).await?;
        Ok(Self {
            provider: Arc::new(provider),
            url: rpc_url.to_string(),
            convert: Arc::new(convert),
        })
    }
}
impl<N: Network, PrimitiveBlock> BlockProvider for RpcBlockProvider<N, PrimitiveBlock>
where
    PrimitiveBlock: Block + 'static,
{
    type Block = PrimitiveBlock;

    async fn subscribe_blocks(&self, tx: Sender<Self::Block>) {
        let subscription = match self.provider.subscribe_blocks().await {
            Ok(subscription) => subscription,
            Err(err) => {
                warn!(
                    target: "consensus::debug-client",
                    %err,
                    url=%self.url,
                    "Failed to subscribe to blocks",
                );
                return;
            }
        };
        let mut headers = subscription.into_stream();

        while let Some(header) = headers.next().await {
            // The subscription only yields headers; fetch the full block by number.
            let block = match self.get_block(header.number()).await {
                Ok(block) => block,
                Err(err) => {
                    warn!(
                        target: "consensus::debug-client",
                        %err,
                        url=%self.url,
                        "Failed to fetch a block",
                    );
                    continue;
                }
            };
            if tx.send(block).await.is_err() {
                // Channel closed.
                break;
            }
        }
    }

    async fn get_block(&self, block_number: u64) -> eyre::Result<Self::Block> {
        let block = self
            .provider
            .get_block_by_number(block_number.into())
            .full()
            .await?
            .ok_or_else(|| eyre::eyre!("block not found by number {}", block_number))?;
        Ok((self.convert)(block))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/common/src/lib.rs | crates/consensus/common/src/lib.rs | //! Commonly used consensus methods.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
/// Collection of consensus validation methods.
pub mod validation;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/common/src/validation.rs | crates/consensus/common/src/validation.rs | //! Collection of methods for block validation.
use alloy_consensus::{
constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader as _, EMPTY_OMMER_ROOT_HASH,
};
use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip7840::BlobParams};
use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks};
use reth_consensus::ConsensusError;
use reth_primitives_traits::{
constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT},
Block, BlockBody, BlockHeader, GotExpected, SealedBlock, SealedHeader,
};
/// The maximum RLP length of a block, in bytes, defined in [EIP-7934](https://eips.ethereum.org/EIPS/eip-7934).
///
/// Calculated as `MAX_BLOCK_SIZE` - `SAFETY_MARGIN` where
/// `MAX_BLOCK_SIZE` = `10_485_760`
/// `SAFETY_MARGIN` = `2_097_152`
pub const MAX_RLP_BLOCK_SIZE: usize = 8_388_608;
/// Gas used needs to be less than gas limit. Gas used is going to be checked after execution.
#[inline]
pub fn validate_header_gas<H: BlockHeader>(header: &H) -> Result<(), ConsensusError> {
    let (gas_used, gas_limit) = (header.gas_used(), header.gas_limit());
    if gas_used > gas_limit {
        return Err(ConsensusError::HeaderGasUsedExceedsGasLimit { gas_used, gas_limit })
    }
    // Check that the gas limit is below the maximum allowed gas limit
    if gas_limit > MAXIMUM_GAS_LIMIT_BLOCK {
        return Err(ConsensusError::HeaderGasLimitExceedsMax { gas_limit })
    }
    Ok(())
}
/// Ensure the EIP-1559 base fee is set if the London hardfork is active.
#[inline]
pub fn validate_header_base_fee<H: BlockHeader, ChainSpec: EthereumHardforks>(
    header: &H,
    chain_spec: &ChainSpec,
) -> Result<(), ConsensusError> {
    let london_active = chain_spec.is_london_active_at_block(header.number());
    if london_active && header.base_fee_per_gas().is_none() {
        return Err(ConsensusError::BaseFeeMissing)
    }
    Ok(())
}
/// Validate that withdrawals are present in Shanghai
///
/// See [EIP-4895]: Beacon chain push withdrawals as operations
///
/// [EIP-4895]: https://eips.ethereum.org/EIPS/eip-4895
#[inline]
pub fn validate_shanghai_withdrawals<B: Block>(
    block: &SealedBlock<B>,
) -> Result<(), ConsensusError> {
    // Shanghai requires both the withdrawals list and the header root to be present.
    let withdrawals = block.body().withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?;
    let computed_root = alloy_consensus::proofs::calculate_withdrawals_root(withdrawals);
    let header_root = block.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?;
    // The root recomputed from the body must match the one committed in the header.
    if computed_root != *header_root {
        return Err(ConsensusError::BodyWithdrawalsRootDiff(
            GotExpected { got: computed_root, expected: header_root }.into(),
        ));
    }
    Ok(())
}
/// Validate that blob gas is present in the block if Cancun is active.
///
/// See [EIP-4844]: Shard Blob Transactions
///
/// [EIP-4844]: https://eips.ethereum.org/EIPS/eip-4844
#[inline]
pub fn validate_cancun_gas<B: Block>(block: &SealedBlock<B>) -> Result<(), ConsensusError> {
    // The header's blob gas used must equal the sum of the blob gas used by
    // each blob tx in the body.
    let header_blob_gas_used = block.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?;
    let body_blob_gas = block.body().blob_gas_used();
    if body_blob_gas == header_blob_gas_used {
        Ok(())
    } else {
        Err(ConsensusError::BlobGasUsedDiff(GotExpected {
            got: header_blob_gas_used,
            expected: body_blob_gas,
        }))
    }
}
/// Ensures the block response data matches the header.
///
/// This ensures the body response items match the header's hashes:
/// - ommer hash
/// - transaction root
/// - withdrawals root
pub fn validate_body_against_header<B, H>(body: &B, header: &H) -> Result<(), ConsensusError>
where
    B: BlockBody,
    H: BlockHeader,
{
    // Ommers hash: a body without ommers is reported against the empty root.
    let body_ommers_hash = body.calculate_ommers_root();
    if Some(header.ommers_hash()) != body_ommers_hash {
        return Err(ConsensusError::BodyOmmersHashDiff(
            GotExpected {
                got: body_ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH),
                expected: header.ommers_hash(),
            }
            .into(),
        ))
    }

    // Transaction root must match the root recomputed from the body.
    let body_tx_root = body.calculate_tx_root();
    if header.transactions_root() != body_tx_root {
        return Err(ConsensusError::BodyTransactionRootDiff(
            GotExpected { got: body_tx_root, expected: header.transactions_root() }.into(),
        ))
    }

    // Withdrawals roots must be both present (and equal) or both absent.
    match (header.withdrawals_root(), body.calculate_withdrawals_root()) {
        (Some(header_withdrawals_root), Some(body_withdrawals_root)) => {
            if body_withdrawals_root != header_withdrawals_root {
                return Err(ConsensusError::BodyWithdrawalsRootDiff(
                    GotExpected { got: body_withdrawals_root, expected: header_withdrawals_root }
                        .into(),
                ))
            }
        }
        (None, None) => {
            // this is ok because we assume the fork is not active in this case
        }
        _ => return Err(ConsensusError::WithdrawalsRootUnexpected),
    }

    Ok(())
}
/// Validate a block without regard for state:
///
/// - Compares the ommer hash in the block header to the block body
/// - Compares the transactions root in the block header to the block body
/// - Pre-execution transaction validation
pub fn validate_block_pre_execution<B, ChainSpec>(
block: &SealedBlock<B>,
chain_spec: &ChainSpec,
) -> Result<(), ConsensusError>
where
B: Block,
ChainSpec: EthereumHardforks,
{
post_merge_hardfork_fields(block, chain_spec)?;
// Check transaction root
if let Err(error) = block.ensure_transaction_root_valid() {
return Err(ConsensusError::BodyTransactionRootDiff(error.into()))
}
Ok(())
}
/// Validates the ommers hash and other fork-specific fields.
///
/// These fork-specific validations are:
/// * EIP-4895 withdrawals validation, if shanghai is active based on the given chainspec. See more
///   information about the specific checks in [`validate_shanghai_withdrawals`].
/// * EIP-4844 blob gas validation, if cancun is active based on the given chainspec. See more
///   information about the specific checks in [`validate_cancun_gas`].
/// * EIP-7934 block size limit validation, if osaka is active based on the given chainspec.
pub fn post_merge_hardfork_fields<B, ChainSpec>(
    block: &SealedBlock<B>,
    chain_spec: &ChainSpec,
) -> Result<(), ConsensusError>
where
    B: Block,
    ChainSpec: EthereumHardforks,
{
    // Check ommers hash
    let body_ommers_hash = block.body().calculate_ommers_root();
    if Some(block.ommers_hash()) != body_ommers_hash {
        return Err(ConsensusError::BodyOmmersHashDiff(
            GotExpected {
                got: body_ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH),
                expected: block.ommers_hash(),
            }
            .into(),
        ))
    }

    let timestamp = block.timestamp_seconds();

    // EIP-4895: Beacon chain push withdrawals as operations
    if chain_spec.is_shanghai_active_at_timestamp(timestamp) {
        validate_shanghai_withdrawals(block)?;
    }

    // EIP-4844: blob gas accounting
    if chain_spec.is_cancun_active_at_timestamp(timestamp) {
        validate_cancun_gas(block)?;
    }

    // EIP-7934: cap on the RLP-encoded block size
    if chain_spec.is_osaka_active_at_timestamp(timestamp) &&
        block.rlp_length() > MAX_RLP_BLOCK_SIZE
    {
        return Err(ConsensusError::BlockTooLarge {
            rlp_length: block.rlp_length(),
            max_rlp_length: MAX_RLP_BLOCK_SIZE,
        })
    }
    Ok(())
}
/// Validates that the EIP-4844 header fields exist and conform to the spec. This ensures that:
///
/// * `blob_gas_used` exists as a header field
/// * `parent_beacon_block_root` exists as a header field
/// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB`
/// * `blob_gas_used` doesn't exceed the max allowed blob gas based on the given params
pub fn validate_4844_header_standalone<H: BlockHeader>(
    header: &H,
    blob_params: BlobParams,
) -> Result<(), ConsensusError> {
    let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?;
    if header.parent_beacon_block_root().is_none() {
        return Err(ConsensusError::ParentBeaconBlockRootMissing)
    }
    // Blob gas is only consumed in whole-blob increments.
    if !blob_gas_used.is_multiple_of(DATA_GAS_PER_BLOB) {
        return Err(ConsensusError::BlobGasUsedNotMultipleOfBlobGasPerBlob {
            blob_gas_used,
            blob_gas_per_blob: DATA_GAS_PER_BLOB,
        })
    }
    let max_blob_gas = blob_params.max_blob_gas_per_block();
    if blob_gas_used > max_blob_gas {
        return Err(ConsensusError::BlobGasUsedExceedsMaxBlobGasPerBlock {
            blob_gas_used,
            max_blob_gas_per_block: max_blob_gas,
        })
    }
    Ok(())
}
/// Validates the header's extra data according to the beacon consensus rules.
///
/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block.
/// This must be 32 bytes or fewer; formally Hx.
#[inline]
pub fn validate_header_extra_data<H: BlockHeader>(header: &H) -> Result<(), ConsensusError> {
    let len = header.extra_data().len();
    if len <= MAXIMUM_EXTRA_DATA_SIZE {
        Ok(())
    } else {
        Err(ConsensusError::ExtraDataExceedsMax { len })
    }
}
/// Validates against the parent hash and number.
///
/// This function ensures that the header block number is sequential and that the hash of the parent
/// header matches the parent hash in the header.
#[inline]
pub fn validate_against_parent_hash_number<H: BlockHeader>(
    header: &H,
    parent: &SealedHeader<H>,
) -> Result<(), ConsensusError> {
    // Parent number is consistent.
    let expected_number = parent.number() + 1;
    if expected_number != header.number() {
        return Err(ConsensusError::ParentBlockNumberMismatch {
            parent_block_number: parent.number(),
            block_number: header.number(),
        })
    }
    // The parent hash recorded in the header must match the actual parent's hash.
    if parent.hash() != header.parent_hash() {
        return Err(ConsensusError::ParentHashMismatch(
            GotExpected { got: header.parent_hash(), expected: parent.hash() }.into(),
        ))
    }
    Ok(())
}
/// Validates the base fee against the parent and EIP-1559 rules.
#[inline]
pub fn validate_against_parent_eip1559_base_fee<ChainSpec: EthChainSpec + EthereumHardforks>(
    header: &ChainSpec::Header,
    parent: &ChainSpec::Header,
    chain_spec: &ChainSpec,
) -> Result<(), ConsensusError> {
    // Base fee rules only apply once London is active.
    if !chain_spec.is_london_active_at_block(header.number()) {
        return Ok(())
    }
    let base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?;
    // The very first London block uses the protocol-defined initial base fee;
    // every later block derives it from the parent.
    let is_fork_block = chain_spec
        .ethereum_fork_activation(EthereumHardfork::London)
        .transitions_at_block(header.number());
    let expected_base_fee = if is_fork_block {
        alloy_eips::eip1559::INITIAL_BASE_FEE
    } else {
        chain_spec
            .next_block_base_fee(parent, header.timestamp_seconds())
            .ok_or(ConsensusError::BaseFeeMissing)?
    };
    if expected_base_fee != base_fee {
        return Err(ConsensusError::BaseFeeDiff(GotExpected {
            expected: expected_base_fee,
            got: base_fee,
        }))
    }
    Ok(())
}
/// Validates the timestamp against the parent to make sure it is in the past.
#[inline]
pub fn validate_against_parent_timestamp<H: BlockHeader>(
    header: &H,
    parent: &H,
) -> Result<(), ConsensusError> {
    // Child timestamp must be strictly greater than the parent's. Note the
    // comparison uses the raw `timestamp()` accessor while the error reports
    // `timestamp_seconds()` — NOTE(review): presumably the raw value is higher
    // resolution in this fork; confirm the two accessors agree for this check.
    if header.timestamp() <= parent.timestamp() {
        return Err(ConsensusError::TimestampIsInPast {
            parent_timestamp: parent.timestamp_seconds(),
            timestamp: header.timestamp_seconds(),
        })
    }
    Ok(())
}
/// Validates gas limit against parent gas limit.
///
/// The maximum allowable difference between self and parent gas limits is determined by the
/// parent's gas limit divided by the [`GAS_LIMIT_BOUND_DIVISOR`].
#[inline]
pub fn validate_against_parent_gas_limit<
    H: BlockHeader,
    ChainSpec: EthChainSpec + EthereumHardforks,
>(
    header: &SealedHeader<H>,
    parent: &SealedHeader<H>,
    chain_spec: &ChainSpec,
) -> Result<(), ConsensusError> {
    // Determine the parent gas limit, considering elasticity multiplier on the London fork.
    // On the exact London transition block the parent's limit is scaled up by the
    // elasticity multiplier before applying the bound checks (EIP-1559).
    let parent_gas_limit = if !chain_spec.is_london_active_at_block(parent.number()) &&
        chain_spec.is_london_active_at_block(header.number())
    {
        parent.gas_limit() *
            chain_spec
                .base_fee_params_at_timestamp(header.timestamp_seconds())
                .elasticity_multiplier as u64
    } else {
        parent.gas_limit()
    };
    // Check for an increase in gas limit beyond the allowed threshold.
    if header.gas_limit() > parent_gas_limit {
        if header.gas_limit() - parent_gas_limit >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR {
            return Err(ConsensusError::GasLimitInvalidIncrease {
                parent_gas_limit,
                child_gas_limit: header.gas_limit(),
            })
        }
    }
    // Check for a decrease in gas limit beyond the allowed threshold.
    else if parent_gas_limit - header.gas_limit() >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR {
        return Err(ConsensusError::GasLimitInvalidDecrease {
            parent_gas_limit,
            child_gas_limit: header.gas_limit(),
        })
    }
    // Check if the self gas limit is below the minimum required limit.
    // Only reached when the limit did not increase and any decrease was within bounds,
    // so a large out-of-bounds decrease reports `GasLimitInvalidDecrease` instead.
    else if header.gas_limit() < MINIMUM_GAS_LIMIT {
        return Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: header.gas_limit() })
    }
    Ok(())
}
/// Validates that the EIP-4844 header fields are correct with respect to the parent block. This
/// ensures that the `blob_gas_used` and `excess_blob_gas` fields exist in the child header, and
/// that the `excess_blob_gas` field matches the expected `excess_blob_gas` calculated from the
/// parent header fields.
pub fn validate_against_parent_4844<H: BlockHeader>(
    header: &H,
    parent: &H,
    blob_params: BlobParams,
) -> Result<(), ConsensusError> {
    // From [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension):
    //
    // > For the first post-fork block, both parent.blob_gas_used and parent.excess_blob_gas
    // > are evaluated as 0.
    //
    // This means in the first post-fork block, calc_excess_blob_gas will return 0.
    let parent_blob_gas_used = parent.blob_gas_used().unwrap_or(0);
    let parent_excess_blob_gas = parent.excess_blob_gas().unwrap_or(0);
    // Both child fields must be present post-Cancun.
    if header.blob_gas_used().is_none() {
        return Err(ConsensusError::BlobGasUsedMissing)
    }
    let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?;
    let parent_base_fee_per_gas = parent.base_fee_per_gas().unwrap_or(0);
    // NOTE(review): the `_osaka` variant of the excess-blob-gas calculation also
    // takes the parent base fee — presumably the base-fee-dependent formula of
    // EIP-7918; confirm it is intended to apply to all blocks validated here.
    let expected_excess_blob_gas = blob_params.next_block_excess_blob_gas_osaka(
        parent_excess_blob_gas,
        parent_blob_gas_used,
        parent_base_fee_per_gas,
    );
    if expected_excess_blob_gas != excess_blob_gas {
        return Err(ConsensusError::ExcessBlobGasDiff {
            diff: GotExpected { got: excess_blob_gas, expected: expected_excess_blob_gas },
            parent_excess_blob_gas,
            parent_blob_gas_used,
        })
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::{BlockBody, Header, TxEip4844};
    use alloy_eips::eip4895::Withdrawals;
    use alloy_primitives::{Address, Bytes, Signature, U256};
    use rand::Rng;
    use reth_chainspec::ChainSpecBuilder;
    use reth_ethereum_primitives::{Transaction, TransactionSigned};
    use reth_primitives_traits::proofs;
    /// Builds a signed EIP-4844 transaction carrying `num_blobs` random blob hashes.
    fn mock_blob_tx(nonce: u64, num_blobs: usize) -> TransactionSigned {
        let mut rng = rand::rng();
        let request = Transaction::Eip4844(TxEip4844 {
            chain_id: 1u64,
            nonce,
            max_fee_per_gas: 0x28f000fff,
            max_priority_fee_per_gas: 0x28f000fff,
            max_fee_per_blob_gas: 0x7,
            gas_limit: 10,
            to: Address::default(),
            value: U256::from(3_u64),
            input: Bytes::from(vec![1, 2]),
            access_list: Default::default(),
            blob_versioned_hashes: std::iter::repeat_with(|| rng.random())
                .take(num_blobs)
                .collect(),
        });
        // Dummy signature; signer recovery is not exercised by these tests.
        let signature = Signature::new(U256::default(), U256::default(), true);
        TransactionSigned::new_unhashed(request, signature)
    }
    #[test]
    fn cancun_block_incorrect_blob_gas_used() {
        let chain_spec = ChainSpecBuilder::mainnet().cancun_activated().build();
        // create a tx with 10 blobs
        let transaction = mock_blob_tx(1, 10);
        // Header claims only 1 blob gas used — deliberately inconsistent with the body.
        let header = Header {
            base_fee_per_gas: Some(1337),
            withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
            blob_gas_used: Some(1),
            transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
                &transaction,
            )),
            ..Default::default()
        };
        let body = BlockBody {
            transactions: vec![transaction],
            ommers: vec![],
            withdrawals: Some(Withdrawals::default()),
        };
        let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body });
        // 10 blobs times the blob gas per blob.
        let expected_blob_gas_used = 10 * DATA_GAS_PER_BLOB;
        // validate blob, it should fail blob gas used validation
        assert_eq!(
            validate_block_pre_execution(&block, &chain_spec),
            Err(ConsensusError::BlobGasUsedDiff(GotExpected {
                got: 1,
                expected: expected_blob_gas_used
            }))
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/consensus/src/lib.rs | crates/consensus/consensus/src/lib.rs | //! Consensus protocol functions
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use alloc::{fmt::Debug, string::String, vec::Vec};
use alloy_consensus::Header;
use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256};
use reth_execution_types::BlockExecutionResult;
use reth_primitives_traits::{
constants::{MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT},
transaction::error::InvalidTransactionError,
Block, GotExpected, GotExpectedBoxed, NodePrimitives, RecoveredBlock, SealedBlock,
SealedHeader,
};
/// A consensus implementation that does nothing.
pub mod noop;
#[cfg(any(test, feature = "test-utils"))]
/// test helpers for mocking consensus
pub mod test_utils;
/// [`Consensus`] implementation which knows full node primitives and is able to validation block's
/// execution outcome.
#[auto_impl::auto_impl(&, Arc)]
pub trait FullConsensus<N: NodePrimitives>: Consensus<N::Block> {
/// Validate a block considering world state, i.e. things that can not be checked before
/// execution.
///
/// See the Yellow Paper sections 4.3.2 "Holistic Validity".
///
/// Note: validating blocks does not include other validations of the Consensus
fn validate_block_post_execution(
&self,
block: &RecoveredBlock<N::Block>,
result: &BlockExecutionResult<N::Receipt>,
) -> Result<(), ConsensusError>;
}
/// Consensus is a protocol that chooses canonical chain.
#[auto_impl::auto_impl(&, Arc)]
pub trait Consensus<B: Block>: HeaderValidator<B::Header> {
    /// The error type related to consensus.
    type Error;
    /// Ensures that body field values match the header.
    ///
    /// This is a stateless check of a block body against its already-sealed header.
    fn validate_body_against_header(
        &self,
        body: &B::Body,
        header: &SealedHeader<B::Header>,
    ) -> Result<(), Self::Error>;
    /// Validate a block disregarding world state, i.e. things that can be checked before sender
    /// recovery and execution.
    ///
    /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and
    /// 11.1 "Ommer Validation".
    ///
    /// **This should not be called for the genesis block**.
    ///
    /// Note: validating blocks does not include other validations of the Consensus
    fn validate_block_pre_execution(&self, block: &SealedBlock<B>) -> Result<(), Self::Error>;
}
/// `HeaderValidator` is a protocol that validates headers and their relationships.
#[auto_impl::auto_impl(&, Arc)]
pub trait HeaderValidator<H = Header>: Debug + Send + Sync {
    /// Validate if header is correct and follows consensus specification.
    ///
    /// This is called on standalone header to check if all hashes are correct.
    fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError>;
    /// Validate that the header information regarding its parent is correct.
    /// This checks the block number, timestamp, basefee and gas limit increment.
    ///
    /// This is called before properties that are not in the header itself (like total difficulty)
    /// have been computed.
    ///
    /// **This should not be called for the genesis block**.
    ///
    /// Note: Validating header against its parent does not include other `HeaderValidator`
    /// validations.
    fn validate_header_against_parent(
        &self,
        header: &SealedHeader<H>,
        parent: &SealedHeader<H>,
    ) -> Result<(), ConsensusError>;
    /// Validates the given headers.
    ///
    /// The first header is validated on its own; every subsequent header is validated both on
    /// its own and against its predecessor.
    ///
    /// Note: this expects that the headers are in natural order (ascending block number)
    fn validate_header_range(
        &self,
        headers: &[SealedHeader<H>],
    ) -> Result<(), HeaderConsensusError<H>>
    where
        H: Clone,
    {
        let mut iter = headers.iter();
        // An empty slice is trivially valid.
        let Some(first) = iter.next() else { return Ok(()) };
        self.validate_header(first).map_err(|e| HeaderConsensusError(e, first.clone()))?;
        // Walk the remainder pairwise, carrying the previous header as the parent.
        let mut parent = first;
        for child in iter {
            self.validate_header(child).map_err(|e| HeaderConsensusError(e, child.clone()))?;
            self.validate_header_against_parent(child, parent)
                .map_err(|e| HeaderConsensusError(e, child.clone()))?;
            parent = child;
        }
        Ok(())
    }
}
/// Consensus Errors
#[derive(Debug, PartialEq, Eq, Clone, thiserror::Error)]
pub enum ConsensusError {
/// Error when the gas used in the header exceeds the gas limit.
#[error("block used gas ({gas_used}) is greater than gas limit ({gas_limit})")]
HeaderGasUsedExceedsGasLimit {
/// The gas used in the block header.
gas_used: u64,
/// The gas limit in the block header.
gas_limit: u64,
},
    /// Error when the gas limit is more than the maximum allowed.
#[error(
"header gas limit ({gas_limit}) exceed the maximum allowed gas limit ({MAXIMUM_GAS_LIMIT_BLOCK})"
)]
HeaderGasLimitExceedsMax {
/// The gas limit in the block header.
gas_limit: u64,
},
/// Error when block gas used doesn't match expected value
#[error("block gas used mismatch: {gas}; gas spent by each transaction: {gas_spent_by_tx:?}")]
BlockGasUsed {
/// The gas diff.
gas: GotExpected<u64>,
/// Gas spent by each transaction
gas_spent_by_tx: Vec<(u64, u64)>,
},
/// Error when the hash of block ommer is different from the expected hash.
#[error("mismatched block ommer hash: {0}")]
BodyOmmersHashDiff(GotExpectedBoxed<B256>),
/// Error when the state root in the block is different from the expected state root.
#[error("mismatched block state root: {0}")]
BodyStateRootDiff(GotExpectedBoxed<B256>),
/// Error when the transaction root in the block is different from the expected transaction
/// root.
#[error("mismatched block transaction root: {0}")]
BodyTransactionRootDiff(GotExpectedBoxed<B256>),
/// Error when the receipt root in the block is different from the expected receipt root.
#[error("receipt root mismatch: {0}")]
BodyReceiptRootDiff(GotExpectedBoxed<B256>),
/// Error when header bloom filter is different from the expected bloom filter.
#[error("header bloom filter mismatch: {0}")]
BodyBloomLogDiff(GotExpectedBoxed<Bloom>),
/// Error when the withdrawals root in the block is different from the expected withdrawals
/// root.
#[error("mismatched block withdrawals root: {0}")]
BodyWithdrawalsRootDiff(GotExpectedBoxed<B256>),
/// Error when the requests hash in the block is different from the expected requests
/// hash.
#[error("mismatched block requests hash: {0}")]
BodyRequestsHashDiff(GotExpectedBoxed<B256>),
/// Error when a block with a specific hash and number is already known.
#[error("block with [hash={hash}, number={number}] is already known")]
BlockKnown {
/// The hash of the known block.
hash: BlockHash,
/// The block number of the known block.
number: BlockNumber,
},
/// Error when the parent hash of a block is not known.
#[error("block parent [hash={hash}] is not known")]
ParentUnknown {
/// The hash of the unknown parent block.
hash: BlockHash,
},
/// Error when the block number does not match the parent block number.
#[error(
"block number {block_number} does not match parent block number {parent_block_number}"
)]
ParentBlockNumberMismatch {
/// The parent block number.
parent_block_number: BlockNumber,
/// The block number.
block_number: BlockNumber,
},
/// Error when the parent hash does not match the expected parent hash.
#[error("mismatched parent hash: {0}")]
ParentHashMismatch(GotExpectedBoxed<B256>),
/// Error when the block timestamp is in the future compared to our clock time.
#[error(
"block timestamp {timestamp} is in the future compared to our clock time {present_timestamp}"
)]
TimestampIsInFuture {
/// The block's timestamp.
timestamp: u64,
/// The current timestamp.
present_timestamp: u64,
},
/// Error when the base fee is missing.
#[error("base fee missing")]
BaseFeeMissing,
/// Error when there is a transaction signer recovery error.
#[error("transaction signer recovery error")]
TransactionSignerRecoveryError,
/// Error when the extra data length exceeds the maximum allowed.
#[error("extra data {len} exceeds max length")]
ExtraDataExceedsMax {
/// The length of the extra data.
len: usize,
},
/// Error when the difficulty after a merge is not zero.
#[error("difficulty after merge is not zero")]
TheMergeDifficultyIsNotZero,
/// Error when the nonce after a merge is not zero.
#[error("nonce after merge is not zero")]
TheMergeNonceIsNotZero,
/// Error when the ommer root after a merge is not empty.
#[error("ommer root after merge is not empty")]
TheMergeOmmerRootIsNotEmpty,
/// Error when the withdrawals root is missing.
#[error("missing withdrawals root")]
WithdrawalsRootMissing,
/// Error when the requests hash is missing.
#[error("missing requests hash")]
RequestsHashMissing,
/// Error when an unexpected withdrawals root is encountered.
#[error("unexpected withdrawals root")]
WithdrawalsRootUnexpected,
/// Error when an unexpected requests hash is encountered.
#[error("unexpected requests hash")]
RequestsHashUnexpected,
/// Error when withdrawals are missing.
#[error("missing withdrawals")]
BodyWithdrawalsMissing,
/// Error when requests are missing.
#[error("missing requests")]
BodyRequestsMissing,
/// Error when blob gas used is missing.
#[error("missing blob gas used")]
BlobGasUsedMissing,
/// Error when unexpected blob gas used is encountered.
#[error("unexpected blob gas used")]
BlobGasUsedUnexpected,
/// Error when excess blob gas is missing.
#[error("missing excess blob gas")]
ExcessBlobGasMissing,
/// Error when unexpected excess blob gas is encountered.
#[error("unexpected excess blob gas")]
ExcessBlobGasUnexpected,
/// Error when the parent beacon block root is missing.
#[error("missing parent beacon block root")]
ParentBeaconBlockRootMissing,
/// Error when an unexpected parent beacon block root is encountered.
#[error("unexpected parent beacon block root")]
ParentBeaconBlockRootUnexpected,
/// Error when blob gas used exceeds the maximum allowed.
#[error("blob gas used {blob_gas_used} exceeds maximum allowance {max_blob_gas_per_block}")]
BlobGasUsedExceedsMaxBlobGasPerBlock {
/// The actual blob gas used.
blob_gas_used: u64,
/// The maximum allowed blob gas per block.
max_blob_gas_per_block: u64,
},
/// Error when blob gas used is not a multiple of blob gas per blob.
#[error(
"blob gas used {blob_gas_used} is not a multiple of blob gas per blob {blob_gas_per_blob}"
)]
BlobGasUsedNotMultipleOfBlobGasPerBlob {
/// The actual blob gas used.
blob_gas_used: u64,
/// The blob gas per blob.
blob_gas_per_blob: u64,
},
/// Error when the blob gas used in the header does not match the expected blob gas used.
#[error("blob gas used mismatch: {0}")]
BlobGasUsedDiff(GotExpected<u64>),
/// Error for a transaction that violates consensus.
#[error(transparent)]
InvalidTransaction(InvalidTransactionError),
/// Error when the block's base fee is different from the expected base fee.
#[error("block base fee mismatch: {0}")]
BaseFeeDiff(GotExpected<u64>),
/// Error when there is an invalid excess blob gas.
#[error(
"invalid excess blob gas: {diff}; \
parent excess blob gas: {parent_excess_blob_gas}, \
parent blob gas used: {parent_blob_gas_used}"
)]
ExcessBlobGasDiff {
/// The excess blob gas diff.
diff: GotExpected<u64>,
/// The parent excess blob gas.
parent_excess_blob_gas: u64,
/// The parent blob gas used.
parent_blob_gas_used: u64,
},
/// Error when the child gas limit exceeds the maximum allowed increase.
#[error("child gas_limit {child_gas_limit} max increase is {parent_gas_limit}/1024")]
GasLimitInvalidIncrease {
/// The parent gas limit.
parent_gas_limit: u64,
/// The child gas limit.
child_gas_limit: u64,
},
/// Error indicating that the child gas limit is below the minimum allowed limit.
///
/// This error occurs when the child gas limit is less than the specified minimum gas limit.
#[error(
"child gas limit {child_gas_limit} is below the minimum allowed limit ({MINIMUM_GAS_LIMIT})"
)]
GasLimitInvalidMinimum {
/// The child gas limit.
child_gas_limit: u64,
},
/// Error indicating that the block gas limit is above the allowed maximum.
///
/// This error occurs when the gas limit is more than the specified maximum gas limit.
#[error("child gas limit {block_gas_limit} is above the maximum allowed limit ({MAXIMUM_GAS_LIMIT_BLOCK})")]
GasLimitInvalidBlockMaximum {
/// block gas limit.
block_gas_limit: u64,
},
/// Error when the child gas limit exceeds the maximum allowed decrease.
#[error("child gas_limit {child_gas_limit} max decrease is {parent_gas_limit}/1024")]
GasLimitInvalidDecrease {
/// The parent gas limit.
parent_gas_limit: u64,
/// The child gas limit.
child_gas_limit: u64,
},
/// Error when the block timestamp is in the past compared to the parent timestamp.
#[error(
"block timestamp {timestamp} is in the past compared to the parent timestamp {parent_timestamp}"
)]
TimestampIsInPast {
/// The parent block's timestamp.
parent_timestamp: u64,
/// The block's timestamp.
timestamp: u64,
},
/// Error when the block is too large.
#[error("block is too large: {rlp_length} > {max_rlp_length}")]
BlockTooLarge {
/// The actual RLP length of the block.
rlp_length: usize,
/// The maximum allowed RLP length.
max_rlp_length: usize,
},
/// Other, likely an injected L2 error.
#[error("{0}")]
Other(String),
}
impl ConsensusError {
    /// Returns `true` if the error is a state root error.
    pub const fn is_state_root_error(&self) -> bool {
        // Only `BodyStateRootDiff` represents a state-root mismatch.
        matches!(self, Self::BodyStateRootDiff(_))
    }
}
impl From<InvalidTransactionError> for ConsensusError {
    /// Wraps a transaction-level consensus violation into [`ConsensusError`].
    fn from(value: InvalidTransactionError) -> Self {
        Self::InvalidTransaction(value)
    }
}
/// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to.
#[derive(thiserror::Error, Debug)]
#[error("Consensus error: {0}, Invalid header: {1:?}")]
pub struct HeaderConsensusError<H>(ConsensusError, SealedHeader<H>);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/consensus/src/noop.rs | crates/consensus/consensus/src/noop.rs | use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator};
use alloc::sync::Arc;
use reth_execution_types::BlockExecutionResult;
use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader};
/// A Consensus implementation that does nothing.
#[derive(Debug, Copy, Clone, Default)]
#[non_exhaustive]
pub struct NoopConsensus;
impl NoopConsensus {
    /// Creates an [`Arc`]-wrapped instance of `Self`.
    pub fn arc() -> Arc<Self> {
        // `NoopConsensus` derives `Default`, so the Arc can be built directly.
        Arc::default()
    }
}
// All header checks unconditionally succeed for the no-op consensus.
impl<H> HeaderValidator<H> for NoopConsensus {
    fn validate_header(&self, _header: &SealedHeader<H>) -> Result<(), ConsensusError> {
        Ok(())
    }
    fn validate_header_against_parent(
        &self,
        _header: &SealedHeader<H>,
        _parent: &SealedHeader<H>,
    ) -> Result<(), ConsensusError> {
        Ok(())
    }
}
// Body and pre-execution block checks unconditionally succeed.
impl<B: Block> Consensus<B> for NoopConsensus {
    type Error = ConsensusError;
    fn validate_body_against_header(
        &self,
        _body: &B::Body,
        _header: &SealedHeader<B::Header>,
    ) -> Result<(), Self::Error> {
        Ok(())
    }
    fn validate_block_pre_execution(&self, _block: &SealedBlock<B>) -> Result<(), Self::Error> {
        Ok(())
    }
}
// Post-execution validation unconditionally succeeds.
impl<N: NodePrimitives> FullConsensus<N> for NoopConsensus {
    fn validate_block_post_execution(
        &self,
        _block: &RecoveredBlock<N::Block>,
        _result: &BlockExecutionResult<N::Receipt>,
    ) -> Result<(), ConsensusError> {
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/consensus/consensus/src/test_utils.rs | crates/consensus/consensus/src/test_utils.rs | use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator};
use core::sync::atomic::{AtomicBool, Ordering};
use reth_execution_types::BlockExecutionResult;
use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader};
/// Consensus engine implementation for testing
#[derive(Debug)]
pub struct TestConsensus {
/// Flag whether the header validation should purposefully fail
fail_validation: AtomicBool,
/// Separate flag for setting whether `validate_body_against_header` should fail. It is needed
/// for testing networking logic for which the body failing this check is getting completely
/// rejected while more high-level failures are handled by the sync logic.
fail_body_against_header: AtomicBool,
}
impl Default for TestConsensus {
    fn default() -> Self {
        // Both failure flags start cleared: validation succeeds by default.
        Self {
            fail_validation: AtomicBool::default(),
            fail_body_against_header: AtomicBool::default(),
        }
    }
}
impl TestConsensus {
    /// Get the failed validation flag.
    pub fn fail_validation(&self) -> bool {
        self.fail_validation.load(Ordering::SeqCst)
    }
    /// Update the validation flag.
    ///
    /// Note: this also sets the body-against-header flag to the same value.
    pub fn set_fail_validation(&self, val: bool) {
        self.fail_validation.store(val, Ordering::SeqCst);
        self.fail_body_against_header.store(val, Ordering::SeqCst);
    }
    /// Returns the body validation flag.
    pub fn fail_body_against_header(&self) -> bool {
        self.fail_body_against_header.load(Ordering::SeqCst)
    }
    /// Update the body validation flag.
    pub fn set_fail_body_against_header(&self, val: bool) {
        self.fail_body_against_header.store(val, Ordering::SeqCst);
    }
}
impl<N: NodePrimitives> FullConsensus<N> for TestConsensus {
    fn validate_block_post_execution(
        &self,
        _block: &RecoveredBlock<N::Block>,
        _result: &BlockExecutionResult<N::Receipt>,
    ) -> Result<(), ConsensusError> {
        // Bail with the sentinel error when failure is being simulated.
        if self.fail_validation() {
            return Err(ConsensusError::BaseFeeMissing);
        }
        Ok(())
    }
}
impl<B: Block> Consensus<B> for TestConsensus {
    type Error = ConsensusError;
    fn validate_body_against_header(
        &self,
        _body: &B::Body,
        _header: &SealedHeader<B::Header>,
    ) -> Result<(), Self::Error> {
        // Uses the dedicated body flag so networking tests can reject bodies
        // independently of general validation failures.
        if self.fail_body_against_header() {
            return Err(ConsensusError::BaseFeeMissing);
        }
        Ok(())
    }
    fn validate_block_pre_execution(&self, _block: &SealedBlock<B>) -> Result<(), Self::Error> {
        // Bail with the sentinel error when failure is being simulated.
        if self.fail_validation() {
            return Err(ConsensusError::BaseFeeMissing);
        }
        Ok(())
    }
}
impl<H> HeaderValidator<H> for TestConsensus {
    fn validate_header(&self, _header: &SealedHeader<H>) -> Result<(), ConsensusError> {
        // Early-exit with the sentinel error when failures are being simulated.
        if self.fail_validation() {
            return Err(ConsensusError::BaseFeeMissing);
        }
        Ok(())
    }
    fn validate_header_against_parent(
        &self,
        _header: &SealedHeader<H>,
        _parent: &SealedHeader<H>,
    ) -> Result<(), ConsensusError> {
        if self.fail_validation() {
            return Err(ConsensusError::BaseFeeMissing);
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/config/src/config.rs | crates/config/src/config.rs | //! Configuration files.
use reth_network_types::{PeersConfig, SessionsConfig};
use reth_prune_types::PruneModes;
use reth_stages_types::ExecutionStageThresholds;
use std::{
path::{Path, PathBuf},
time::Duration,
};
use url::Url;
#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";
/// The default prune block interval
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;
/// Configuration for the reth node.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
/// Configuration for each stage in the pipeline.
// TODO(onbjerg): Can we make this easier to maintain when we add/remove stages?
pub stages: StageConfig,
/// Configuration for pruning.
#[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
pub prune: Option<PruneConfig>,
/// Configuration for the discovery service.
pub peers: PeersConfig,
/// Configuration for peer sessions.
pub sessions: SessionsConfig,
}
impl Config {
    /// Sets the pruning configuration, replacing any previously configured one.
    pub fn update_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = Some(prune_config);
    }
}
#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            Ok(cfg_string) => {
                toml::from_str(&cfg_string).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                // No config yet: create parent directories, persist the defaults,
                // and return them.
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let cfg = Self::default();
                let s = toml::to_string_pretty(&cfg)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, s)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(cfg)
            }
            // Any other I/O error (e.g. permissions) is surfaced to the caller.
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }
    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        // Fall back to the unmodified peers config when reading the file fails.
        self.peers
            .clone()
            .with_basic_nodes_from_file(peers_file)
            .unwrap_or_else(|_| self.peers.clone())
    }
    /// Save the configuration to toml file.
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        // Refuse to write to anything that is not a `.toml` file.
        if path.extension() != Some(std::ffi::OsStr::new(EXTENSION)) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }
        std::fs::write(
            path,
            toml::to_string(self)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?,
        )
    }
}
/// Configuration for each stage in the pipeline.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
/// ERA stage configuration.
pub era: EraConfig,
/// Header stage configuration.
pub headers: HeadersConfig,
/// Body stage configuration.
pub bodies: BodiesConfig,
/// Sender Recovery stage configuration.
pub sender_recovery: SenderRecoveryConfig,
/// Execution stage configuration.
pub execution: ExecutionConfig,
/// Prune stage configuration.
pub prune: PruneStageConfig,
/// Account Hashing stage configuration.
pub account_hashing: HashingConfig,
/// Storage Hashing stage configuration.
pub storage_hashing: HashingConfig,
/// Merkle stage configuration.
pub merkle: MerkleConfig,
/// Transaction Lookup stage configuration.
pub transaction_lookup: TransactionLookupConfig,
/// Index Account History stage configuration.
pub index_account_history: IndexHistoryConfig,
/// Index Storage History stage configuration.
pub index_storage_history: IndexHistoryConfig,
/// Common ETL related configuration.
pub etl: EtlConfig,
}
impl StageConfig {
    /// The highest threshold (in number of blocks) for switching between incremental and full
    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
    /// required to figure out if can prune or not changesets on subsequent pipeline runs during
    /// `ExecutionStage`
    pub fn execution_external_clean_threshold(&self) -> u64 {
        // `max` is associative and commutative, so chaining order is irrelevant.
        self.account_hashing
            .clean_threshold
            .max(self.storage_hashing.clean_threshold)
            .max(self.merkle.incremental_threshold)
    }
}
/// ERA stage configuration.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EraConfig {
/// Path to a local directory where ERA1 files are located.
///
/// Conflicts with `url`.
pub path: Option<PathBuf>,
/// The base URL of an ERA1 file host to download from.
///
/// Conflicts with `path`.
pub url: Option<Url>,
/// Path to a directory where files downloaded from `url` will be stored until processed.
///
/// Required for `url`.
pub folder: Option<PathBuf>,
}
impl EraConfig {
    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
    pub fn with_datadir(self, dir: impl AsRef<Path>) -> Self {
        // Struct-update form: keep all other fields, replace only `folder`.
        Self { folder: Some(dir.as_ref().join("era")), ..self }
    }
}
/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
/// The maximum number of requests to send concurrently.
///
/// Default: 100
pub downloader_max_concurrent_requests: usize,
/// The minimum number of requests to send concurrently.
///
/// Default: 5
pub downloader_min_concurrent_requests: usize,
/// Maximum amount of responses to buffer internally.
/// The response contains multiple headers.
pub downloader_max_buffered_responses: usize,
/// The maximum number of headers to request from a peer at a time.
pub downloader_request_limit: u64,
/// The maximum number of headers to download before committing progress to the database.
pub commit_threshold: u64,
}
impl Default for HeadersConfig {
    fn default() -> Self {
        // Fields listed in struct-declaration order; values unchanged.
        Self {
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_max_buffered_responses: 100,
            downloader_request_limit: 1_000,
            commit_threshold: 10_000,
        }
    }
}
/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The batch size of non-empty blocks per one request
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}
impl Default for BodiesConfig {
    fn default() -> Self {
        Self {
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
            downloader_stream_batch_size: 1_000,
            downloader_request_limit: 200,
        }
    }
}
/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    pub commit_threshold: u64,
}
impl Default for SenderRecoveryConfig {
    fn default() -> Self {
        // Recover senders for up to five million transactions per commit.
        Self { commit_threshold: 5_000_000 }
    }
}
/// Execution stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent on blocks processing before the execution stage commits.
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}
impl Default for ExecutionConfig {
    fn default() -> Self {
        // 50k full blocks at the 30M gas target.
        let gas_budget = 30_000_000 * 50_000;
        Self {
            max_blocks: Some(500_000),
            max_changes: Some(5_000_000),
            max_cumulative_gas: Some(gas_budget),
            // Commit at least every ten minutes of wall-clock execution.
            max_duration: Some(Duration::from_secs(600)),
        }
    }
}
impl From<ExecutionConfig> for ExecutionStageThresholds {
fn from(config: ExecutionConfig) -> Self {
Self {
max_blocks: config.max_blocks,
max_changes: config.max_changes,
max_cumulative_gas: config.max_cumulative_gas,
max_duration: config.max_duration,
}
}
}
/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    pub commit_threshold: usize,
}
impl Default for PruneStageConfig {
    fn default() -> Self {
        // Prune at most one million entries between commits.
        Self { commit_threshold: 1_000_000 }
    }
}
/// Hashing stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    pub commit_threshold: u64,
}
impl Default for HashingConfig {
    fn default() -> Self {
        Self {
            // Switch to a full re-hash beyond half a million blocks of catch-up.
            clean_threshold: 500_000,
            commit_threshold: 100_000,
        }
    }
}
/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks we will run the incremental root method for when we are catching up on
    /// the merkle stage for a large number of blocks.
    ///
    /// When we are catching up for a large number of blocks, we can only run the incremental root
    /// for a limited number of blocks, otherwise the incremental root method may cause the node to
    /// OOM. This number determines how many blocks in a row we will run the incremental root
    /// method for.
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of changes
    /// to whole rebuild.
    pub rebuild_threshold: u64,
}
impl Default for MerkleConfig {
    fn default() -> Self {
        Self {
            // Keep incremental batches small to bound memory use.
            incremental_threshold: 7_000,
            rebuild_threshold: 100_000,
        }
    }
}
/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    pub chunk_size: u64,
}
impl Default for TransactionLookupConfig {
    fn default() -> Self {
        // Flush the lookup table every five million transactions.
        Self { chunk_size: 5_000_000 }
    }
}
/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    pub file_size: usize,
}
impl Default for EtlConfig {
    fn default() -> Self {
        // No dedicated directory by default; the node picks one from the datadir.
        Self::new(None, Self::default_file_size())
    }
}
impl EtlConfig {
    /// Creates an ETL configuration
    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
        Self { dir, file_size }
    }
    /// Return default ETL directory from datadir path.
    pub fn from_datadir(path: &Path) -> PathBuf {
        path.join("etl-tmp")
    }
    /// Default size in bytes of data held in memory before being flushed to disk as a file.
    pub const fn default_file_size() -> usize {
        // 500 MB
        500 * 1024 * 1024
    }
}
/// History stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    pub commit_threshold: u64,
}
impl Default for IndexHistoryConfig {
    fn default() -> Self {
        // Index history in batches of one hundred thousand blocks.
        Self { commit_threshold: 100_000 }
    }
}
/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    // `parts` is the legacy name of this field (see the alpha 0.0.8 config in the tests);
    // the alias keeps old config files deserializable.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}
impl Default for PruneConfig {
    fn default() -> Self {
        // No segments are pruned by default; the interval falls back to the crate constant.
        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::none() }
    }
}
impl PruneConfig {
    /// Returns whether there is any kind of receipt pruning configuration.
    pub fn has_receipts_pruning(&self) -> bool {
        !self.segments.receipts_log_filter.is_empty() || self.segments.receipts.is_some()
    }
    /// Merges another `PruneConfig` into this one, taking values from the other config if and only
    /// if the corresponding value in this config is not set.
    pub fn merge(&mut self, other: Option<Self>) {
        let Some(Self { block_interval, segments }) = other else { return };

        // The interval is not optional, so only adopt the incoming value while ours still
        // equals the built-in default.
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Exhaustive destructuring, so adding a segment forces this merge to be revisited.
        let PruneModes {
            sender_recovery,
            transaction_lookup,
            receipts,
            account_history,
            storage_history,
            bodies_history,
            receipts_log_filter,
        } = segments;

        // For every segment, a value already present in `self` wins.
        let ours = &mut self.segments;
        ours.sender_recovery = ours.sender_recovery.or(sender_recovery);
        ours.transaction_lookup = ours.transaction_lookup.or(transaction_lookup);
        ours.receipts = ours.receipts.or(receipts);
        ours.account_history = ours.account_history.or(account_history);
        ours.storage_history = ours.storage_history.or(storage_history);
        ours.bodies_history = ours.bodies_history.or(bodies_history);
        if ours.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
            ours.receipts_log_filter = receipts_log_filter;
        }
    }
}
/// Helper type to support older versions of Duration deserialization.
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    // Accepts both the humantime string form (e.g. '10m') and the older
    // `{ secs, nanos }` struct form. `untagged` tries variants in declaration
    // order, so the humantime parser gets the first attempt — do not reorder.
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        // New format: human-readable duration strings via `humantime_serde`.
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        // Legacy format: serde's default `Duration` representation.
        Duration(Option<Duration>),
    }
    // Whichever variant matched, unwrap to the inner `Option<Duration>`.
    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}
#[cfg(all(test, feature = "serde"))]
mod tests {
use super::{Config, EXTENSION};
use crate::PruneConfig;
use alloy_primitives::Address;
use reth_network_peers::TrustedPeer;
use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};
fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
let temp_dir = tempfile::tempdir().unwrap();
let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);
proc(&config_path);
temp_dir.close().unwrap()
}
/// Run a test function with a temporary config path as fixture.
fn with_config_path(test_fn: fn(&Path)) {
// Create a temporary directory for the config file
let config_dir = tempfile::tempdir().expect("creating test fixture failed");
// Create the config file path
let config_path =
config_dir.path().join("example-app").join("example-config").with_extension("toml");
// Run the test function with the config path
test_fn(&config_path);
config_dir.close().expect("removing test fixture failed");
}
#[test]
fn test_load_path_works() {
with_config_path(|path| {
let config = Config::from_path(path).expect("load_path failed");
assert_eq!(config, Config::default());
})
}
#[test]
fn test_load_path_reads_existing_config() {
with_config_path(|path| {
let config = Config::default();
// Create the parent directory if it doesn't exist
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent).expect("Failed to create directories");
}
// Write the config to the file
std::fs::write(path, toml::to_string(&config).unwrap())
.expect("Failed to write config");
// Load the config from the file and compare it
let loaded = Config::from_path(path).expect("load_path failed");
assert_eq!(config, loaded);
})
}
#[test]
fn test_load_path_fails_on_invalid_toml() {
with_config_path(|path| {
let invalid_toml = "invalid toml data";
// Create the parent directory if it doesn't exist
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent).expect("Failed to create directories");
}
// Write invalid TOML data to the file
std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");
// Attempt to load the config should fail
let result = Config::from_path(path);
assert!(result.is_err());
})
}
#[test]
fn test_load_path_creates_directory_if_not_exists() {
with_config_path(|path| {
// Ensure the directory does not exist
let parent = path.parent().unwrap();
assert!(!parent.exists());
// Load the configuration, which should create the directory and a default config file
let config = Config::from_path(path).expect("load_path failed");
assert_eq!(config, Config::default());
// The directory and file should now exist
assert!(parent.exists());
assert!(path.exists());
});
}
#[test]
fn test_store_config() {
with_tempdir("config-store-test", |config_path| {
let config = Config::default();
std::fs::write(
config_path,
toml::to_string(&config).expect("Failed to serialize config"),
)
.expect("Failed to write config file");
})
}
#[test]
fn test_store_config_method() {
with_tempdir("config-store-test-method", |config_path| {
let config = Config::default();
config.save(config_path).expect("Failed to store config");
})
}
#[test]
fn test_load_config() {
with_tempdir("config-load-test", |config_path| {
let config = Config::default();
// Write the config to a file
std::fs::write(
config_path,
toml::to_string(&config).expect("Failed to serialize config"),
)
.expect("Failed to write config file");
// Load the config from the file
let loaded_config = Config::from_path(config_path).unwrap();
// Compare the loaded config with the original config
assert_eq!(config, loaded_config);
})
}
#[test]
fn test_load_execution_stage() {
with_tempdir("config-load-test", |config_path| {
let mut config = Config::default();
config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));
// Write the config to a file
std::fs::write(
config_path,
toml::to_string(&config).expect("Failed to serialize config"),
)
.expect("Failed to write config file");
// Load the config from the file
let loaded_config = Config::from_path(config_path).unwrap();
// Compare the loaded config with the original config
assert_eq!(config, loaded_config);
})
}
// ensures config deserialization is backwards compatible
#[test]
fn test_backwards_compatibility() {
let alpha_0_0_8 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000
[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100
[stages.sender_recovery]
commit_threshold = 5000000
[stages.execution]
max_blocks = 500000
max_changes = 5000000
[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000
[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000
[stages.merkle]
clean_threshold = 50000
[stages.transaction_lookup]
chunk_size = 5000000
[stages.index_account_history]
commit_threshold = 100000
[stages.index_storage_history]
commit_threshold = 100000
[peers]
refill_slots_interval = '1s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'
[peers.connection_info]
max_outbound = 100
max_inbound = 30
[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'
[sessions]
session_command_buffer = 32
session_event_buffer = 260
[sessions.limits]
[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0
[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
[prune]
block_interval = 5
[prune.parts]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.parts.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();
let alpha_0_0_11 = r"#
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.segments.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();
let alpha_0_0_18 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000
[stages.total_difficulty]
commit_threshold = 100000
[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100
[stages.sender_recovery]
commit_threshold = 5000000
[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
[stages.execution.max_duration]
secs = 600
nanos = 0
[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000
[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000
[stages.merkle]
clean_threshold = 50000
[stages.transaction_lookup]
commit_threshold = 5000000
[stages.index_account_history]
commit_threshold = 100000
[stages.index_storage_history]
commit_threshold = 100000
[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'
[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10
[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024
[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'
[sessions]
session_command_buffer = 32
session_event_buffer = 260
[sessions.limits]
[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0
[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));
let alpha_0_0_19 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000
[stages.total_difficulty]
commit_threshold = 100000
[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100
[stages.sender_recovery]
commit_threshold = 5000000
[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
max_duration = '10m'
[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000
[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000
[stages.merkle]
clean_threshold = 50000
[stages.transaction_lookup]
commit_threshold = 5000000
[stages.index_account_history]
commit_threshold = 100000
[stages.index_storage_history]
commit_threshold = 100000
[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'
[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10
[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024
[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'
[sessions]
session_command_buffer = 32
session_event_buffer = 260
[sessions.limits]
[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0
[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
}
// ensures prune config deserialization is backwards compatible
#[test]
fn test_backwards_compatibility_prune_full() {
let s = r"#
[prune]
block_interval = 5
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { distance = 16384 }
#";
let _conf: Config = toml::from_str(s).unwrap();
let s = r"#
[prune]
block_interval = 5
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = 'full'
#";
let err = toml::from_str::<Config>(s).unwrap_err().to_string();
assert!(err.contains("invalid value: string \"full\""), "{}", err);
}
#[test]
fn test_prune_config_merge() {
let mut config1 = PruneConfig {
block_interval: 5,
segments: PruneModes {
sender_recovery: Some(PruneMode::Full),
transaction_lookup: None,
receipts: Some(PruneMode::Distance(1000)),
account_history: None,
storage_history: Some(PruneMode::Before(5000)),
bodies_history: None,
receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
Address::random(),
PruneMode::Full,
)])),
},
};
let config2 = PruneConfig {
block_interval: 10,
segments: PruneModes {
sender_recovery: Some(PruneMode::Distance(500)),
transaction_lookup: Some(PruneMode::Full),
receipts: Some(PruneMode::Full),
account_history: Some(PruneMode::Distance(2000)),
storage_history: Some(PruneMode::Distance(3000)),
bodies_history: None,
receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
(Address::random(), PruneMode::Distance(1000)),
(Address::random(), PruneMode::Before(2000)),
])),
},
};
let original_filter = config1.segments.receipts_log_filter.clone();
config1.merge(Some(config2));
// Check that the configuration has been merged. Any configuration present in config1
// should not be overwritten by config2
assert_eq!(config1.block_interval, 10);
assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/config/src/lib.rs | crates/config/src/lib.rs | //! Standalone crate for Reth configuration types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod config;
pub use config::{BodiesConfig, Config, PruneConfig};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.