repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/trie/parallel/src/stats.rs | crates/trie/parallel/src/stats.rs | use derive_more::Deref;
use reth_trie::stats::{TrieStats, TrieTracker};
/// Trie stats.
///
/// Extends the general [`TrieStats`] (available through `Deref`) with counters
/// specific to the parallel computation.
#[derive(Deref, Clone, Copy, Debug)]
pub struct ParallelTrieStats {
    /// General trie stats, exposed via `Deref`.
    #[deref]
    trie: TrieStats,
    /// Number of storage roots that were computed ahead of time in parallel.
    precomputed_storage_roots: u64,
    /// Number of leaf nodes added without a precomputed storage root.
    missed_leaves: u64,
}

impl ParallelTrieStats {
    /// Return general trie stats.
    pub const fn trie_stats(&self) -> TrieStats {
        self.trie
    }

    /// The number of pre-computed storage roots.
    pub const fn precomputed_storage_roots(&self) -> u64 {
        self.precomputed_storage_roots
    }

    /// The number of added leaf nodes for which we did not precompute the storage root.
    pub const fn missed_leaves(&self) -> u64 {
        self.missed_leaves
    }
}
/// Trie metrics tracker.
///
/// Mutable counterpart of [`ParallelTrieStats`]: counters are incremented during
/// the root calculation and frozen into stats by [`Self::finish`].
#[derive(Deref, Default, Debug)]
pub struct ParallelTrieTracker {
    /// General trie tracker, exposed via `Deref`.
    #[deref]
    trie: TrieTracker,
    /// Number of storage roots computed ahead of time in parallel.
    precomputed_storage_roots: u64,
    /// Number of leaves added without a precomputed storage root.
    missed_leaves: u64,
}

impl ParallelTrieTracker {
    /// Set the number of precomputed storage roots.
    pub const fn set_precomputed_storage_roots(&mut self, count: u64) {
        self.precomputed_storage_roots = count;
    }

    /// Increment the number of branches added to the hash builder during the calculation.
    pub const fn inc_branch(&mut self) {
        self.trie.inc_branch();
    }

    /// Increment the number of leaves added to the hash builder during the calculation.
    pub const fn inc_leaf(&mut self) {
        self.trie.inc_leaf();
    }

    /// Increment the number of added leaf nodes for which we did not precompute the storage root.
    pub const fn inc_missed_leaves(&mut self) {
        self.missed_leaves += 1;
    }

    /// Called when root calculation is finished to return trie statistics.
    ///
    /// Consumes the tracker; also finalizes the inner [`TrieTracker`].
    pub fn finish(self) -> ParallelTrieStats {
        ParallelTrieStats {
            trie: self.trie.finish(),
            precomputed_storage_roots: self.precomputed_storage_roots,
            missed_leaves: self.missed_leaves,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/trie/parallel/src/lib.rs | crates/trie/parallel/src/lib.rs | //! Implementation of exotic state root computation approaches.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
mod storage_root_targets;
pub use storage_root_targets::StorageRootTargets;
/// Parallel trie calculation stats.
pub mod stats;
/// Implementation of parallel state root computation.
pub mod root;
/// Implementation of parallel proof computation.
pub mod proof;
pub mod proof_task;
/// Parallel state root metrics.
#[cfg(feature = "metrics")]
pub mod metrics;
/// Proof task manager metrics.
#[cfg(feature = "metrics")]
pub mod proof_task_metrics;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/trie/parallel/src/root.rs | crates/trie/parallel/src/root.rs | #[cfg(feature = "metrics")]
use crate::metrics::ParallelStateRootMetrics;
use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets};
use alloy_primitives::B256;
use alloy_rlp::{BufMut, Encodable};
use itertools::Itertools;
use reth_execution_errors::StorageRootError;
use reth_provider::{
providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError,
};
use reth_storage_errors::db::DatabaseError;
use reth_trie::{
hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory},
node_iter::{TrieElement, TrieNodeIter},
trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory},
updates::TrieUpdates,
walker::TrieWalker,
HashBuilder, Nibbles, StorageRoot, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE,
};
use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
use std::{
collections::HashMap,
sync::{mpsc, Arc, OnceLock},
time::Duration,
};
use thiserror::Error;
use tokio::runtime::{Builder, Handle, Runtime};
use tracing::*;
/// Parallel incremental state root calculator.
///
/// The calculator starts off by launching tasks to compute storage roots.
/// Then, it immediately starts walking the state trie updating the necessary trie
/// nodes in the process. Upon encountering a leaf node, it will poll the storage root
/// task for the corresponding hashed address.
///
/// Internally, the calculator uses [`ConsistentDbView`] since
/// it needs to rely on database state staying the same until
/// the last transaction is open.
/// See docs of using [`ConsistentDbView`] for caveats.
///
/// Note: This implementation only serves as a fallback for the sparse trie-based
/// state root calculation. The sparse trie approach is more efficient as it avoids traversing
/// the entire trie, only operating on the modified parts.
#[derive(Debug)]
pub struct ParallelStateRoot<Factory> {
    /// Consistent view of the database.
    view: ConsistentDbView<Factory>,
    /// Trie input (in-memory nodes, hashed state overlay, and prefix sets).
    input: TrieInput,
    /// Parallel state root metrics.
    #[cfg(feature = "metrics")]
    metrics: ParallelStateRootMetrics,
}
impl<Factory> ParallelStateRoot<Factory> {
    /// Create new parallel state root calculator.
    ///
    /// No work is performed until one of the `incremental_root*` methods is called.
    pub fn new(view: ConsistentDbView<Factory>, input: TrieInput) -> Self {
        Self {
            view,
            input,
            #[cfg(feature = "metrics")]
            metrics: ParallelStateRootMetrics::default(),
        }
    }
}
impl<Factory> ParallelStateRoot<Factory>
where
    Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + Send + Sync + 'static,
{
    /// Calculate incremental state root in parallel.
    pub fn incremental_root(self) -> Result<B256, ParallelStateRootError> {
        self.calculate(false).map(|(root, _)| root)
    }

    /// Calculate incremental state root with updates in parallel.
    pub fn incremental_root_with_updates(
        self,
    ) -> Result<(B256, TrieUpdates), ParallelStateRootError> {
        self.calculate(true)
    }

    /// Computes the state root by calculating storage roots in parallel for modified accounts,
    /// then walking the state trie to build the final state root hash.
    ///
    /// When `retain_updates` is false, the returned [`TrieUpdates`] carry no retained changes.
    fn calculate(
        self,
        retain_updates: bool,
    ) -> Result<(B256, TrieUpdates), ParallelStateRootError> {
        let mut tracker = ParallelTrieTracker::default();
        // Sorted in-memory overlays are shared with every storage-root task via `Arc`.
        let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted());
        let hashed_state_sorted = Arc::new(self.input.state.into_sorted());
        let prefix_sets = self.input.prefix_sets.freeze();
        let storage_root_targets = StorageRootTargets::new(
            prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())),
            prefix_sets.storage_prefix_sets,
        );

        // Pre-calculate storage roots in parallel for accounts which were changed.
        tracker.set_precomputed_storage_roots(storage_root_targets.len() as u64);
        debug!(target: "trie::parallel_state_root", len = storage_root_targets.len(), "pre-calculating storage roots");
        let mut storage_roots = HashMap::with_capacity(storage_root_targets.len());
        // Get runtime handle once outside the loop
        let handle = get_runtime_handle();
        for (hashed_address, prefix_set) in
            storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address)
        {
            let view = self.view.clone();
            let hashed_state_sorted = hashed_state_sorted.clone();
            let trie_nodes_sorted = trie_nodes_sorted.clone();
            #[cfg(feature = "metrics")]
            let metrics = self.metrics.storage_trie.clone();

            // Capacity-1 channel: each task sends exactly one result.
            let (tx, rx) = mpsc::sync_channel(1);

            // Spawn a blocking task to calculate account's storage root from database I/O
            drop(handle.spawn_blocking(move || {
                let result = (|| -> Result<_, ParallelStateRootError> {
                    // Each task opens its own read-only provider on the consistent view.
                    let provider_ro = view.provider_ro()?;
                    let trie_cursor_factory = InMemoryTrieCursorFactory::new(
                        DatabaseTrieCursorFactory::new(provider_ro.tx_ref()),
                        &trie_nodes_sorted,
                    );
                    let hashed_state = HashedPostStateCursorFactory::new(
                        DatabaseHashedCursorFactory::new(provider_ro.tx_ref()),
                        &hashed_state_sorted,
                    );
                    Ok(StorageRoot::new_hashed(
                        trie_cursor_factory,
                        hashed_state,
                        hashed_address,
                        prefix_set,
                        #[cfg(feature = "metrics")]
                        metrics,
                    )
                    .calculate(retain_updates)?)
                })();
                // The receiver may have been dropped if the caller bailed out early.
                let _ = tx.send(result);
            }));
            storage_roots.insert(hashed_address, rx);
        }

        trace!(target: "trie::parallel_state_root", "calculating state root");
        let mut trie_updates = TrieUpdates::default();

        let provider_ro = self.view.provider_ro()?;
        let trie_cursor_factory = InMemoryTrieCursorFactory::new(
            DatabaseTrieCursorFactory::new(provider_ro.tx_ref()),
            &trie_nodes_sorted,
        );
        let hashed_cursor_factory = HashedPostStateCursorFactory::new(
            DatabaseHashedCursorFactory::new(provider_ro.tx_ref()),
            &hashed_state_sorted,
        );

        let walker = TrieWalker::<_>::state_trie(
            trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?,
            prefix_sets.account_prefix_set,
        )
        .with_deletions_retained(retain_updates);
        let mut account_node_iter = TrieNodeIter::state_trie(
            walker,
            hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?,
        );

        let mut hash_builder = HashBuilder::default().with_updates(retain_updates);
        // Reused RLP buffer for account leaf encoding, cleared per leaf.
        let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE);
        while let Some(node) = account_node_iter.try_next().map_err(ProviderError::Database)? {
            match node {
                TrieElement::Branch(node) => {
                    hash_builder.add_branch(node.key, node.value, node.children_are_in_trie);
                }
                TrieElement::Leaf(hashed_address, account) => {
                    let storage_root_result = match storage_roots.remove(&hashed_address) {
                        // Block until the pre-spawned task for this account completes.
                        Some(rx) => rx.recv().map_err(|_| {
                            ParallelStateRootError::StorageRoot(StorageRootError::Database(
                                DatabaseError::Other(format!(
                                    "channel closed for {hashed_address}"
                                )),
                            ))
                        })??,
                        // Since we do not store all intermediate nodes in the database, there might
                        // be a possibility of re-adding a non-modified leaf to the hash builder.
                        None => {
                            tracker.inc_missed_leaves();
                            StorageRoot::new_hashed(
                                trie_cursor_factory.clone(),
                                hashed_cursor_factory.clone(),
                                hashed_address,
                                Default::default(),
                                #[cfg(feature = "metrics")]
                                self.metrics.storage_trie.clone(),
                            )
                            .calculate(retain_updates)?
                        }
                    };
                    let (storage_root, _, updates) = match storage_root_result {
                        reth_trie::StorageRootProgress::Complete(root, _, updates) => (root, (), updates),
                        // `calculate` runs to completion, so Progress is never expected here.
                        reth_trie::StorageRootProgress::Progress(..) => {
                            return Err(ParallelStateRootError::StorageRoot(
                                StorageRootError::Database(DatabaseError::Other(
                                    "StorageRoot returned Progress variant in parallel trie calculation".to_string()
                                ))
                            ))
                        }
                    };
                    if retain_updates {
                        trie_updates.insert_storage_updates(hashed_address, updates);
                    }

                    account_rlp.clear();
                    let account = account.into_trie_account(storage_root);
                    account.encode(&mut account_rlp as &mut dyn BufMut);
                    let is_private = false; // account leaves are always public. Their storage leaves can be private.
                    hash_builder.add_leaf(
                        Nibbles::unpack(hashed_address),
                        &account_rlp,
                        is_private,
                    );
                }
            }
        }

        let root = hash_builder.root();

        let removed_keys = account_node_iter.walker.take_removed_keys();
        trie_updates.finalize(hash_builder, removed_keys, prefix_sets.destroyed_accounts);

        let stats = tracker.finish();

        #[cfg(feature = "metrics")]
        self.metrics.record_state_trie(stats);

        trace!(
            target: "trie::parallel_state_root",
            %root,
            duration = ?stats.duration(),
            branches_added = stats.branches_added(),
            leaves_added = stats.leaves_added(),
            missed_leaves = stats.missed_leaves(),
            precomputed_storage_roots = stats.precomputed_storage_roots(),
            "Calculated state root"
        );

        Ok((root, trie_updates))
    }
}
/// Error during parallel state root calculation.
///
/// The `#[from]` conversions allow `?` propagation from storage-root and
/// provider failures.
#[derive(Error, Debug)]
pub enum ParallelStateRootError {
    /// Error while calculating storage root.
    #[error(transparent)]
    StorageRoot(#[from] StorageRootError),
    /// Provider error.
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// Other unspecified error.
    #[error("{_0}")]
    Other(String),
}
impl From<ParallelStateRootError> for ProviderError {
    /// Converts a parallel state root error into a provider error.
    ///
    /// Provider errors pass through untouched; everything else surfaces as a
    /// database error.
    fn from(err: ParallelStateRootError) -> Self {
        match err {
            // Already a provider error — unwrap and return it as-is.
            ParallelStateRootError::Provider(provider_err) => provider_err,
            // Storage root failures carry a database error; re-wrap it directly.
            ParallelStateRootError::StorageRoot(StorageRootError::Database(db_err)) => {
                Self::Database(db_err)
            }
            // Free-form messages become "other" database errors.
            ParallelStateRootError::Other(msg) => Self::Database(DatabaseError::Other(msg)),
        }
    }
}
impl From<alloy_rlp::Error> for ParallelStateRootError {
    /// RLP decoding failures are surfaced as provider errors.
    fn from(error: alloy_rlp::Error) -> Self {
        Self::Provider(ProviderError::Rlp(error))
    }
}
/// Gets or creates a tokio runtime handle for spawning blocking tasks.
/// This ensures we always have a runtime available for I/O operations.
fn get_runtime_handle() -> Handle {
    match Handle::try_current() {
        // Prefer the runtime we are already running inside of.
        Ok(current) => current,
        // No ambient runtime: fall back to a lazily-created, process-wide one.
        Err(_) => {
            static RT: OnceLock<Runtime> = OnceLock::new();
            RT.get_or_init(|| {
                Builder::new_multi_thread()
                    // Keep the threads alive for at least the block time (12 seconds) plus
                    // buffer. This prevents the costly process of spawning new threads on
                    // every new block, and instead reuses the existing threads.
                    .thread_keep_alive(Duration::from_secs(15))
                    .build()
                    .expect("Failed to create tokio runtime")
            })
            .handle()
            .clone()
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::{keccak256, Address, U256};
    use rand::Rng;
    use reth_primitives_traits::{Account, StorageEntry};
    use reth_provider::{test_utils::create_test_provider_factory, HashingWriter};
    use reth_trie::{test_utils, HashedPostState, HashedStorage};
    use revm_state::FlaggedStorage;

    /// End-to-end check: the parallel computation must match the sequential
    /// reference implementation (`test_utils::state_root`), both for the
    /// initial state and after a round of random account/storage updates.
    #[tokio::test]
    async fn random_parallel_root() {
        let factory = create_test_provider_factory();
        let consistent_view = ConsistentDbView::new(factory.clone(), None);

        let mut rng = rand::rng();
        // Generate 100 random accounts; ~70% of them get 100 random storage slots.
        let mut state = (0..100)
            .map(|_| {
                let address = Address::random();
                let account =
                    Account { balance: U256::from(rng.random::<u64>()), ..Default::default() };
                let mut storage = HashMap::<B256, alloy_primitives::FlaggedStorage>::default();
                let has_storage = rng.random_bool(0.7);
                if has_storage {
                    for _ in 0..100 {
                        storage.insert(
                            B256::from(U256::from(rng.random::<u64>())),
                            U256::from(rng.random::<u64>()).into(),
                        );
                    }
                }
                (address, (account, storage))
            })
            .collect::<HashMap<_, _>>();

        {
            // Seed the database with the hashed state and commit.
            let provider_rw = factory.provider_rw().unwrap();
            provider_rw
                .insert_account_for_hashing(
                    state.iter().map(|(address, (account, _))| (*address, Some(*account))),
                )
                .unwrap();
            provider_rw
                .insert_storage_for_hashing(state.iter().map(|(address, (_, storage))| {
                    (
                        *address,
                        storage
                            .iter()
                            .map(|(slot, value)| StorageEntry { key: *slot, value: *value }),
                    )
                }))
                .unwrap();
            provider_rw.commit().unwrap();
        }

        // Empty trie input: the root comes purely from the database.
        assert_eq!(
            ParallelStateRoot::new(consistent_view.clone(), Default::default())
                .incremental_root()
                .unwrap(),
            test_utils::state_root(state.clone())
        );

        // Randomly mutate ~50% of accounts and ~30% of storages, collecting the
        // changes into an in-memory `HashedPostState` overlay.
        let mut hashed_state = HashedPostState::default();
        for (address, (account, storage)) in &mut state {
            let hashed_address = keccak256(address);

            let should_update_account = rng.random_bool(0.5);
            if should_update_account {
                *account = Account { balance: U256::from(rng.random::<u64>()), ..*account };
                hashed_state.accounts.insert(hashed_address, Some(*account));
            }

            let should_update_storage = rng.random_bool(0.3);
            if should_update_storage {
                for (slot, value) in storage.iter_mut() {
                    let hashed_slot = keccak256(slot);
                    *value = U256::from(rng.random::<u64>()).into();
                    hashed_state
                        .storages
                        .entry(hashed_address)
                        .or_insert_with(HashedStorage::default)
                        .storage
                        .insert(hashed_slot, FlaggedStorage::from(*value));
                }
            }
        }

        // The incremental root over the overlay must still match the reference.
        assert_eq!(
            ParallelStateRoot::new(consistent_view, TrieInput::from_state(hashed_state))
                .incremental_root()
                .unwrap(),
            test_utils::state_root(state)
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/trie/parallel/src/metrics.rs | crates/trie/parallel/src/metrics.rs | use crate::stats::ParallelTrieStats;
use metrics::Histogram;
use reth_metrics::Metrics;
use reth_trie::{metrics::TrieRootMetrics, TrieType};
/// Parallel state root metrics.
///
/// Groups the state-trie, storage-trie and parallel-specific metric sets used by
/// [`ParallelStateRoot`](crate::root::ParallelStateRoot).
#[derive(Debug)]
pub struct ParallelStateRootMetrics {
    /// State trie metrics.
    pub state_trie: TrieRootMetrics,
    /// Parallel trie metrics.
    pub parallel: ParallelTrieMetrics,
    /// Storage trie metrics.
    pub storage_trie: TrieRootMetrics,
}
impl Default for ParallelStateRootMetrics {
    /// Builds the metric set, tagging each sub-metric with the trie it observes.
    fn default() -> Self {
        let state_trie = TrieRootMetrics::new(TrieType::State);
        let parallel = ParallelTrieMetrics::new_with_labels(&[("type", "root")]);
        let storage_trie = TrieRootMetrics::new(TrieType::Storage);
        Self { state_trie, parallel, storage_trie }
    }
}
impl ParallelStateRootMetrics {
    /// Record state trie metrics.
    ///
    /// Records the general trie stats into `state_trie` and the parallel-specific
    /// counters into `parallel`.
    pub fn record_state_trie(&self, stats: ParallelTrieStats) {
        self.state_trie.record(stats.trie_stats());
        self.parallel.record(stats);
    }
}
/// Parallel trie metrics (histograms recorded once per root calculation).
#[derive(Metrics)]
#[metrics(scope = "trie_parallel")]
pub struct ParallelTrieMetrics {
    /// The number of storage roots computed in parallel.
    pub precomputed_storage_roots: Histogram,
    /// The number of leaves for which we did not pre-compute the storage roots.
    pub missed_leaves: Histogram,
}
impl ParallelTrieMetrics {
    /// Record parallel trie metrics.
    ///
    /// Histograms take floats, so the u64 counters are cast before recording.
    pub fn record(&self, stats: ParallelTrieStats) {
        let precomputed = stats.precomputed_storage_roots() as f64;
        self.precomputed_storage_roots.record(precomputed);
        let missed = stats.missed_leaves() as f64;
        self.missed_leaves.record(missed);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/trie/parallel/src/proof_task.rs | crates/trie/parallel/src/proof_task.rs | //! A Task that manages sending proof requests to a number of tasks that have longer-running
//! database transactions.
//!
//! The [`ProofTaskManager`] ensures that there are a max number of currently executing proof tasks,
//! and is responsible for managing the fixed number of database transactions created at the start
//! of the task.
//!
//! Individual [`ProofTaskTx`] instances manage a dedicated [`InMemoryTrieCursorFactory`] and
//! [`HashedPostStateCursorFactory`], which are each backed by a database transaction.
use crate::root::ParallelStateRootError;
use alloy_primitives::{map::B256Set, B256};
use reth_db_api::transaction::DbTx;
use reth_execution_errors::SparseTrieError;
use reth_provider::{
providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx,
ProviderResult,
};
use reth_trie::{
hashed_cursor::HashedPostStateCursorFactory,
prefix_set::TriePrefixSetsMut,
proof::{ProofTrieNodeProviderFactory, StorageProof},
trie_cursor::InMemoryTrieCursorFactory,
updates::TrieUpdatesSorted,
DecodedStorageMultiProof, HashedPostStateSorted, Nibbles,
};
use reth_trie_common::{
added_removed_keys::MultiAddedRemovedKeys,
prefix_set::{PrefixSet, PrefixSetMut},
};
use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory};
use std::{
collections::VecDeque,
sync::{
atomic::{AtomicUsize, Ordering},
mpsc::{channel, Receiver, SendError, Sender},
Arc,
},
time::Instant,
};
use tokio::runtime::Handle;
use tracing::debug;
#[cfg(feature = "metrics")]
use crate::proof_task_metrics::ProofTaskMetrics;
/// Result of a storage multiproof computation, decoded from RLP.
type StorageProofResult = Result<DecodedStorageMultiProof, ParallelStateRootError>;
/// Result of a blinded trie node lookup.
type TrieNodeProviderResult = Result<Option<RevealedNode>, SparseTrieError>;
/// A task that manages sending multiproof requests to a number of tasks that have longer-running
/// database transactions
#[derive(Debug)]
pub struct ProofTaskManager<Factory: DatabaseProviderFactory> {
    /// Max number of database transactions to create
    max_concurrency: usize,
    /// Number of database transactions created; never exceeds `max_concurrency`
    /// (see `get_or_create_tx`).
    total_transactions: usize,
    /// Consistent view provider used for creating transactions on-demand
    view: ConsistentDbView<Factory>,
    /// Proof task context shared across all proof tasks
    task_ctx: ProofTaskCtx,
    /// Proof tasks pending execution
    pending_tasks: VecDeque<ProofTaskKind>,
    /// The underlying handle from which to spawn proof tasks
    executor: Handle,
    /// The proof task transactions, containing owned cursor factories that are reused for proof
    /// calculation. Acts as a pool: transactions are popped when a task spawns and
    /// pushed back when it completes.
    proof_task_txs: Vec<ProofTaskTx<FactoryTx<Factory>>>,
    /// A receiver for new proof tasks.
    proof_task_rx: Receiver<ProofTaskMessage<FactoryTx<Factory>>>,
    /// A sender for sending back transactions.
    tx_sender: Sender<ProofTaskMessage<FactoryTx<Factory>>>,
    /// The number of active handles.
    ///
    /// Incremented in [`ProofTaskManagerHandle::new`] and decremented in
    /// [`ProofTaskManagerHandle::drop`].
    active_handles: Arc<AtomicUsize>,
    /// Metrics tracking blinded node fetches.
    #[cfg(feature = "metrics")]
    metrics: ProofTaskMetrics,
}
impl<Factory: DatabaseProviderFactory> ProofTaskManager<Factory> {
    /// Creates a new [`ProofTaskManager`] with the given max concurrency.
    ///
    /// Note: no database transactions are created here — they are created lazily,
    /// on demand, up to `max_concurrency` (see `get_or_create_tx`).
    pub fn new(
        executor: Handle,
        view: ConsistentDbView<Factory>,
        task_ctx: ProofTaskCtx,
        max_concurrency: usize,
    ) -> Self {
        let (tx_sender, proof_task_rx) = channel();
        Self {
            max_concurrency,
            total_transactions: 0,
            view,
            task_ctx,
            pending_tasks: VecDeque::new(),
            executor,
            proof_task_txs: Vec::new(),
            proof_task_rx,
            tx_sender,
            active_handles: Arc::new(AtomicUsize::new(0)),
            #[cfg(feature = "metrics")]
            metrics: ProofTaskMetrics::default(),
        }
    }

    /// Returns a handle for sending new proof tasks to the [`ProofTaskManager`].
    pub fn handle(&self) -> ProofTaskManagerHandle<FactoryTx<Factory>> {
        ProofTaskManagerHandle::new(self.tx_sender.clone(), self.active_handles.clone())
    }
}
impl<Factory> ProofTaskManager<Factory>
where
    Factory: DatabaseProviderFactory<Provider: BlockReader> + 'static,
{
    /// Inserts the task into the pending tasks queue.
    pub fn queue_proof_task(&mut self, task: ProofTaskKind) {
        self.pending_tasks.push_back(task);
    }

    /// Gets either the next available transaction, or creates a new one if all are in use and the
    /// total number of transactions created is less than the max concurrency.
    ///
    /// Returns `Ok(None)` when the concurrency limit is reached and no pooled
    /// transaction is free.
    pub fn get_or_create_tx(&mut self) -> ProviderResult<Option<ProofTaskTx<FactoryTx<Factory>>>> {
        if let Some(proof_task_tx) = self.proof_task_txs.pop() {
            return Ok(Some(proof_task_tx));
        }

        // if we can create a new tx within our concurrency limits, create one on-demand
        if self.total_transactions < self.max_concurrency {
            let provider_ro = self.view.provider_ro()?;
            let tx = provider_ro.into_tx();
            self.total_transactions += 1;
            // The post-increment count doubles as the new transaction's tracing id.
            return Ok(Some(ProofTaskTx::new(tx, self.task_ctx.clone(), self.total_transactions)));
        }

        Ok(None)
    }

    /// Spawns the next queued proof task on the executor with the given input, if there are any
    /// transactions available.
    ///
    /// This will return an error if a transaction must be created on-demand and the consistent view
    /// provider fails.
    pub fn try_spawn_next(&mut self) -> ProviderResult<()> {
        let Some(task) = self.pending_tasks.pop_front() else { return Ok(()) };

        let Some(proof_task_tx) = self.get_or_create_tx()? else {
            // if there are no txs available, requeue the proof task
            self.pending_tasks.push_front(task);
            return Ok(())
        };

        let tx_sender = self.tx_sender.clone();
        // The task consumes the tx and is responsible for returning it to the pool
        // via `tx_sender` when done.
        self.executor.spawn_blocking(move || match task {
            ProofTaskKind::StorageProof(input, sender) => {
                proof_task_tx.storage_proof(input, sender, tx_sender);
            }
            ProofTaskKind::BlindedAccountNode(path, sender) => {
                proof_task_tx.blinded_account_node(path, sender, tx_sender);
            }
            ProofTaskKind::BlindedStorageNode(account, path, sender) => {
                proof_task_tx.blinded_storage_node(account, path, sender, tx_sender);
            }
        });

        Ok(())
    }

    /// Loops, managing the proof tasks, and sending new tasks to the executor.
    ///
    /// Terminates on an explicit [`ProofTaskMessage::Terminate`] or when all
    /// senders are dropped.
    pub fn run(mut self) -> ProviderResult<()> {
        loop {
            match self.proof_task_rx.recv() {
                Ok(message) => match message {
                    ProofTaskMessage::QueueTask(task) => {
                        // Track metrics for blinded node requests
                        #[cfg(feature = "metrics")]
                        match &task {
                            ProofTaskKind::BlindedAccountNode(_, _) => {
                                self.metrics.account_nodes += 1;
                            }
                            ProofTaskKind::BlindedStorageNode(_, _, _) => {
                                self.metrics.storage_nodes += 1;
                            }
                            _ => {}
                        }
                        // queue the task
                        self.queue_proof_task(task)
                    }
                    ProofTaskMessage::Transaction(tx) => {
                        // return the transaction to the pool
                        self.proof_task_txs.push(tx);
                    }
                    ProofTaskMessage::Terminate => {
                        // Record metrics before terminating
                        #[cfg(feature = "metrics")]
                        self.metrics.record();
                        return Ok(())
                    }
                },
                // All senders are disconnected, so we can terminate
                // However this should never happen, as this struct stores a sender
                Err(_) => return Ok(()),
            };

            // try spawning the next task
            self.try_spawn_next()?;
        }
    }
}
/// This contains all information shared between all storage proof instances.
#[derive(Debug)]
pub struct ProofTaskTx<Tx> {
    /// The tx that is reused for proof calculations.
    tx: Tx,
    /// Trie updates, prefix sets, and state updates
    task_ctx: ProofTaskCtx,
    /// Identifier for the tx within the context of a single [`ProofTaskManager`], used only for
    /// tracing.
    id: usize,
}

impl<Tx> ProofTaskTx<Tx> {
    /// Initializes a [`ProofTaskTx`] using the given transaction and a [`ProofTaskCtx`]. The id is
    /// used only for tracing.
    const fn new(tx: Tx, task_ctx: ProofTaskCtx, id: usize) -> Self {
        Self { tx, task_ctx, id }
    }
}
impl<Tx> ProofTaskTx<Tx>
where
    Tx: DbTx,
{
    /// Builds the trie and hashed-state cursor factories, layering the in-memory
    /// overlays from [`ProofTaskCtx`] on top of the database transaction.
    fn create_factories(
        &self,
    ) -> (
        InMemoryTrieCursorFactory<'_, DatabaseTrieCursorFactory<'_, Tx>>,
        HashedPostStateCursorFactory<'_, DatabaseHashedCursorFactory<'_, Tx>>,
    ) {
        let trie_cursor_factory = InMemoryTrieCursorFactory::new(
            DatabaseTrieCursorFactory::new(&self.tx),
            &self.task_ctx.nodes_sorted,
        );
        let hashed_cursor_factory = HashedPostStateCursorFactory::new(
            DatabaseHashedCursorFactory::new(&self.tx),
            &self.task_ctx.state_sorted,
        );
        (trie_cursor_factory, hashed_cursor_factory)
    }

    /// Calculates a storage proof for the given hashed address, and desired prefix set.
    ///
    /// Sends the decoded proof through `result_sender`, and returns `self` (the
    /// pooled transaction) to the manager through `tx_sender` when done.
    fn storage_proof(
        self,
        input: StorageProofInput,
        result_sender: Sender<StorageProofResult>,
        tx_sender: Sender<ProofTaskMessage<Tx>>,
    ) {
        debug!(
            target: "trie::proof_task",
            hashed_address=?input.hashed_address,
            "Starting storage proof task calculation"
        );

        let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories();

        // Fall back to an empty key set when the caller supplied none.
        let multi_added_removed_keys = input
            .multi_added_removed_keys
            .unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new()));
        let added_removed_keys = multi_added_removed_keys.get_storage(&input.hashed_address);

        let span = tracing::trace_span!(
            target: "trie::proof_task",
            "Storage proof calculation",
            hashed_address=?input.hashed_address,
            // Add a unique id because we often have parallel storage proof calculations for the
            // same hashed address, and we want to differentiate them during trace analysis.
            span_id=self.id,
        );
        let span_guard = span.enter();

        let target_slots_len = input.target_slots.len();
        let proof_start = Instant::now();
        let raw_proof_result = StorageProof::new_hashed(
            trie_cursor_factory,
            hashed_cursor_factory,
            input.hashed_address,
        )
        .with_prefix_set_mut(PrefixSetMut::from(input.prefix_set.iter().copied()))
        .with_branch_node_masks(input.with_branch_node_masks)
        .with_added_removed_keys(added_removed_keys)
        .storage_multiproof(input.target_slots)
        .map_err(|e| ParallelStateRootError::Other(e.to_string()));

        drop(span_guard);

        // Decode the raw RLP proof; decoding errors are reported as `Other`.
        let decoded_result = raw_proof_result.and_then(|raw_proof| {
            raw_proof.try_into().map_err(|e: alloy_rlp::Error| {
                ParallelStateRootError::Other(format!(
                    "Failed to decode storage proof for {}: {}",
                    input.hashed_address, e
                ))
            })
        });

        debug!(
            target: "trie::proof_task",
            hashed_address=?input.hashed_address,
            prefix_set = ?input.prefix_set.len(),
            target_slots = ?target_slots_len,
            proof_time = ?proof_start.elapsed(),
            "Completed storage proof task calculation"
        );

        // send the result back
        if let Err(error) = result_sender.send(decoded_result) {
            debug!(
                target: "trie::proof_task",
                hashed_address = ?input.hashed_address,
                ?error,
                task_time = ?proof_start.elapsed(),
                "Storage proof receiver is dropped, discarding the result"
            );
        }

        // send the tx back
        let _ = tx_sender.send(ProofTaskMessage::Transaction(self));
    }

    /// Retrieves blinded account node by path.
    ///
    /// Like `storage_proof`, returns `self` to the manager's pool when finished.
    fn blinded_account_node(
        self,
        path: Nibbles,
        result_sender: Sender<TrieNodeProviderResult>,
        tx_sender: Sender<ProofTaskMessage<Tx>>,
    ) {
        debug!(
            target: "trie::proof_task",
            ?path,
            "Starting blinded account node retrieval"
        );

        let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories();
        let blinded_provider_factory = ProofTrieNodeProviderFactory::new(
            trie_cursor_factory,
            hashed_cursor_factory,
            self.task_ctx.prefix_sets.clone(),
        );

        let start = Instant::now();
        let result = blinded_provider_factory.account_node_provider().trie_node(&path);
        debug!(
            target: "trie::proof_task",
            ?path,
            elapsed = ?start.elapsed(),
            "Completed blinded account node retrieval"
        );

        if let Err(error) = result_sender.send(result) {
            tracing::error!(
                target: "trie::proof_task",
                ?path,
                ?error,
                "Failed to send blinded account node result"
            );
        }

        // send the tx back
        let _ = tx_sender.send(ProofTaskMessage::Transaction(self));
    }

    /// Retrieves blinded storage node of the given account by path.
    ///
    /// Like `storage_proof`, returns `self` to the manager's pool when finished.
    fn blinded_storage_node(
        self,
        account: B256,
        path: Nibbles,
        result_sender: Sender<TrieNodeProviderResult>,
        tx_sender: Sender<ProofTaskMessage<Tx>>,
    ) {
        debug!(
            target: "trie::proof_task",
            ?account,
            ?path,
            "Starting blinded storage node retrieval"
        );

        let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories();
        let blinded_provider_factory = ProofTrieNodeProviderFactory::new(
            trie_cursor_factory,
            hashed_cursor_factory,
            self.task_ctx.prefix_sets.clone(),
        );

        let start = Instant::now();
        let result = blinded_provider_factory.storage_node_provider(account).trie_node(&path);
        debug!(
            target: "trie::proof_task",
            ?account,
            ?path,
            elapsed = ?start.elapsed(),
            "Completed blinded storage node retrieval"
        );

        if let Err(error) = result_sender.send(result) {
            tracing::error!(
                target: "trie::proof_task",
                ?account,
                ?path,
                ?error,
                "Failed to send blinded storage node result"
            );
        }

        // send the tx back
        let _ = tx_sender.send(ProofTaskMessage::Transaction(self));
    }
}
/// This represents an input for a storage proof.
#[derive(Debug)]
pub struct StorageProofInput {
    /// The hashed address for which the proof is calculated.
    hashed_address: B256,
    /// The prefix set for the proof calculation.
    prefix_set: PrefixSet,
    /// The target slots for the proof calculation.
    target_slots: B256Set,
    /// Whether or not to collect branch node masks
    with_branch_node_masks: bool,
    /// Provided by the user to give the necessary context to retain extra proofs.
    /// `None` is treated as an empty key set by the proof task.
    multi_added_removed_keys: Option<Arc<MultiAddedRemovedKeys>>,
}

impl StorageProofInput {
    /// Creates a new [`StorageProofInput`] with the given hashed address, prefix set, and target
    /// slots.
    pub const fn new(
        hashed_address: B256,
        prefix_set: PrefixSet,
        target_slots: B256Set,
        with_branch_node_masks: bool,
        multi_added_removed_keys: Option<Arc<MultiAddedRemovedKeys>>,
    ) -> Self {
        Self {
            hashed_address,
            prefix_set,
            target_slots,
            with_branch_node_masks,
            multi_added_removed_keys,
        }
    }
}
/// Data used for initializing cursor factories that is shared across all storage proof instances.
///
/// Cheap to clone: all fields are `Arc`s.
#[derive(Debug, Clone)]
pub struct ProofTaskCtx {
    /// The sorted collection of cached in-memory intermediate trie nodes that can be reused for
    /// computation.
    nodes_sorted: Arc<TrieUpdatesSorted>,
    /// The sorted in-memory overlay hashed state.
    state_sorted: Arc<HashedPostStateSorted>,
    /// The collection of prefix sets for the computation. Since the prefix sets _always_
    /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here,
    /// if we have cached nodes for them.
    prefix_sets: Arc<TriePrefixSetsMut>,
}

impl ProofTaskCtx {
    /// Creates a new [`ProofTaskCtx`] with the given sorted nodes and state.
    pub const fn new(
        nodes_sorted: Arc<TrieUpdatesSorted>,
        state_sorted: Arc<HashedPostStateSorted>,
        prefix_sets: Arc<TriePrefixSetsMut>,
    ) -> Self {
        Self { nodes_sorted, state_sorted, prefix_sets }
    }
}
/// Message used to communicate with [`ProofTaskManager`].
#[derive(Debug)]
pub enum ProofTaskMessage<Tx> {
/// A request to queue a proof task.
QueueTask(ProofTaskKind),
/// A returned database transaction.
Transaction(ProofTaskTx<Tx>),
/// A request to terminate the proof task manager.
Terminate,
}
/// Proof task kind.
///
/// When queueing a task using [`ProofTaskMessage::QueueTask`], this enum
/// specifies the type of proof task to be executed.
#[derive(Debug)]
pub enum ProofTaskKind {
/// A storage proof request.
StorageProof(StorageProofInput, Sender<StorageProofResult>),
/// A blinded account node request.
BlindedAccountNode(Nibbles, Sender<TrieNodeProviderResult>),
/// A blinded storage node request.
BlindedStorageNode(B256, Nibbles, Sender<TrieNodeProviderResult>),
}
/// A handle that wraps a single proof task sender that sends a terminate message on `Drop` if the
/// number of active handles went to zero.
#[derive(Debug)]
pub struct ProofTaskManagerHandle<Tx> {
/// The sender for the proof task manager.
sender: Sender<ProofTaskMessage<Tx>>,
/// The number of active handles.
active_handles: Arc<AtomicUsize>,
}
impl<Tx> ProofTaskManagerHandle<Tx> {
/// Creates a new [`ProofTaskManagerHandle`] with the given sender.
pub fn new(sender: Sender<ProofTaskMessage<Tx>>, active_handles: Arc<AtomicUsize>) -> Self {
active_handles.fetch_add(1, Ordering::SeqCst);
Self { sender, active_handles }
}
/// Queues a task to the proof task manager.
pub fn queue_task(&self, task: ProofTaskKind) -> Result<(), SendError<ProofTaskMessage<Tx>>> {
self.sender.send(ProofTaskMessage::QueueTask(task))
}
/// Terminates the proof task manager.
pub fn terminate(&self) {
let _ = self.sender.send(ProofTaskMessage::Terminate);
}
}
impl<Tx> Clone for ProofTaskManagerHandle<Tx> {
fn clone(&self) -> Self {
Self::new(self.sender.clone(), self.active_handles.clone())
}
}
impl<Tx> Drop for ProofTaskManagerHandle<Tx> {
fn drop(&mut self) {
// Decrement the number of active handles and terminate the manager if it was the last
// handle.
if self.active_handles.fetch_sub(1, Ordering::SeqCst) == 1 {
self.terminate();
}
}
}
impl<Tx: DbTx> TrieNodeProviderFactory for ProofTaskManagerHandle<Tx> {
type AccountNodeProvider = ProofTaskTrieNodeProvider<Tx>;
type StorageNodeProvider = ProofTaskTrieNodeProvider<Tx>;
fn account_node_provider(&self) -> Self::AccountNodeProvider {
ProofTaskTrieNodeProvider::AccountNode { sender: self.sender.clone() }
}
fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider {
ProofTaskTrieNodeProvider::StorageNode { account, sender: self.sender.clone() }
}
}
/// Trie node provider for retrieving trie nodes by path.
#[derive(Debug)]
pub enum ProofTaskTrieNodeProvider<Tx> {
/// Blinded account trie node provider.
AccountNode {
/// Sender to the proof task.
sender: Sender<ProofTaskMessage<Tx>>,
},
/// Blinded storage trie node provider.
StorageNode {
/// Target account.
account: B256,
/// Sender to the proof task.
sender: Sender<ProofTaskMessage<Tx>>,
},
}
impl<Tx: DbTx> TrieNodeProvider for ProofTaskTrieNodeProvider<Tx> {
fn trie_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
let (tx, rx) = channel();
match self {
Self::AccountNode { sender } => {
let _ = sender.send(ProofTaskMessage::QueueTask(
ProofTaskKind::BlindedAccountNode(*path, tx),
));
}
Self::StorageNode { sender, account } => {
let _ = sender.send(ProofTaskMessage::QueueTask(
ProofTaskKind::BlindedStorageNode(*account, *path, tx),
));
}
}
rx.recv().unwrap()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/trie/parallel/src/storage_root_targets.rs | crates/trie/parallel/src/storage_root_targets.rs | use alloy_primitives::{map::B256Map, B256};
use derive_more::{Deref, DerefMut};
use reth_trie::prefix_set::PrefixSet;
/// Target accounts with corresponding prefix sets for storage root calculation.
#[derive(Deref, DerefMut, Debug)]
pub struct StorageRootTargets(B256Map<PrefixSet>);
impl StorageRootTargets {
/// Create new storage root targets from updated post state accounts
/// and storage prefix sets.
///
/// NOTE: Since updated accounts and prefix sets always overlap,
/// it's important that iterator over storage prefix sets takes precedence.
pub fn new(
changed_accounts: impl IntoIterator<Item = B256>,
storage_prefix_sets: impl IntoIterator<Item = (B256, PrefixSet)>,
) -> Self {
Self(
changed_accounts
.into_iter()
.map(|address| (address, PrefixSet::default()))
.chain(storage_prefix_sets)
.collect(),
)
}
}
impl IntoIterator for StorageRootTargets {
type Item = (B256, PrefixSet);
type IntoIter = std::collections::hash_map::IntoIter<B256, PrefixSet>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl rayon::iter::IntoParallelIterator for StorageRootTargets {
type Iter = rayon::collections::hash_map::IntoIter<B256, PrefixSet>;
type Item = (B256, PrefixSet);
fn into_par_iter(self) -> Self::Iter {
self.0.into_par_iter()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/trie/parallel/src/proof_task_metrics.rs | crates/trie/parallel/src/proof_task_metrics.rs | use reth_metrics::{metrics::Histogram, Metrics};
/// Metrics for blinded node fetching for the duration of the proof task manager.
#[derive(Clone, Debug, Default)]
pub struct ProofTaskMetrics {
/// The actual metrics for blinded nodes.
pub task_metrics: ProofTaskTrieMetrics,
/// Count of blinded account node requests.
pub account_nodes: usize,
/// Count of blinded storage node requests.
pub storage_nodes: usize,
}
impl ProofTaskMetrics {
/// Record the blinded node counts into the histograms.
pub fn record(&self) {
self.task_metrics.record_account_nodes(self.account_nodes);
self.task_metrics.record_storage_nodes(self.storage_nodes);
}
}
/// Metrics for the proof task.
#[derive(Clone, Metrics)]
#[metrics(scope = "trie.proof_task")]
pub struct ProofTaskTrieMetrics {
/// A histogram for the number of blinded account nodes fetched.
blinded_account_nodes: Histogram,
/// A histogram for the number of blinded storage nodes fetched.
blinded_storage_nodes: Histogram,
}
impl ProofTaskTrieMetrics {
/// Record account nodes fetched.
pub fn record_account_nodes(&self, count: usize) {
self.blinded_account_nodes.record(count as f64);
}
/// Record storage nodes fetched.
pub fn record_storage_nodes(&self, count: usize) {
self.blinded_storage_nodes.record(count as f64);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/trie/parallel/benches/root.rs | crates/trie/parallel/benches/root.rs | #![allow(missing_docs, unreachable_pub)]
use alloy_primitives::B256;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use proptest_arbitrary_interop::arb;
use reth_primitives_traits::Account;
use reth_provider::{
providers::ConsistentDbView, test_utils::create_test_provider_factory, StateWriter, TrieWriter,
};
use reth_trie::{
hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot,
TrieInput,
};
use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot};
use reth_trie_parallel::root::ParallelStateRoot;
use revm_state::FlaggedStorage;
use std::collections::HashMap;
pub fn calculate_state_root(c: &mut Criterion) {
let mut group = c.benchmark_group("Calculate State Root");
group.sample_size(20);
for size in [1_000, 3_000, 5_000, 10_000] {
// Too slow.
#[expect(unexpected_cfgs)]
if cfg!(codspeed) && size > 3_000 {
continue;
}
let (db_state, updated_state) = generate_test_data(size);
let provider_factory = create_test_provider_factory();
{
let provider_rw = provider_factory.provider_rw().unwrap();
provider_rw.write_hashed_state(&db_state.into_sorted()).unwrap();
let (_, updates) =
StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap();
provider_rw.write_trie_updates(&updates).unwrap();
provider_rw.commit().unwrap();
}
let view = ConsistentDbView::new(provider_factory.clone(), None);
// state root
group.bench_function(BenchmarkId::new("sync root", size), |b| {
b.iter_with_setup(
|| {
let sorted_state = updated_state.clone().into_sorted();
let prefix_sets = updated_state.construct_prefix_sets().freeze();
let provider = provider_factory.provider().unwrap();
(provider, sorted_state, prefix_sets)
},
|(provider, sorted_state, prefix_sets)| {
let hashed_cursor_factory = HashedPostStateCursorFactory::new(
DatabaseHashedCursorFactory::new(provider.tx_ref()),
&sorted_state,
);
StateRoot::from_tx(provider.tx_ref())
.with_hashed_cursor_factory(hashed_cursor_factory)
.with_prefix_sets(prefix_sets)
.root()
},
)
});
// parallel root
group.bench_function(BenchmarkId::new("parallel root", size), |b| {
b.iter_with_setup(
|| {
ParallelStateRoot::new(
view.clone(),
TrieInput::from_state(updated_state.clone()),
)
},
|calculator| calculator.incremental_root(),
);
});
}
}
fn generate_test_data(size: usize) -> (HashedPostState, HashedPostState) {
let storage_size = 1_000;
let mut runner = TestRunner::deterministic();
use proptest::{collection::hash_map, sample::subsequence};
let db_state = hash_map(
any::<B256>(),
(
arb::<Account>().prop_filter("non empty account", |a| !a.is_empty()),
hash_map(
any::<B256>(),
any::<FlaggedStorage>().prop_filter("non zero value", |v| !v.is_zero()),
storage_size,
),
),
size,
)
.new_tree(&mut runner)
.unwrap()
.current();
let keys = db_state.keys().copied().collect::<Vec<_>>();
let keys_to_update = subsequence(keys, size / 2).new_tree(&mut runner).unwrap().current();
let updated_storages = keys_to_update
.into_iter()
.map(|address| {
let (_, storage) = db_state.get(&address).unwrap();
let slots = storage.keys().copied().collect::<Vec<_>>();
let slots_to_update =
subsequence(slots, storage_size / 2).new_tree(&mut runner).unwrap().current();
(
address,
slots_to_update
.into_iter()
.map(|slot| {
(slot, any::<FlaggedStorage>().new_tree(&mut runner).unwrap().current())
})
.collect::<HashMap<_, _>>(),
)
})
.collect::<HashMap<_, _>>();
(
HashedPostState::default()
.with_accounts(
db_state.iter().map(|(address, (account, _))| (*address, Some(*account))),
)
.with_storages(db_state.into_iter().map(|(address, (_, storage))| {
(address, HashedStorage::from_iter(false, storage))
})),
HashedPostState::default().with_storages(
updated_storages
.into_iter()
.map(|(address, storage)| (address, HashedStorage::from_iter(false, storage))),
),
)
}
criterion_group!(state_root, calculate_state_root);
criterion_main!(state_root);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/setup.rs | crates/node/builder/src/setup.rs | //! Helpers for setting up parts of the node.
use std::sync::Arc;
use crate::BlockTy;
use alloy_primitives::{BlockNumber, B256};
use reth_config::{config::StageConfig, PruneConfig};
use reth_consensus::{ConsensusError, FullConsensus};
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder,
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
};
use reth_evm::ConfigureEvm;
use reth_exex::ExExManagerHandle;
use reth_network_p2p::{
bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient,
};
use reth_node_api::HeaderTy;
use reth_provider::{providers::ProviderNodeTypes, ProviderFactory};
use reth_stages::{
prelude::DefaultStages,
stages::{EraImportSource, ExecutionStage},
Pipeline, StageSet,
};
use reth_static_file::StaticFileProducer;
use reth_tasks::TaskExecutor;
use reth_tracing::tracing::debug;
use tokio::sync::watch;
/// Constructs a [Pipeline] that's wired to the network
#[expect(clippy::too_many_arguments)]
pub fn build_networked_pipeline<N, Client, Evm>(
config: &StageConfig,
client: Client,
consensus: Arc<dyn FullConsensus<N::Primitives, Error = ConsensusError>>,
provider_factory: ProviderFactory<N>,
task_executor: &TaskExecutor,
metrics_tx: reth_stages::MetricEventsSender,
prune_config: Option<PruneConfig>,
max_block: Option<BlockNumber>,
static_file_producer: StaticFileProducer<ProviderFactory<N>>,
evm_config: Evm,
exex_manager_handle: ExExManagerHandle<N::Primitives>,
era_import_source: Option<EraImportSource>,
) -> eyre::Result<Pipeline<N>>
where
N: ProviderNodeTypes,
Client: BlockClient<Block = BlockTy<N>> + 'static,
Evm: ConfigureEvm<Primitives = N::Primitives> + 'static,
{
// building network downloaders using the fetch client
let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers)
.build(client.clone(), consensus.clone())
.into_task_with(task_executor);
let body_downloader = BodiesDownloaderBuilder::new(config.bodies)
.build(client, consensus.clone(), provider_factory.clone())
.into_task_with(task_executor);
let pipeline = build_pipeline(
provider_factory,
config,
header_downloader,
body_downloader,
consensus,
max_block,
metrics_tx,
prune_config,
static_file_producer,
evm_config,
exex_manager_handle,
era_import_source,
)?;
Ok(pipeline)
}
/// Builds the [Pipeline] with the given [`ProviderFactory`] and downloaders.
#[expect(clippy::too_many_arguments)]
pub fn build_pipeline<N, H, B, Evm>(
provider_factory: ProviderFactory<N>,
stage_config: &StageConfig,
header_downloader: H,
body_downloader: B,
consensus: Arc<dyn FullConsensus<N::Primitives, Error = ConsensusError>>,
max_block: Option<u64>,
metrics_tx: reth_stages::MetricEventsSender,
prune_config: Option<PruneConfig>,
static_file_producer: StaticFileProducer<ProviderFactory<N>>,
evm_config: Evm,
exex_manager_handle: ExExManagerHandle<N::Primitives>,
era_import_source: Option<EraImportSource>,
) -> eyre::Result<Pipeline<N>>
where
N: ProviderNodeTypes,
H: HeaderDownloader<Header = HeaderTy<N>> + 'static,
B: BodyDownloader<Block = BlockTy<N>> + 'static,
Evm: ConfigureEvm<Primitives = N::Primitives> + 'static,
{
let mut builder = Pipeline::<N>::builder();
if let Some(max_block) = max_block {
debug!(target: "reth::cli", max_block, "Configuring builder to use max block");
builder = builder.with_max_block(max_block)
}
let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
let prune_modes = prune_config.map(|prune| prune.segments).unwrap_or_default();
let pipeline = builder
.with_tip_sender(tip_tx)
.with_metrics_tx(metrics_tx)
.add_stages(
DefaultStages::new(
provider_factory.clone(),
tip_rx,
Arc::clone(&consensus),
header_downloader,
body_downloader,
evm_config.clone(),
stage_config.clone(),
prune_modes,
era_import_source,
)
.set(ExecutionStage::new(
evm_config,
consensus,
stage_config.execution.into(),
stage_config.execution_external_clean_threshold(),
exex_manager_handle,
)),
)
.build(provider_factory, static_file_producer);
Ok(pipeline)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/node.rs | crates/node/builder/src/node.rs | // re-export the node api types
pub use reth_node_api::{FullNodeTypes, NodeTypes};
use crate::{components::NodeComponentsBuilder, rpc::RethRpcAddOns, NodeAdapter, NodeAddOns};
use reth_node_api::{EngineTypes, FullNodeComponents, PayloadTypes};
use reth_node_core::{
dirs::{ChainPath, DataDirPath},
node_config::NodeConfig,
};
use reth_payload_builder::PayloadBuilderHandle;
use reth_provider::ChainSpecProvider;
use reth_rpc_api::EngineApiClient;
use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle};
use reth_tasks::TaskExecutor;
use std::{
fmt::Debug,
marker::PhantomData,
ops::{Deref, DerefMut},
sync::Arc,
};
/// A helper type to obtain components for a given node when [`FullNodeTypes::Types`] is a [`Node`]
/// implementation.
pub type ComponentsFor<N> = <<<N as FullNodeTypes>::Types as Node<N>>::ComponentsBuilder as NodeComponentsBuilder<N>>::Components;
/// A [`crate::Node`] is a [`NodeTypes`] that comes with preconfigured components.
///
/// This can be used to configure the builder with a preset of components.
pub trait Node<N: FullNodeTypes>: NodeTypes + Clone {
/// The type that builds the node's components.
type ComponentsBuilder: NodeComponentsBuilder<N>;
/// Exposes the customizable node add-on types.
type AddOns: NodeAddOns<
NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
>;
/// Returns a [`NodeComponentsBuilder`] for the node.
fn components_builder(&self) -> Self::ComponentsBuilder;
/// Returns the node add-ons.
fn add_ons(&self) -> Self::AddOns;
}
/// A [`Node`] type builder
#[derive(Clone, Default, Debug)]
pub struct AnyNode<N = (), C = (), AO = ()>(PhantomData<N>, C, AO);
impl<N, C, AO> AnyNode<N, C, AO> {
/// Configures the types of the node.
pub fn types<T>(self) -> AnyNode<T, C, AO> {
AnyNode(PhantomData, self.1, self.2)
}
/// Sets the node components builder.
pub fn components_builder<T>(self, value: T) -> AnyNode<N, T, AO> {
AnyNode(PhantomData, value, self.2)
}
/// Sets the node add-ons.
pub fn add_ons<T>(self, value: T) -> AnyNode<N, C, T> {
AnyNode(PhantomData, self.1, value)
}
}
impl<N, C, AO> NodeTypes for AnyNode<N, C, AO>
where
N: FullNodeTypes,
C: Clone + Debug + Send + Sync + Unpin + 'static,
AO: Clone + Debug + Send + Sync + Unpin + 'static,
{
type Primitives = <N::Types as NodeTypes>::Primitives;
type ChainSpec = <N::Types as NodeTypes>::ChainSpec;
type Storage = <N::Types as NodeTypes>::Storage;
type Payload = <N::Types as NodeTypes>::Payload;
}
impl<N, C, AO> Node<N> for AnyNode<N, C, AO>
where
N: FullNodeTypes + Clone,
C: NodeComponentsBuilder<N> + Clone + Debug + Sync + Unpin + 'static,
AO: NodeAddOns<NodeAdapter<N, C::Components>> + Clone + Debug + Sync + Unpin + 'static,
{
type ComponentsBuilder = C;
type AddOns = AO;
fn components_builder(&self) -> Self::ComponentsBuilder {
self.1.clone()
}
fn add_ons(&self) -> Self::AddOns {
self.2.clone()
}
}
/// The launched node with all components including RPC handlers.
///
/// This can be used to interact with the launched node.
#[derive(Debug)]
pub struct FullNode<Node: FullNodeComponents, AddOns: NodeAddOns<Node>> {
/// The evm configuration.
pub evm_config: Node::Evm,
/// The node's transaction pool.
pub pool: Node::Pool,
/// Handle to the node's network.
pub network: Node::Network,
/// Provider to interact with the node's database
pub provider: Node::Provider,
/// Handle to the node's payload builder service.
pub payload_builder_handle: PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload>,
/// Task executor for the node.
pub task_executor: TaskExecutor,
/// The initial node config.
pub config: NodeConfig<<Node::Types as NodeTypes>::ChainSpec>,
/// The data dir of the node.
pub data_dir: ChainPath<DataDirPath>,
/// The handle to launched add-ons
pub add_ons_handle: AddOns::Handle,
}
impl<Node: FullNodeComponents, AddOns: NodeAddOns<Node>> Clone for FullNode<Node, AddOns> {
fn clone(&self) -> Self {
Self {
evm_config: self.evm_config.clone(),
pool: self.pool.clone(),
network: self.network.clone(),
provider: self.provider.clone(),
payload_builder_handle: self.payload_builder_handle.clone(),
task_executor: self.task_executor.clone(),
config: self.config.clone(),
data_dir: self.data_dir.clone(),
add_ons_handle: self.add_ons_handle.clone(),
}
}
}
impl<Payload, Node, AddOns> FullNode<Node, AddOns>
where
Payload: PayloadTypes,
Node: FullNodeComponents<Types: NodeTypes<Payload = Payload>>,
AddOns: NodeAddOns<Node>,
{
/// Returns the chain spec of the node.
pub fn chain_spec(&self) -> Arc<<Node::Types as NodeTypes>::ChainSpec> {
self.provider.chain_spec()
}
}
impl<Payload, Node, AddOns> FullNode<Node, AddOns>
where
Payload: PayloadTypes,
Node: FullNodeComponents<Types: NodeTypes<Payload = Payload>>,
AddOns: RethRpcAddOns<Node>,
{
/// Returns the [`RpcServerHandle`] to the started rpc server.
pub const fn rpc_server_handle(&self) -> &RpcServerHandle {
&self.add_ons_handle.rpc_server_handles.rpc
}
/// Returns the [`AuthServerHandle`] to the started authenticated engine API server.
pub const fn auth_server_handle(&self) -> &AuthServerHandle {
&self.add_ons_handle.rpc_server_handles.auth
}
}
impl<Engine, Node, AddOns> FullNode<Node, AddOns>
where
Engine: EngineTypes,
Node: FullNodeComponents<Types: NodeTypes<Payload = Engine>>,
AddOns: RethRpcAddOns<Node>,
{
/// Returns the [`EngineApiClient`] interface for the authenticated engine API.
///
/// This will send authenticated http requests to the node's auth server.
pub fn engine_http_client(&self) -> impl EngineApiClient<Engine> {
self.auth_server_handle().http_client()
}
/// Returns the [`EngineApiClient`] interface for the authenticated engine API.
///
/// This will send authenticated ws requests to the node's auth server.
pub async fn engine_ws_client(&self) -> impl EngineApiClient<Engine> {
self.auth_server_handle().ws_client().await
}
/// Returns the [`EngineApiClient`] interface for the authenticated engine API.
///
/// This will send not authenticated IPC requests to the node's auth server.
#[cfg(unix)]
pub async fn engine_ipc_client(&self) -> Option<impl EngineApiClient<Engine>> {
self.auth_server_handle().ipc_client().await
}
}
impl<Node: FullNodeComponents, AddOns: NodeAddOns<Node>> Deref for FullNode<Node, AddOns> {
type Target = AddOns::Handle;
fn deref(&self) -> &Self::Target {
&self.add_ons_handle
}
}
impl<Node: FullNodeComponents, AddOns: NodeAddOns<Node>> DerefMut for FullNode<Node, AddOns> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.add_ons_handle
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/lib.rs | crates/node/builder/src/lib.rs | //! Standalone crate for Reth configuration and builder types.
//!
//! # features
//! - `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// Node event hooks.
pub mod hooks;
/// Support for configuring the higher level node types.
pub mod node;
pub use node::*;
/// Support for accessing the EngineApi outside the RPC server context.
mod engine_api_ext;
pub use engine_api_ext::EngineApiExt;
/// Support for configuring the components of a node.
pub mod components;
pub use components::{NodeComponents, NodeComponentsBuilder};
mod builder;
pub use builder::{add_ons::AddOns, *};
mod launch;
pub use launch::{
debug::{DebugNode, DebugNodeLauncher},
engine::EngineNodeLauncher,
*,
};
mod handle;
pub use handle::NodeHandle;
pub mod rpc;
pub mod setup;
/// Type aliases for traits that are often used together
pub mod aliases;
pub use aliases::*;
/// Support for installing the ExExs (execution extensions) in a node.
pub mod exex;
/// Re-export the core configuration traits.
pub use reth_node_core::cli::config::{
PayloadBuilderConfig, RethNetworkConfig, RethTransactionPoolConfig,
};
// re-export the core config for convenience
pub use reth_node_core::node_config::NodeConfig;
// re-export API types for convenience
pub use reth_node_api::*;
use aquamarine as _;
use reth_rpc as _;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/hooks.rs | crates/node/builder/src/hooks.rs | use std::fmt;
use reth_node_api::{FullNodeComponents, NodeAddOns};
use crate::node::FullNode;
/// Container for all the configurable hook functions.
pub struct NodeHooks<Node: FullNodeComponents, AddOns: NodeAddOns<Node>> {
/// Hook to run once core components are initialized.
pub on_component_initialized: Box<dyn OnComponentInitializedHook<Node>>,
/// Hook to run once the node is started.
pub on_node_started: Box<dyn OnNodeStartedHook<Node, AddOns>>,
_marker: std::marker::PhantomData<Node>,
}
impl<Node, AddOns> NodeHooks<Node, AddOns>
where
Node: FullNodeComponents,
AddOns: NodeAddOns<Node>,
{
/// Creates a new, empty [`NodeHooks`] instance for the given node type.
pub fn new() -> Self {
Self {
on_component_initialized: Box::<()>::default(),
on_node_started: Box::<()>::default(),
_marker: Default::default(),
}
}
/// Sets the hook that is run once the node's components are initialized.
pub(crate) fn set_on_component_initialized<F>(&mut self, hook: F) -> &mut Self
where
F: OnComponentInitializedHook<Node> + 'static,
{
self.on_component_initialized = Box::new(hook);
self
}
/// Sets the hook that is run once the node's components are initialized.
#[expect(unused)]
pub(crate) fn on_component_initialized<F>(mut self, hook: F) -> Self
where
F: OnComponentInitializedHook<Node> + 'static,
{
self.set_on_component_initialized(hook);
self
}
/// Sets the hook that is run once the node has started.
pub(crate) fn set_on_node_started<F>(&mut self, hook: F) -> &mut Self
where
F: OnNodeStartedHook<Node, AddOns> + 'static,
{
self.on_node_started = Box::new(hook);
self
}
/// Sets the hook that is run once the node has started.
#[expect(unused)]
pub(crate) fn on_node_started<F>(mut self, hook: F) -> Self
where
F: OnNodeStartedHook<Node, AddOns> + 'static,
{
self.set_on_node_started(hook);
self
}
}
impl<Node, AddOns> Default for NodeHooks<Node, AddOns>
where
Node: FullNodeComponents,
AddOns: NodeAddOns<Node>,
{
fn default() -> Self {
Self::new()
}
}
impl<Node, AddOns> fmt::Debug for NodeHooks<Node, AddOns>
where
Node: FullNodeComponents,
AddOns: NodeAddOns<Node>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("NodeHooks")
.field("on_component_initialized", &"...")
.field("on_node_started", &"...")
.finish()
}
}
/// A helper trait for the event hook that is run once the node is initialized.
pub trait OnComponentInitializedHook<Node>: Send {
/// Consumes the event hook and runs it.
///
/// If this returns an error, the node launch will be aborted.
fn on_event(self: Box<Self>, node: Node) -> eyre::Result<()>;
}
impl<Node, F> OnComponentInitializedHook<Node> for F
where
F: FnOnce(Node) -> eyre::Result<()> + Send,
{
fn on_event(self: Box<Self>, node: Node) -> eyre::Result<()> {
(*self)(node)
}
}
/// A helper trait that is run once the node is started.
pub trait OnNodeStartedHook<Node: FullNodeComponents, AddOns: NodeAddOns<Node>>: Send {
/// Consumes the event hook and runs it.
///
/// If this returns an error, the node launch will be aborted.
fn on_event(self: Box<Self>, node: FullNode<Node, AddOns>) -> eyre::Result<()>;
}
impl<Node, AddOns, F> OnNodeStartedHook<Node, AddOns> for F
where
Node: FullNodeComponents,
AddOns: NodeAddOns<Node>,
F: FnOnce(FullNode<Node, AddOns>) -> eyre::Result<()> + Send,
{
fn on_event(self: Box<Self>, node: FullNode<Node, AddOns>) -> eyre::Result<()> {
(*self)(node)
}
}
impl<Node> OnComponentInitializedHook<Node> for () {
fn on_event(self: Box<Self>, _node: Node) -> eyre::Result<()> {
Ok(())
}
}
impl<Node, AddOns> OnNodeStartedHook<Node, AddOns> for ()
where
Node: FullNodeComponents,
AddOns: NodeAddOns<Node>,
{
fn on_event(self: Box<Self>, _node: FullNode<Node, AddOns>) -> eyre::Result<()> {
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/rpc.rs | crates/node/builder/src/rpc.rs | //! Builder support for rpc components.
pub use jsonrpsee::server::middleware::rpc::{RpcService, RpcServiceBuilder};
pub use reth_engine_tree::tree::{BasicEngineValidator, EngineValidator};
pub use reth_rpc_builder::{middleware::RethRpcMiddleware, Identity, Stack};
use crate::{
invalid_block_hook::InvalidBlockHookExt, ConfigureEngineEvm, ConsensusEngineEvent,
ConsensusEngineHandle,
};
use alloy_rpc_types::engine::ClientVersionV1;
use alloy_rpc_types_engine::ExecutionData;
use jsonrpsee::{core::middleware::layer::Either, RpcModule};
use reth_chain_state::CanonStateSubscriptions;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks, Hardforks};
use reth_node_api::{
AddOnsContext, BlockTy, EngineApiValidator, EngineTypes, FullNodeComponents, FullNodeTypes,
NodeAddOns, NodeTypes, PayloadTypes, PayloadValidator, PrimitivesTy, TreeConfig,
};
use reth_node_core::{
cli::config::RethTransactionPoolConfig,
node_config::NodeConfig,
version::{version_metadata, CLIENT_CODE},
};
use reth_payload_builder::{PayloadBuilderHandle, PayloadStore};
use reth_rpc::eth::{core::EthRpcConverterFor, EthApiTypes, FullEthApiServer};
use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule};
use reth_rpc_builder::{
auth::{AuthRpcModule, AuthServerHandle},
config::RethRpcServerConfig,
RpcModuleBuilder, RpcRegistryInner, RpcServerConfig, RpcServerHandle, TransportRpcModules,
};
use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi};
use reth_rpc_eth_types::{cache::cache_new_blocks_task, EthConfig, EthStateCache};
use reth_tokio_util::EventSender;
use reth_tracing::tracing::{debug, info};
use std::{
fmt::{self, Debug},
future::Future,
ops::{Deref, DerefMut},
};
/// Contains the handles to the spawned RPC servers.
///
/// This can be used to access the endpoints of the servers.
#[derive(Debug, Clone)]
pub struct RethRpcServerHandles {
/// The regular RPC server handle to all configured transports.
pub rpc: RpcServerHandle,
/// The handle to the auth server (engine API)
pub auth: AuthServerHandle,
}
/// Contains hooks that are called during the rpc setup.
pub struct RpcHooks<Node: FullNodeComponents, EthApi> {
/// Hooks to run once RPC server is running.
pub on_rpc_started: Box<dyn OnRpcStarted<Node, EthApi>>,
/// Hooks to run to configure RPC server API.
pub extend_rpc_modules: Box<dyn ExtendRpcModules<Node, EthApi>>,
}
impl<Node, EthApi> Default for RpcHooks<Node, EthApi>
where
Node: FullNodeComponents,
EthApi: EthApiTypes,
{
fn default() -> Self {
Self { on_rpc_started: Box::<()>::default(), extend_rpc_modules: Box::<()>::default() }
}
}
impl<Node, EthApi> RpcHooks<Node, EthApi>
where
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    /// Replaces the hook that is run once the rpc server is started.
    pub(crate) fn set_on_rpc_started<F>(&mut self, callback: F) -> &mut Self
    where
        F: OnRpcStarted<Node, EthApi> + 'static,
    {
        self.on_rpc_started = Box::new(callback);
        self
    }
    /// Consuming variant of [`Self::set_on_rpc_started`].
    #[expect(unused)]
    pub(crate) fn on_rpc_started<F>(mut self, callback: F) -> Self
    where
        F: OnRpcStarted<Node, EthApi> + 'static,
    {
        self.on_rpc_started = Box::new(callback);
        self
    }
    /// Replaces the hook that is run to configure the rpc modules.
    pub(crate) fn set_extend_rpc_modules<F>(&mut self, callback: F) -> &mut Self
    where
        F: ExtendRpcModules<Node, EthApi> + 'static,
    {
        self.extend_rpc_modules = Box::new(callback);
        self
    }
    /// Consuming variant of [`Self::set_extend_rpc_modules`].
    #[expect(unused)]
    pub(crate) fn extend_rpc_modules<F>(mut self, callback: F) -> Self
    where
        F: ExtendRpcModules<Node, EthApi> + 'static,
    {
        self.extend_rpc_modules = Box::new(callback);
        self
    }
}
impl<Node, EthApi> fmt::Debug for RpcHooks<Node, EthApi>
where
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    /// Manual `Debug`: the boxed hook trait objects are not `Debug`, so placeholders are
    /// printed for both fields.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RpcHooks")
            .field("on_rpc_started", &"...")
            .field("extend_rpc_modules", &"...")
            .finish()
    }
}
/// Event hook that is called once the rpc server is started.
pub trait OnRpcStarted<Node: FullNodeComponents, EthApi: EthApiTypes>: Send {
    /// The hook that is called once the rpc server is started.
    ///
    /// Consumes the boxed hook (`self: Box<Self>`), so it can run at most once. Receives the
    /// configured [`RpcContext`] and the [`RethRpcServerHandles`] of the launched servers.
    fn on_rpc_started(
        self: Box<Self>,
        ctx: RpcContext<'_, Node, EthApi>,
        handles: RethRpcServerHandles,
    ) -> eyre::Result<()>;
}
impl<Node, EthApi, F> OnRpcStarted<Node, EthApi> for F
where
    F: FnOnce(RpcContext<'_, Node, EthApi>, RethRpcServerHandles) -> eyre::Result<()> + Send,
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    /// Unboxes the closure and invokes it with the context and server handles.
    fn on_rpc_started(
        self: Box<Self>,
        ctx: RpcContext<'_, Node, EthApi>,
        handles: RethRpcServerHandles,
    ) -> eyre::Result<()> {
        let hook = *self;
        hook(ctx, handles)
    }
}
impl<Node, EthApi> OnRpcStarted<Node, EthApi> for ()
where
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    /// No-op implementation; used as the default hook by `RpcHooks::default`.
    fn on_rpc_started(
        self: Box<Self>,
        _: RpcContext<'_, Node, EthApi>,
        _: RethRpcServerHandles,
    ) -> eyre::Result<()> {
        Ok(())
    }
}
/// Hook that is called to configure the rpc modules.
///
/// Unlike [`OnRpcStarted`], this runs while the modules are being assembled (see
/// [`RpcHooks::extend_rpc_modules`]), so implementations can merge additional handlers
/// into the configured transports before the servers are launched.
pub trait ExtendRpcModules<Node: FullNodeComponents, EthApi: EthApiTypes>: Send {
    /// The hook that is called with the configured [`RpcContext`]; consumes the boxed hook.
    fn extend_rpc_modules(self: Box<Self>, ctx: RpcContext<'_, Node, EthApi>) -> eyre::Result<()>;
}
impl<Node, EthApi, F> ExtendRpcModules<Node, EthApi> for F
where
    F: FnOnce(RpcContext<'_, Node, EthApi>) -> eyre::Result<()> + Send,
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    /// Unboxes the closure and invokes it with the rpc context.
    fn extend_rpc_modules(self: Box<Self>, ctx: RpcContext<'_, Node, EthApi>) -> eyre::Result<()> {
        let hook = *self;
        hook(ctx)
    }
}
impl<Node, EthApi> ExtendRpcModules<Node, EthApi> for ()
where
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    /// No-op implementation; used as the default hook by `RpcHooks::default`.
    fn extend_rpc_modules(self: Box<Self>, _: RpcContext<'_, Node, EthApi>) -> eyre::Result<()> {
        Ok(())
    }
}
/// Helper wrapper type to encapsulate the [`RpcRegistryInner`] over components trait.
///
/// Dereferences to the inner [`RpcRegistryInner`], so all of its accessors are available
/// directly on this type.
#[derive(Debug, Clone)]
#[expect(clippy::type_complexity)]
pub struct RpcRegistry<Node: FullNodeComponents, EthApi: EthApiTypes> {
    pub(crate) registry: RpcRegistryInner<
        Node::Provider,
        Node::Pool,
        Node::Network,
        EthApi,
        Node::Evm,
        Node::Consensus,
    >,
}
impl<Node, EthApi> Deref for RpcRegistry<Node, EthApi>
where
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    type Target = RpcRegistryInner<
        Node::Provider,
        Node::Pool,
        Node::Network,
        EthApi,
        Node::Evm,
        Node::Consensus,
    >;
    /// Forwards to the wrapped [`RpcRegistryInner`].
    fn deref(&self) -> &Self::Target {
        &self.registry
    }
}
impl<Node, EthApi> DerefMut for RpcRegistry<Node, EthApi>
where
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    /// Forwards mutably to the wrapped [`RpcRegistryInner`].
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.registry
    }
}
/// Helper container for the parameters commonly passed to RPC module extension functions.
#[expect(missing_debug_implementations)]
pub struct RpcModuleContainer<'a, Node: FullNodeComponents, EthApi: EthApiTypes> {
    /// Holds installed modules per transport type.
    pub modules: &'a mut TransportRpcModules,
    /// Holds jwt authenticated rpc module.
    pub auth_module: &'a mut AuthRpcModule,
    /// A helper type that holds instances of the configured modules.
    pub registry: &'a mut RpcRegistry<Node, EthApi>,
}
/// Helper container to encapsulate [`RpcRegistryInner`], [`TransportRpcModules`] and
/// [`AuthRpcModule`].
///
/// This can be used to access installed modules, or create commonly used handlers like
/// [`reth_rpc::eth::EthApi`], and ultimately merge additional rpc handler into the configured
/// transport modules [`TransportRpcModules`] as well as configured authenticated methods
/// [`AuthRpcModule`].
#[expect(missing_debug_implementations)]
pub struct RpcContext<'a, Node: FullNodeComponents, EthApi: EthApiTypes> {
    /// The node components.
    pub(crate) node: Node,
    /// Gives access to the node configuration.
    pub(crate) config: &'a NodeConfig<<Node::Types as NodeTypes>::ChainSpec>,
    /// A helper type that holds instances of the configured modules.
    ///
    /// This provides easy access to rpc handlers, such as [`RpcRegistryInner::eth_api`].
    pub registry: &'a mut RpcRegistry<Node, EthApi>,
    /// Holds installed modules per transport type.
    ///
    /// This can be used to merge additional modules into the configured transports (http, ipc,
    /// ws). See [`TransportRpcModules::merge_configured`]
    pub modules: &'a mut TransportRpcModules,
    /// Holds jwt authenticated rpc module.
    ///
    /// This can be used to merge additional modules into the configured authenticated methods
    pub auth_module: &'a mut AuthRpcModule,
}
impl<Node, EthApi> RpcContext<'_, Node, EthApi>
where
    Node: FullNodeComponents,
    EthApi: EthApiTypes,
{
    /// Returns the config of the node.
    pub const fn config(&self) -> &NodeConfig<<Node::Types as NodeTypes>::ChainSpec> {
        self.config
    }
    /// Returns a reference to the configured node.
    ///
    /// This gives access to the node's components.
    pub const fn node(&self) -> &Node {
        &self.node
    }
    /// Returns the transaction pool instance of the node.
    pub fn pool(&self) -> &Node::Pool {
        self.node.pool()
    }
    /// Returns provider to interact with the node.
    pub fn provider(&self) -> &Node::Provider {
        self.node.provider()
    }
    /// Returns the handle to the network
    pub fn network(&self) -> &Node::Network {
        self.node.network()
    }
    /// Returns the handle to the payload builder service
    pub fn payload_builder_handle(
        &self,
    ) -> &PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload> {
        self.node.payload_builder_handle()
    }
}
/// Handle to the launched RPC servers.
pub struct RpcHandle<Node: FullNodeComponents, EthApi: EthApiTypes> {
    /// Handles to launched servers.
    pub rpc_server_handles: RethRpcServerHandles,
    /// Configured RPC modules.
    pub rpc_registry: RpcRegistry<Node, EthApi>,
    /// Notification channel for engine API events
    ///
    /// Caution: This is a multi-producer, multi-consumer broadcast that also grants access to
    /// dispatch events
    pub engine_events: EventSender<ConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>>,
    /// Handle to the beacon consensus engine.
    pub beacon_engine_handle: ConsensusEngineHandle<<Node::Types as NodeTypes>::Payload>,
}
impl<Node: FullNodeComponents, EthApi: EthApiTypes> Clone for RpcHandle<Node, EthApi> {
    /// Manual impl: cloning field-by-field avoids the extra `Node: Clone`/`EthApi: Clone`
    /// bounds a derived `Clone` would require.
    fn clone(&self) -> Self {
        let Self { rpc_server_handles, rpc_registry, engine_events, beacon_engine_handle } = self;
        Self {
            rpc_server_handles: rpc_server_handles.clone(),
            rpc_registry: rpc_registry.clone(),
            engine_events: engine_events.clone(),
            beacon_engine_handle: beacon_engine_handle.clone(),
        }
    }
}
impl<Node: FullNodeComponents, EthApi: EthApiTypes> Deref for RpcHandle<Node, EthApi> {
    type Target = RpcRegistry<Node, EthApi>;
    /// Dereferences to the configured [`RpcRegistry`] for convenient handler access.
    fn deref(&self) -> &Self::Target {
        &self.rpc_registry
    }
}
impl<Node: FullNodeComponents, EthApi: EthApiTypes> Debug for RpcHandle<Node, EthApi>
where
    RpcRegistry<Node, EthApi>: Debug,
{
    /// Formats the server handles and registry; the engine channels are omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RpcHandle")
            .field("rpc_server_handles", &self.rpc_server_handles)
            .field("rpc_registry", &self.rpc_registry)
            .finish()
    }
}
impl<Node: FullNodeComponents, EthApi: EthApiTypes> RpcHandle<Node, EthApi> {
    /// Returns the RPC server handles.
    pub const fn rpc_server_handles(&self) -> &RethRpcServerHandles {
        &self.rpc_server_handles
    }
    /// Returns the consensus engine handle.
    ///
    /// This handle can be used to interact with the engine service directly.
    pub const fn consensus_engine_handle(
        &self,
    ) -> &ConsensusEngineHandle<<Node::Types as NodeTypes>::Payload> {
        &self.beacon_engine_handle
    }
    /// Returns the consensus engine events sender.
    pub const fn consensus_engine_events(
        &self,
    ) -> &EventSender<ConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>> {
        &self.engine_events
    }
}
/// Handle returned when only the regular RPC server (HTTP/WS/IPC) is launched.
///
/// This handle provides access to the RPC server endpoints and registry, but does not
/// include an authenticated Engine API server. Use this when you only need regular
/// RPC functionality.
#[derive(Debug, Clone)]
pub struct RpcServerOnlyHandle<Node: FullNodeComponents, EthApi: EthApiTypes> {
    /// Handle to the RPC server
    pub rpc_server_handle: RpcServerHandle,
    /// Configured RPC modules.
    pub rpc_registry: RpcRegistry<Node, EthApi>,
    /// Notification channel for engine API events
    pub engine_events: EventSender<ConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>>,
    /// Handle to the consensus engine.
    pub engine_handle: ConsensusEngineHandle<<Node::Types as NodeTypes>::Payload>,
}
impl<Node: FullNodeComponents, EthApi: EthApiTypes> RpcServerOnlyHandle<Node, EthApi> {
    /// Returns the RPC server handle.
    pub const fn rpc_server_handle(&self) -> &RpcServerHandle {
        &self.rpc_server_handle
    }
    /// Returns the consensus engine handle.
    ///
    /// This handle can be used to interact with the engine service directly.
    pub const fn consensus_engine_handle(
        &self,
    ) -> &ConsensusEngineHandle<<Node::Types as NodeTypes>::Payload> {
        &self.engine_handle
    }
    /// Returns the consensus engine events sender.
    pub const fn consensus_engine_events(
        &self,
    ) -> &EventSender<ConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>> {
        &self.engine_events
    }
}
/// Handle returned when only the authenticated Engine API server is launched.
///
/// This handle provides access to the Engine API server and registry, but does not
/// include the regular RPC servers (HTTP/WS/IPC). Use this for specialized setups
/// that only need Engine API functionality.
#[derive(Debug, Clone)]
pub struct AuthServerOnlyHandle<Node: FullNodeComponents, EthApi: EthApiTypes> {
    /// Handle to the auth server (engine API)
    pub auth_server_handle: AuthServerHandle,
    /// Configured RPC modules.
    pub rpc_registry: RpcRegistry<Node, EthApi>,
    /// Notification channel for engine API events
    pub engine_events: EventSender<ConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>>,
    /// Handle to the consensus engine.
    pub engine_handle: ConsensusEngineHandle<<Node::Types as NodeTypes>::Payload>,
}
impl<Node: FullNodeComponents, EthApi: EthApiTypes> AuthServerOnlyHandle<Node, EthApi> {
    /// Returns the consensus engine handle.
    ///
    /// This handle can be used to interact with the engine service directly.
    pub const fn consensus_engine_handle(
        &self,
    ) -> &ConsensusEngineHandle<<Node::Types as NodeTypes>::Payload> {
        &self.engine_handle
    }
    /// Returns the consensus engine events sender.
    pub const fn consensus_engine_events(
        &self,
    ) -> &EventSender<ConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>> {
        &self.engine_events
    }
}
/// Internal context struct for RPC setup shared between different launch methods
struct RpcSetupContext<'a, Node: FullNodeComponents, EthApi: EthApiTypes> {
    /// The node components.
    node: Node,
    /// Node configuration the servers are built from.
    config: &'a NodeConfig<<Node::Types as NodeTypes>::ChainSpec>,
    /// Installed modules per transport type (http, ws, ipc).
    modules: TransportRpcModules,
    /// JWT-authenticated rpc module (engine API).
    auth_module: AuthRpcModule,
    /// Configuration for launching the auth server.
    auth_config: reth_rpc_builder::auth::AuthServerConfig,
    /// Registry holding instances of the configured rpc handlers.
    registry: RpcRegistry<Node, EthApi>,
    /// Hook to invoke once the servers have started.
    on_rpc_started: Box<dyn OnRpcStarted<Node, EthApi>>,
    /// Broadcast channel for consensus engine events.
    engine_events: EventSender<ConsensusEngineEvent<<Node::Types as NodeTypes>::Primitives>>,
    /// Handle to the consensus engine.
    engine_handle: ConsensusEngineHandle<<Node::Types as NodeTypes>::Payload>,
}
/// Node add-ons containing RPC server configuration, with customizable eth API handler.
///
/// This struct can be used to provide the RPC server functionality. It is responsible for launching
/// the regular RPC and the authenticated RPC server (engine API). It is intended to be used and
/// modified as part of the [`NodeAddOns`] see for example `OpRpcAddons`, `EthereumAddOns`.
///
/// It can be modified to register RPC API handlers, see [`RpcAddOns::launch_add_ons_with`] which
/// takes a closure that provides access to all the configured modules (namespaces), and is invoked
/// just before the servers are launched. This can be used to extend the node with custom RPC
/// methods or even replace existing method handlers, see also [`TransportRpcModules`].
pub struct RpcAddOns<
    Node: FullNodeComponents,
    EthB: EthApiBuilder<Node>,
    PVB,
    EB = BasicEngineApiBuilder<PVB>,
    EVB = BasicEngineValidatorBuilder<PVB>,
    RpcMiddleware = Identity,
> {
    /// Additional RPC add-ons.
    pub hooks: RpcHooks<Node, EthB::EthApi>,
    /// Builder for `EthApi`
    eth_api_builder: EthB,
    /// Payload validator builder
    payload_validator_builder: PVB,
    /// Builder for `EngineApi`
    engine_api_builder: EB,
    /// Builder for tree validator
    engine_validator_builder: EVB,
    /// Configurable RPC middleware stack.
    ///
    /// This middleware is applied to all RPC requests across all transports (HTTP, WS, IPC).
    /// See [`RpcAddOns::with_rpc_middleware`] for more details.
    rpc_middleware: RpcMiddleware,
    /// Optional custom tokio runtime for the RPC server.
    ///
    /// When `None`, the servers run on the ambient runtime.
    tokio_runtime: Option<tokio::runtime::Handle>,
}
impl<Node, EthB, PVB, EB, EVB, RpcMiddleware> Debug
    for RpcAddOns<Node, EthB, PVB, EB, EVB, RpcMiddleware>
where
    Node: FullNodeComponents,
    EthB: EthApiBuilder<Node>,
    PVB: Debug,
    EB: Debug,
    EVB: Debug,
{
    /// Formats the add-ons, eliding the non-`Debug` eth api builder and middleware fields.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RpcAddOns")
            .field("hooks", &self.hooks)
            .field("eth_api_builder", &"...")
            .field("payload_validator_builder", &self.payload_validator_builder)
            .field("engine_api_builder", &self.engine_api_builder)
            .field("engine_validator_builder", &self.engine_validator_builder)
            .field("rpc_middleware", &"...")
            // Previously omitted: shows whether a custom runtime handle is configured.
            .field("tokio_runtime", &self.tokio_runtime)
            .finish()
    }
}
impl<Node, EthB, PVB, EB, EVB, RpcMiddleware> RpcAddOns<Node, EthB, PVB, EB, EVB, RpcMiddleware>
where
    Node: FullNodeComponents,
    EthB: EthApiBuilder<Node>,
{
    /// Creates a new instance of the RPC add-ons.
    ///
    /// Hooks start out as no-op defaults and no custom tokio runtime is configured.
    pub fn new(
        eth_api_builder: EthB,
        payload_validator_builder: PVB,
        engine_api_builder: EB,
        engine_validator_builder: EVB,
        rpc_middleware: RpcMiddleware,
    ) -> Self {
        Self {
            hooks: RpcHooks::default(),
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime: None,
        }
    }
    /// Maps the [`EngineApiBuilder`] builder type.
    //
    // NOTE: the `with_*` methods below change a generic parameter of the return type, so the
    // struct must be destructured and rebuilt field by field; `..self` struct update syntax
    // cannot change the type.
    pub fn with_engine_api<T>(
        self,
        engine_api_builder: T,
    ) -> RpcAddOns<Node, EthB, PVB, T, EVB, RpcMiddleware> {
        let Self {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime,
            ..
        } = self;
        RpcAddOns {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime,
        }
    }
    /// Maps the [`PayloadValidatorBuilder`] builder type.
    pub fn with_payload_validator<T>(
        self,
        payload_validator_builder: T,
    ) -> RpcAddOns<Node, EthB, T, EB, EVB, RpcMiddleware> {
        let Self {
            hooks,
            eth_api_builder,
            engine_api_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime,
            ..
        } = self;
        RpcAddOns {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime,
        }
    }
    /// Maps the [`EngineValidatorBuilder`] builder type.
    pub fn with_engine_validator<T>(
        self,
        engine_validator_builder: T,
    ) -> RpcAddOns<Node, EthB, PVB, EB, T, RpcMiddleware> {
        let Self {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            rpc_middleware,
            tokio_runtime,
            ..
        } = self;
        RpcAddOns {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime,
        }
    }
    /// Sets the RPC middleware stack for processing RPC requests.
    ///
    /// This method configures a custom middleware stack that will be applied to all RPC requests
    /// across HTTP, `WebSocket`, and IPC transports. The middleware is applied to the RPC service
    /// layer, allowing you to intercept, modify, or enhance RPC request processing.
    ///
    ///
    /// # How It Works
    ///
    /// The middleware uses the Tower ecosystem's `Layer` pattern. When an RPC server is started,
    /// the configured middleware stack is applied to create a layered service that processes
    /// requests in the order the layers were added.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// use reth_rpc_builder::{RpcServiceBuilder, RpcRequestMetrics};
    /// use tower::Layer;
    ///
    /// // Simple example with metrics
    /// let metrics_layer = RpcRequestMetrics::new(metrics_recorder);
    /// let with_metrics = rpc_addons.with_rpc_middleware(
    ///     RpcServiceBuilder::new().layer(metrics_layer)
    /// );
    ///
    /// // Composing multiple middleware layers
    /// let middleware_stack = RpcServiceBuilder::new()
    ///     .layer(rate_limit_layer)
    ///     .layer(logging_layer)
    ///     .layer(metrics_layer);
    /// let with_full_stack = rpc_addons.with_rpc_middleware(middleware_stack);
    /// ```
    ///
    /// # Notes
    ///
    /// - Middleware is applied to the RPC service layer, not the HTTP transport layer
    /// - The default middleware is `Identity` (no-op), which passes through requests unchanged
    /// - Middleware layers are applied in the order they are added via `.layer()`
    pub fn with_rpc_middleware<T>(
        self,
        rpc_middleware: T,
    ) -> RpcAddOns<Node, EthB, PVB, EB, EVB, T> {
        let Self {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            engine_validator_builder,
            tokio_runtime,
            ..
        } = self;
        RpcAddOns {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime,
        }
    }
    /// Sets the tokio runtime for the RPC servers.
    ///
    /// Caution: This runtime must not be created from within asynchronous context.
    pub fn with_tokio_runtime(self, tokio_runtime: Option<tokio::runtime::Handle>) -> Self {
        let Self {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_validator_builder,
            engine_api_builder,
            rpc_middleware,
            ..
        } = self;
        Self {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_validator_builder,
            engine_api_builder,
            rpc_middleware,
            tokio_runtime,
        }
    }
    /// Add a new layer `T` to the configured [`RpcServiceBuilder`].
    pub fn layer_rpc_middleware<T>(
        self,
        layer: T,
    ) -> RpcAddOns<Node, EthB, PVB, EB, EVB, Stack<RpcMiddleware, T>> {
        let Self {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime,
        } = self;
        // Wrap the existing middleware with the new layer; layers apply in insertion order.
        let rpc_middleware = Stack::new(rpc_middleware, layer);
        RpcAddOns {
            hooks,
            eth_api_builder,
            payload_validator_builder,
            engine_api_builder,
            engine_validator_builder,
            rpc_middleware,
            tokio_runtime,
        }
    }
    /// Optionally adds a new layer `T` to the configured [`RpcServiceBuilder`].
    ///
    /// A `None` layer is installed as a pass-through `Identity`, keeping the return type uniform.
    #[expect(clippy::type_complexity)]
    pub fn option_layer_rpc_middleware<T>(
        self,
        layer: Option<T>,
    ) -> RpcAddOns<Node, EthB, PVB, EB, EVB, Stack<RpcMiddleware, Either<T, Identity>>> {
        let layer = layer.map(Either::Left).unwrap_or(Either::Right(Identity::new()));
        self.layer_rpc_middleware(layer)
    }
    /// Sets the hook that is run once the rpc server is started.
    pub fn on_rpc_started<F>(mut self, hook: F) -> Self
    where
        F: FnOnce(RpcContext<'_, Node, EthB::EthApi>, RethRpcServerHandles) -> eyre::Result<()>
            + Send
            + 'static,
    {
        self.hooks.set_on_rpc_started(hook);
        self
    }
    /// Sets the hook that is run to configure the rpc modules.
    pub fn extend_rpc_modules<F>(mut self, hook: F) -> Self
    where
        F: FnOnce(RpcContext<'_, Node, EthB::EthApi>) -> eyre::Result<()> + Send + 'static,
    {
        self.hooks.set_extend_rpc_modules(hook);
        self
    }
}
// Generic parameters renamed (EV/Engine -> PVB/EVB) to match the struct declaration's
// naming; `Engine` previously (and misleadingly) named the engine-validator builder slot.
impl<Node, EthB, PVB, EB, EVB> Default for RpcAddOns<Node, EthB, PVB, EB, EVB, Identity>
where
    Node: FullNodeComponents,
    EthB: EthApiBuilder<Node>,
    PVB: Default,
    EB: Default,
    EVB: Default,
{
    /// Builds the add-ons from the `Default` of every builder, with no-op (`Identity`)
    /// RPC middleware and no custom tokio runtime.
    fn default() -> Self {
        Self::new(
            EthB::default(),
            PVB::default(),
            EB::default(),
            EVB::default(),
            Default::default(),
        )
    }
}
impl<N, EthB, PVB, EB, EVB, RpcMiddleware> RpcAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents,
N::Provider: ChainSpecProvider<ChainSpec: EthereumHardforks>,
EthB: EthApiBuilder<N>,
EB: EngineApiBuilder<N>,
EVB: EngineValidatorBuilder<N>,
RpcMiddleware: RethRpcMiddleware,
{
/// Launches only the regular RPC server (HTTP/WS/IPC), without the authenticated Engine API
/// server.
///
/// This is useful when you only need the regular RPC functionality and want to avoid
/// starting the auth server.
pub async fn launch_rpc_server<F>(
self,
ctx: AddOnsContext<'_, N>,
ext: F,
) -> eyre::Result<RpcServerOnlyHandle<N, EthB::EthApi>>
where
F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>,
{
let rpc_middleware = self.rpc_middleware.clone();
let tokio_runtime = self.tokio_runtime.clone();
let setup_ctx = self.setup_rpc_components(ctx, ext).await?;
let RpcSetupContext {
node,
config,
mut modules,
mut auth_module,
auth_config: _,
mut registry,
on_rpc_started,
engine_events,
engine_handle,
} = setup_ctx;
let server_config = config
.rpc
.rpc_server_config()
.set_rpc_middleware(rpc_middleware)
.with_tokio_runtime(tokio_runtime);
let rpc_server_handle = Self::launch_rpc_server_internal(server_config, &modules).await?;
let handles =
RethRpcServerHandles { rpc: rpc_server_handle.clone(), auth: AuthServerHandle::noop() };
Self::finalize_rpc_setup(
&mut registry,
&mut modules,
&mut auth_module,
&node,
config,
on_rpc_started,
handles,
)?;
Ok(RpcServerOnlyHandle {
rpc_server_handle,
rpc_registry: registry,
engine_events,
engine_handle,
})
}
/// Launches the RPC servers with the given context and an additional hook for extending
/// modules. Whether the auth server is launched depends on the CLI configuration.
pub async fn launch_add_ons_with<F>(
self,
ctx: AddOnsContext<'_, N>,
ext: F,
) -> eyre::Result<RpcHandle<N, EthB::EthApi>>
where
F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>,
{
// Check CLI config to determine if auth server should be disabled
let disable_auth = ctx.config.rpc.disable_auth_server;
self.launch_add_ons_with_opt_engine(ctx, ext, disable_auth).await
}
/// Launches the RPC servers with the given context and an additional hook for extending
/// modules. Optionally disables the auth server based on the `disable_auth` parameter.
///
/// When `disable_auth` is true, the auth server will not be started and a noop handle
/// will be used instead.
pub async fn launch_add_ons_with_opt_engine<F>(
self,
ctx: AddOnsContext<'_, N>,
ext: F,
disable_auth: bool,
) -> eyre::Result<RpcHandle<N, EthB::EthApi>>
where
F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>,
{
let rpc_middleware = self.rpc_middleware.clone();
let tokio_runtime = self.tokio_runtime.clone();
let setup_ctx = self.setup_rpc_components(ctx, ext).await?;
let RpcSetupContext {
node,
config,
mut modules,
mut auth_module,
auth_config,
mut registry,
on_rpc_started,
engine_events,
engine_handle,
} = setup_ctx;
let server_config = config
.rpc
.rpc_server_config()
.set_rpc_middleware(rpc_middleware)
.with_tokio_runtime(tokio_runtime);
let (rpc, auth) = if disable_auth {
// Only launch the RPC server, use a noop auth handle
let rpc = Self::launch_rpc_server_internal(server_config, &modules).await?;
(rpc, AuthServerHandle::noop())
} else {
let auth_module_clone = auth_module.clone();
// launch servers concurrently
let (rpc, auth) = futures::future::try_join(
Self::launch_rpc_server_internal(server_config, &modules),
Self::launch_auth_server_internal(auth_module_clone, auth_config),
)
.await?;
(rpc, auth)
};
let handles = RethRpcServerHandles { rpc, auth };
Self::finalize_rpc_setup(
&mut registry,
&mut modules,
&mut auth_module,
&node,
config,
on_rpc_started,
handles.clone(),
)?;
Ok(RpcHandle {
rpc_server_handles: handles,
rpc_registry: registry,
engine_events,
beacon_engine_handle: engine_handle,
})
}
/// Common setup for RPC server initialization
async fn setup_rpc_components<'a, F>(
self,
ctx: AddOnsContext<'a, N>,
ext: F,
) -> eyre::Result<RpcSetupContext<'a, N, EthB::EthApi>>
where
F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>,
{
let Self { eth_api_builder, engine_api_builder, hooks, .. } = self;
let engine_api = engine_api_builder.build_engine_api(&ctx).await?;
let AddOnsContext { node, config, beacon_engine_handle, jwt_secret, engine_events } = ctx;
info!(target: "reth::cli", "Engine API handler initialized");
let cache = EthStateCache::spawn_with(
node.provider().clone(),
config.rpc.eth_config().cache,
node.task_executor().clone(),
);
let new_canonical_blocks = node.provider().canonical_state_stream();
let c = cache.clone();
node.task_executor().spawn_critical(
"cache canonical blocks task",
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/exex.rs | crates/node/builder/src/exex.rs | //! Types for launching execution extensions (ExEx).
use std::future::Future;
use futures::{future::BoxFuture, FutureExt};
use reth_exex::ExExContext;
use reth_node_api::FullNodeComponents;
/// A trait for launching an `ExEx`.
pub trait LaunchExEx<Node: FullNodeComponents>: Send {
    /// Launches the `ExEx`.
    ///
    /// The `ExEx` should be able to run independently and emit events on the channels provided in
    /// the [`ExExContext`].
    ///
    /// Returns a nested future: the outer future performs initialization and resolves to the
    /// inner, long-running `ExEx` future itself.
    fn launch(
        self,
        ctx: ExExContext<Node>,
    ) -> impl Future<Output = eyre::Result<impl Future<Output = eyre::Result<()>> + Send>> + Send;
}
/// A boxed exex future.
pub type BoxExEx = BoxFuture<'static, eyre::Result<()>>;
/// A version of [`LaunchExEx`] that returns a boxed future. Makes the trait object-safe.
pub trait BoxedLaunchExEx<Node: FullNodeComponents>: Send {
    /// Launches the `ExEx` and returns a boxed future.
    ///
    /// The returned future resolves to the long-running [`BoxExEx`] future.
    fn launch(self: Box<Self>, ctx: ExExContext<Node>)
        -> BoxFuture<'static, eyre::Result<BoxExEx>>;
}
/// Implements [`BoxedLaunchExEx`] for any [`LaunchExEx`] that is [Send] and `'static`.
///
/// Returns a [`BoxFuture`] that resolves to a [`BoxExEx`].
impl<E, Node> BoxedLaunchExEx<Node> for E
where
    E: LaunchExEx<Node> + Send + 'static,
    Node: FullNodeComponents,
{
    fn launch(
        self: Box<Self>,
        ctx: ExExContext<Node>,
    ) -> BoxFuture<'static, eyre::Result<BoxExEx>> {
        // Unbox, run the (non-object-safe) launch, then pin+erase the resulting ExEx future.
        Box::pin(async move {
            let running = LaunchExEx::launch(*self, ctx).await?;
            Ok(Box::pin(running) as BoxExEx)
        })
    }
}
/// Implements `LaunchExEx` for any closure that takes an [`ExExContext`] and returns a future
/// resolving to an `ExEx`.
impl<Node, F, Fut, E> LaunchExEx<Node> for F
where
    Node: FullNodeComponents,
    F: FnOnce(ExExContext<Node>) -> Fut + Send,
    Fut: Future<Output = eyre::Result<E>> + Send,
    E: Future<Output = eyre::Result<()>> + Send,
{
    /// Delegates directly to the closure: invoking it *is* the launch.
    fn launch(
        self,
        ctx: ExExContext<Node>,
    ) -> impl Future<Output = eyre::Result<impl Future<Output = eyre::Result<()>> + Send>> + Send
    {
        self(ctx)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/engine_api_ext.rs | crates/node/builder/src/engine_api_ext.rs | //! `EngineApiBuilder` callback wrapper
//!
//! Wraps an `EngineApiBuilder` to provide access to the built Engine API instance.
use crate::rpc::EngineApiBuilder;
use eyre::Result;
use reth_node_api::{AddOnsContext, FullNodeComponents};
use reth_rpc_api::IntoEngineApiRpcModule;
/// Provides access to an `EngineApi` instance with a callback
#[derive(Debug)]
pub struct EngineApiExt<B, F> {
    /// The inner builder that constructs the actual `EngineApi`
    inner: B,
    /// Optional callback function to execute with the built API.
    ///
    /// Wrapped in `Option` so it can be taken and executed at most once.
    callback: Option<F>,
}
impl<B, F> EngineApiExt<B, F> {
    /// Creates a new wrapper that calls `callback` when the API is built.
    pub const fn new(inner: B, callback: F) -> Self {
        Self { inner, callback: Some(callback) }
    }
}
impl<N, B, F> EngineApiBuilder<N> for EngineApiExt<B, F>
where
    B: EngineApiBuilder<N>,
    N: FullNodeComponents,
    B::EngineApi: IntoEngineApiRpcModule + Send + Sync + Clone + 'static,
    F: FnOnce(B::EngineApi) + Send + Sync + 'static,
{
    type EngineApi = B::EngineApi;
    /// Builds the inner `EngineApi`, then hands a clone of it to the callback if one is set.
    async fn build_engine_api(self, ctx: &AddOnsContext<'_, N>) -> Result<Self::EngineApi> {
        let Self { inner, callback } = self;
        let api = inner.build_engine_api(ctx).await?;
        if let Some(on_built) = callback {
            on_built(api.clone());
        }
        Ok(api)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/aliases.rs | crates/node/builder/src/aliases.rs | use reth_network::NetworkPrimitives;
use reth_node_api::BlockBody;
use reth_provider::BlockReader;
/// This is a type alias to make type bounds simpler, when we have a [`NetworkPrimitives`] and need
/// a [`BlockReader`] whose associated types match the [`NetworkPrimitives`] associated types.
pub trait BlockReaderFor<N: NetworkPrimitives>:
    BlockReader<
    Block = N::Block,
    Header = N::BlockHeader,
    Transaction = <N::BlockBody as BlockBody>::Transaction,
    Receipt = N::Receipt,
>
{
}
// Blanket impl: any `BlockReader` with matching associated types automatically satisfies
// `BlockReaderFor<N>`, so the alias never needs to be implemented manually.
impl<N, T> BlockReaderFor<N> for T
where
    N: NetworkPrimitives,
    T: BlockReader<
        Block = N::Block,
        Header = N::BlockHeader,
        Transaction = <N::BlockBody as BlockBody>::Transaction,
        Receipt = N::Receipt,
    >,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/handle.rs | crates/node/builder/src/handle.rs | use std::fmt;
use reth_node_api::FullNodeComponents;
use reth_node_core::exit::NodeExitFuture;
use crate::{node::FullNode, rpc::RethRpcAddOns};
/// A Handle to the launched node.
#[must_use = "Needs to await the node exit future"]
pub struct NodeHandle<Node: FullNodeComponents, AddOns: RethRpcAddOns<Node>> {
    /// All node components.
    pub node: FullNode<Node, AddOns>,
    /// The exit future of the node.
    pub node_exit_future: NodeExitFuture,
}
impl<Node, AddOns> NodeHandle<Node, AddOns>
where
    Node: FullNodeComponents,
    AddOns: RethRpcAddOns<Node>,
{
    /// Waits for the node to exit, if it was configured to exit.
    ///
    /// Consumes the handle and resolves with the node's exit result.
    pub async fn wait_for_node_exit(self) -> eyre::Result<()> {
        self.node_exit_future.await
    }
}
impl<Node, AddOns> fmt::Debug for NodeHandle<Node, AddOns>
where
    Node: FullNodeComponents,
    AddOns: RethRpcAddOns<Node>,
{
    /// Formats the exit future; the node components are elided with a placeholder.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("NodeHandle")
            .field("node", &"...")
            .field("node_exit_future", &self.node_exit_future)
            .finish()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/builder/add_ons.rs | crates/node/builder/src/builder/add_ons.rs | //! Node add-ons. Depend on core [`NodeComponents`](crate::NodeComponents).
use reth_node_api::{FullNodeComponents, NodeAddOns};
use crate::{exex::BoxedLaunchExEx, hooks::NodeHooks};
/// Additional node extensions.
///
/// At this point we consider all necessary components defined.
pub struct AddOns<Node: FullNodeComponents, AddOns: NodeAddOns<Node>> {
    /// Additional `NodeHooks` that are called at specific points in the node's launch lifecycle.
    pub hooks: NodeHooks<Node, AddOns>,
    /// The `ExExs` (execution extensions) of the node, each paired with its name.
    pub exexs: Vec<(String, Box<dyn BoxedLaunchExEx<Node>>)>,
    /// Additional captured addons.
    pub add_ons: AddOns,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/builder/mod.rs | crates/node/builder/src/builder/mod.rs | //! Customizable node builder.
#![allow(clippy::type_complexity, missing_debug_implementations)]
use crate::{
common::WithConfigs,
components::NodeComponentsBuilder,
node::FullNode,
rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext},
BlockReaderFor, DebugNode, DebugNodeLauncher, EngineNodeLauncher, LaunchNode, Node,
};
use alloy_eips::eip4844::env_settings::EnvKzgSettings;
use futures::Future;
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_cli_util::get_secret_key;
use reth_db_api::{database::Database, database_metrics::DatabaseMetrics};
use reth_exex::ExExContext;
use reth_network::{
transactions::{TransactionPropagationPolicy, TransactionsManagerConfig},
NetworkBuilder, NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager,
NetworkPrimitives,
};
use reth_node_api::{
FullNodePrimitives, FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes,
NodeTypesWithDBAdapter,
};
use reth_node_core::{
cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig},
dirs::{ChainPath, DataDirPath},
node_config::NodeConfig,
primitives::Head,
};
use reth_provider::{
providers::{BlockchainProvider, NodeTypesForProvider},
ChainSpecProvider, FullProvider,
};
use reth_tasks::TaskExecutor;
use reth_transaction_pool::{PoolConfig, PoolTransaction, TransactionPool};
use secp256k1::SecretKey;
use std::{fmt::Debug, sync::Arc};
use tracing::{info, trace, warn};
pub mod add_ons;
mod states;
pub use states::*;
/// The adapter type for a reth node with the builtin provider type ([`BlockchainProvider`]).
// Note: we need to hardcode this because custom components might depend on it in associated types.
pub type RethFullAdapter<DB, Types> =
    FullNodeTypesAdapter<Types, DB, BlockchainProvider<NodeTypesWithDBAdapter<Types, DB>>>;
#[expect(clippy::doc_markdown)]
#[cfg_attr(doc, aquamarine::aquamarine)]
/// Declaratively construct a node.
///
/// [`NodeBuilder`] provides a [builder-like interface][builder] for composing
/// components of a node.
///
/// ## Order
///
/// Configuring a node starts out with a [`NodeConfig`] (this can be obtained from cli arguments for
/// example) and then proceeds to configure the core static types of the node:
/// [`NodeTypes`], these include the node's primitive types and the node's engine
/// types.
///
/// Next all stateful components of the node are configured, these include all the
/// components of the node that are downstream of those types, these include:
///
/// - The EVM and Executor configuration: [`ExecutorBuilder`](crate::components::ExecutorBuilder)
/// - The transaction pool: [`PoolBuilder`](crate::components::PoolBuilder)
/// - The network: [`NetworkBuilder`](crate::components::NetworkBuilder)
/// - The payload builder: [`PayloadBuilder`](crate::components::PayloadServiceBuilder)
///
/// Once all the components are configured, the node is ready to be launched.
///
/// On launch the builder returns a fully type aware [`NodeHandle`] that has access to all the
/// configured components and can interact with the node.
///
/// There are convenience functions for networks that come with a preset of types and components via
/// the [`Node`] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OpNode`.
///
/// The [`NodeBuilder::node`] function configures the node's types and components in one step.
///
/// ## Components
///
/// All components are configured with a [`NodeComponentsBuilder`] that is responsible for actually
/// creating the node components during the launch process. The
/// [`ComponentsBuilder`](crate::components::ComponentsBuilder) is a general purpose implementation
/// of the [`NodeComponentsBuilder`] trait that can be used to configure the executor, network,
/// transaction pool and payload builder of the node. It enforces the correct order of
/// configuration, for example the network and the payload builder depend on the transaction pool
/// type that is configured first.
///
/// All builder traits are generic over the node types and are invoked with the [`BuilderContext`]
/// that gives access to internals of the that are needed to configure the components. This include
/// the original config, chain spec, the database provider and the task executor,
///
/// ## Hooks
///
/// Once all the components are configured, the builder can be used to set hooks that are run at
/// specific points in the node's lifecycle. This way custom services can be spawned before the node
/// is launched [`NodeBuilderWithComponents::on_component_initialized`], or once the rpc server(s)
/// are launched [`NodeBuilderWithComponents::on_rpc_started`]. The
/// [`NodeBuilderWithComponents::extend_rpc_modules`] can be used to inject custom rpc modules into
/// the rpc server before it is launched. See also [`RpcContext`] All hooks accept a closure that is
/// then invoked at the appropriate time in the node's launch process.
///
/// ## Flow
///
/// The [`NodeBuilder`] is intended to sit behind a CLI that provides the necessary [`NodeConfig`]
/// input: [`NodeBuilder::new`]
///
/// From there the builder is configured with the node's types, components, and hooks, then launched
/// with the [`WithLaunchContext::launch`] method. On launch all the builtin internals, such as the
/// `Database` and its providers [`BlockchainProvider`] are initialized before the configured
/// [`NodeComponentsBuilder`] is invoked with the [`BuilderContext`] to create the transaction pool,
/// network, and payload builder components. When the RPC is configured, the corresponding hooks are
/// invoked to allow for custom rpc modules to be injected into the rpc server:
/// [`NodeBuilderWithComponents::extend_rpc_modules`]
///
/// Finally all components are created and all services are launched and a [`NodeHandle`] is
/// returned that can be used to interact with the node: [`FullNode`]
///
/// The following diagram shows the flow of the node builder from CLI to a launched node.
///
/// include_mmd!("docs/mermaid/builder.mmd")
///
/// ## Internals
///
/// The node builder is fully type safe, it uses the [`NodeTypes`] trait to enforce that
/// all components are configured with the correct types. However the database types and with that
/// the provider trait implementations are currently created by the builder itself during the launch
/// process, hence the database type is not part of the [`NodeTypes`] trait and the node's
/// components, that depend on the database, are configured separately. In order to have a nice
/// trait that encapsulates the entire node the
/// [`FullNodeComponents`](reth_node_api::FullNodeComponents) trait was introduced. This
/// trait has convenient associated types for all the components of the node. After
/// [`WithLaunchContext::launch`] the [`NodeHandle`] contains an instance of [`FullNode`] that
/// implements the [`FullNodeComponents`](reth_node_api::FullNodeComponents) trait and has access to
/// all the components of the node. Internally the node builder uses several generic adapter types
/// that are then map to traits with associated types for ease of use.
///
/// ### Limitations
///
/// Currently the launch process is limited to ethereum nodes and requires all the components
/// specified above. It also expects beacon consensus with the ethereum engine API that is
/// configured by the builder itself during launch. This might change in the future.
///
/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html
pub struct NodeBuilder<DB, ChainSpec> {
    /// All settings for how the node should be configured.
    config: NodeConfig<ChainSpec>,
    /// The configured database for the node; `()` until one is attached via `with_database`.
    database: DB,
}
impl<ChainSpec> NodeBuilder<(), ChainSpec> {
    /// Creates a [`NodeBuilder`] from the given configuration, without a database attached yet.
    ///
    /// Attach one with [`NodeBuilder::with_database`] before configuring the node's types.
    pub const fn new(config: NodeConfig<ChainSpec>) -> Self {
        Self { database: (), config }
    }
}
impl<DB, ChainSpec> NodeBuilder<DB, ChainSpec> {
    /// Gives shared access to the node builder's configuration.
    pub const fn config(&self) -> &NodeConfig<ChainSpec> {
        &self.config
    }
    /// Gives mutable access to the node builder's configuration.
    pub const fn config_mut(&mut self) -> &mut NodeConfig<ChainSpec> {
        &mut self.config
    }
    /// Gives shared access to the configured database.
    pub const fn db(&self) -> &DB {
        &self.database
    }
    /// Gives mutable access to the configured database.
    pub const fn db_mut(&mut self) -> &mut DB {
        &mut self.database
    }
    /// Passes the builder through a fallible transformation.
    pub fn try_apply<F, R>(self, f: F) -> Result<Self, R>
    where
        F: FnOnce(Self) -> Result<Self, R>,
    {
        f(self)
    }
    /// Passes the builder through a fallible transformation when `cond` holds; otherwise
    /// returns the builder unchanged.
    pub fn try_apply_if<F, R>(self, cond: bool, f: F) -> Result<Self, R>
    where
        F: FnOnce(Self) -> Result<Self, R>,
    {
        match cond {
            true => f(self),
            false => Ok(self),
        }
    }
    /// Passes the builder through an infallible transformation.
    pub fn apply<F>(self, f: F) -> Self
    where
        F: FnOnce(Self) -> Self,
    {
        f(self)
    }
    /// Passes the builder through an infallible transformation when `cond` holds; otherwise
    /// returns the builder unchanged.
    pub fn apply_if<F>(self, cond: bool, f: F) -> Self
    where
        F: FnOnce(Self) -> Self,
    {
        match cond {
            true => f(self),
            false => self,
        }
    }
}
impl<DB, ChainSpec: EthChainSpec> NodeBuilder<DB, ChainSpec> {
    /// Configures the underlying database that the node will use.
    pub fn with_database<D>(self, database: D) -> NodeBuilder<D, ChainSpec> {
        NodeBuilder { config: self.config, database }
    }
    /// Preconfigure the builder with the context to launch the node.
    ///
    /// This provides the task executor and the data directory for the node.
    pub const fn with_launch_context(self, task_executor: TaskExecutor) -> WithLaunchContext<Self> {
        WithLaunchContext { builder: self, task_executor }
    }
    /// Creates an _ephemeral_ preconfigured node for testing purposes.
    ///
    /// Uses a fresh temporary directory as the datadir; see
    /// [`Self::testing_node_with_datadir`] for the rest of the setup.
    #[cfg(feature = "test-utils")]
    pub fn testing_node(
        self,
        task_executor: TaskExecutor,
    ) -> WithLaunchContext<
        NodeBuilder<Arc<reth_db::test_utils::TempDatabase<reth_db::DatabaseEnv>>, ChainSpec>,
    > {
        let path = reth_db::test_utils::tempdir_path();
        self.testing_node_with_datadir(task_executor, path)
    }
    /// Creates a preconfigured node for testing purposes with a specific datadir.
    #[cfg(feature = "test-utils")]
    pub fn testing_node_with_datadir(
        mut self,
        task_executor: TaskExecutor,
        datadir: impl Into<std::path::PathBuf>,
    ) -> WithLaunchContext<
        NodeBuilder<Arc<reth_db::test_utils::TempDatabase<reth_db::DatabaseEnv>>, ChainSpec>,
    > {
        let path = reth_node_core::dirs::MaybePlatformPath::<DataDirPath>::from(datadir.into());
        // Record the datadir override in the config so later launch steps resolve the same paths.
        self.config = self.config.with_datadir_args(reth_node_core::args::DatadirArgs {
            datadir: path.clone(),
            ..Default::default()
        });
        // Resolve the concrete data dir for the configured chain before creating the test db.
        let data_dir =
            path.unwrap_or_chain_default(self.config.chain.chain(), self.config.datadir.clone());
        let db = reth_db::test_utils::create_test_rw_db_with_path(data_dir.db());
        WithLaunchContext { builder: self.with_database(db), task_executor }
    }
}
impl<DB, ChainSpec> NodeBuilder<DB, ChainSpec>
where
    DB: Database + DatabaseMetrics + Clone + Unpin + 'static,
    ChainSpec: EthChainSpec + EthereumHardforks,
{
    /// Configures the types of the node.
    ///
    /// Uses the builtin [`BlockchainProvider`] as the provider type (via [`RethFullAdapter`]).
    pub fn with_types<T>(self) -> NodeBuilderWithTypes<RethFullAdapter<DB, T>>
    where
        T: NodeTypesForProvider<ChainSpec = ChainSpec>,
    {
        self.with_types_and_provider()
    }
    /// Configures the types of the node and the provider type that will be used by the node.
    pub fn with_types_and_provider<T, P>(
        self,
    ) -> NodeBuilderWithTypes<FullNodeTypesAdapter<T, DB, P>>
    where
        T: NodeTypesForProvider<ChainSpec = ChainSpec>,
        P: FullProvider<NodeTypesWithDBAdapter<T, DB>>,
    {
        NodeBuilderWithTypes::new(self.config, self.database)
    }
    /// Preconfigures the node with a specific node implementation.
    ///
    /// This is a convenience method that sets the node's types and components in one call.
    pub fn node<N>(
        self,
        node: N,
    ) -> NodeBuilderWithComponents<RethFullAdapter<DB, N>, N::ComponentsBuilder, N::AddOns>
    where
        N: Node<RethFullAdapter<DB, N>, ChainSpec = ChainSpec> + NodeTypesForProvider,
    {
        self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons())
    }
}
/// A [`NodeBuilder`] with its launch context already configured.
///
/// This exposes the same methods as [`NodeBuilder`] but with the launch context already configured,
/// See [`WithLaunchContext::launch`]
pub struct WithLaunchContext<Builder> {
    /// The wrapped builder in its current state.
    builder: Builder,
    /// Executor used to spawn the node's tasks during launch.
    task_executor: TaskExecutor,
}
impl<Builder> WithLaunchContext<Builder> {
    /// Returns a reference to the task executor that will spawn the node's tasks.
    pub const fn task_executor(&self) -> &TaskExecutor {
        &self.task_executor
    }
}
impl<DB, ChainSpec> WithLaunchContext<NodeBuilder<DB, ChainSpec>> {
    /// Returns a reference to the node builder's config.
    pub const fn config(&self) -> &NodeConfig<ChainSpec> {
        // Delegates to the inner builder.
        self.builder.config()
    }
}
impl<DB, ChainSpec> WithLaunchContext<NodeBuilder<DB, ChainSpec>>
where
    DB: Database + DatabaseMetrics + Clone + Unpin + 'static,
    ChainSpec: EthChainSpec + EthereumHardforks,
{
    /// Configures the types of the node.
    pub fn with_types<T>(self) -> WithLaunchContext<NodeBuilderWithTypes<RethFullAdapter<DB, T>>>
    where
        T: NodeTypesForProvider<ChainSpec = ChainSpec>,
    {
        // The task executor is carried through unchanged; only the inner builder advances state.
        WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor }
    }
    /// Configures the types of the node and the provider type that will be used by the node.
    pub fn with_types_and_provider<T, P>(
        self,
    ) -> WithLaunchContext<NodeBuilderWithTypes<FullNodeTypesAdapter<T, DB, P>>>
    where
        T: NodeTypesForProvider<ChainSpec = ChainSpec>,
        P: FullProvider<NodeTypesWithDBAdapter<T, DB>>,
    {
        WithLaunchContext {
            builder: self.builder.with_types_and_provider(),
            task_executor: self.task_executor,
        }
    }
    /// Preconfigures the node with a specific node implementation.
    ///
    /// This is a convenience method that sets the node's types and components in one call.
    pub fn node<N>(
        self,
        node: N,
    ) -> WithLaunchContext<
        NodeBuilderWithComponents<RethFullAdapter<DB, N>, N::ComponentsBuilder, N::AddOns>,
    >
    where
        N: Node<RethFullAdapter<DB, N>, ChainSpec = ChainSpec> + NodeTypesForProvider,
    {
        self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons())
    }
    /// Launches a preconfigured [Node]
    ///
    /// This bootstraps the node internals, creates all the components with the given [Node]
    ///
    /// Returns a [`NodeHandle`](crate::NodeHandle) that can be used to interact with the node.
    pub async fn launch_node<N>(
        self,
        node: N,
    ) -> eyre::Result<
        <EngineNodeLauncher as LaunchNode<
            NodeBuilderWithComponents<RethFullAdapter<DB, N>, N::ComponentsBuilder, N::AddOns>,
        >>::Node,
    >
    where
        N: Node<RethFullAdapter<DB, N>, ChainSpec = ChainSpec> + NodeTypesForProvider,
        N::AddOns: RethRpcAddOns<
            NodeAdapter<
                RethFullAdapter<DB, N>,
                <N::ComponentsBuilder as NodeComponentsBuilder<RethFullAdapter<DB, N>>>::Components,
            >,
        >,
        N::Primitives: FullNodePrimitives,
        EngineNodeLauncher: LaunchNode<
            NodeBuilderWithComponents<RethFullAdapter<DB, N>, N::ComponentsBuilder, N::AddOns>,
        >,
    {
        // Shorthand for configuring types + components and then launching with the
        // engine API launcher.
        self.node(node).launch().await
    }
}
impl<T: FullNodeTypes> WithLaunchContext<NodeBuilderWithTypes<T>> {
    /// Advances the state of the node builder to the next state where all components are configured
    pub fn with_components<CB>(
        self,
        components_builder: CB,
    ) -> WithLaunchContext<NodeBuilderWithComponents<T, CB, ()>>
    where
        CB: NodeComponentsBuilder<T>,
    {
        // Advance the wrapped builder; the task executor travels along unchanged.
        let Self { builder, task_executor } = self;
        WithLaunchContext { builder: builder.with_components(components_builder), task_executor }
    }
}
impl<T, CB> WithLaunchContext<NodeBuilderWithComponents<T, CB, ()>>
where
    T: FullNodeTypes,
    CB: NodeComponentsBuilder<T>,
{
    /// Advances the state of the node builder to the next state where all customizable
    /// [`NodeAddOns`] types are configured.
    pub fn with_add_ons<AO>(
        self,
        add_ons: AO,
    ) -> WithLaunchContext<NodeBuilderWithComponents<T, CB, AO>>
    where
        AO: NodeAddOns<NodeAdapter<T, CB::Components>>,
    {
        // Advance the wrapped builder; the task executor travels along unchanged.
        let Self { builder, task_executor } = self;
        WithLaunchContext { builder: builder.with_add_ons(add_ons), task_executor }
    }
}
impl<T, CB, AO> WithLaunchContext<NodeBuilderWithComponents<T, CB, AO>>
where
    T: FullNodeTypes,
    CB: NodeComponentsBuilder<T>,
    AO: RethRpcAddOns<NodeAdapter<T, CB::Components>>,
{
    /// Returns a reference to the node builder's config.
    pub const fn config(&self) -> &NodeConfig<<T::Types as NodeTypes>::ChainSpec> {
        &self.builder.config
    }
    /// Returns a reference to node's database.
    pub const fn db(&self) -> &T::DB {
        &self.builder.adapter.database
    }
    /// Returns a mutable reference to node's database.
    pub const fn db_mut(&mut self) -> &mut T::DB {
        &mut self.builder.adapter.database
    }
    /// Applies a fallible function to the builder.
    pub fn try_apply<F, R>(self, f: F) -> Result<Self, R>
    where
        F: FnOnce(Self) -> Result<Self, R>,
    {
        f(self)
    }
    /// Applies a fallible function to the builder, if the condition is `true`.
    pub fn try_apply_if<F, R>(self, cond: bool, f: F) -> Result<Self, R>
    where
        F: FnOnce(Self) -> Result<Self, R>,
    {
        if cond {
            f(self)
        } else {
            Ok(self)
        }
    }
    /// Apply a function to the builder
    pub fn apply<F>(self, f: F) -> Self
    where
        F: FnOnce(Self) -> Self,
    {
        f(self)
    }
    /// Apply a function to the builder, if the condition is `true`.
    pub fn apply_if<F>(self, cond: bool, f: F) -> Self
    where
        F: FnOnce(Self) -> Self,
    {
        if cond {
            f(self)
        } else {
            self
        }
    }
    /// Sets the hook that is run once the node's components are initialized.
    // The following hook setters all delegate to the inner builder and rewrap,
    // carrying the task executor along unchanged.
    pub fn on_component_initialized<F>(self, hook: F) -> Self
    where
        F: FnOnce(NodeAdapter<T, CB::Components>) -> eyre::Result<()> + Send + 'static,
    {
        Self {
            builder: self.builder.on_component_initialized(hook),
            task_executor: self.task_executor,
        }
    }
    /// Sets the hook that is run once the node has started.
    pub fn on_node_started<F>(self, hook: F) -> Self
    where
        F: FnOnce(FullNode<NodeAdapter<T, CB::Components>, AO>) -> eyre::Result<()>
            + Send
            + 'static,
    {
        Self { builder: self.builder.on_node_started(hook), task_executor: self.task_executor }
    }
    /// Modifies the addons with the given closure.
    pub fn map_add_ons<F>(self, f: F) -> Self
    where
        F: FnOnce(AO) -> AO,
    {
        Self { builder: self.builder.map_add_ons(f), task_executor: self.task_executor }
    }
    /// Sets the hook that is run once the rpc server is started.
    pub fn on_rpc_started<F>(self, hook: F) -> Self
    where
        F: FnOnce(
                RpcContext<'_, NodeAdapter<T, CB::Components>, AO::EthApi>,
                RethRpcServerHandles,
            ) -> eyre::Result<()>
            + Send
            + 'static,
    {
        Self { builder: self.builder.on_rpc_started(hook), task_executor: self.task_executor }
    }
    /// Sets the hook that is run to configure the rpc modules.
    ///
    /// This hook can obtain the node's components (txpool, provider, etc.) and can modify the
    /// modules that the RPC server installs.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use jsonrpsee::{core::RpcResult, proc_macros::rpc};
    ///
    /// #[derive(Clone)]
    /// struct CustomApi<Pool> { pool: Pool }
    ///
    /// #[rpc(server, namespace = "custom")]
    /// impl CustomApi {
    ///     #[method(name = "hello")]
    ///     async fn hello(&self) -> RpcResult<String> {
    ///         Ok("World".to_string())
    ///     }
    /// }
    ///
    /// let node = NodeBuilder::new(config)
    ///     .node(EthereumNode::default())
    ///     .extend_rpc_modules(|ctx| {
    ///         // Access node components, so they can used by the CustomApi
    ///         let pool = ctx.pool().clone();
    ///
    ///         // Add custom RPC namespace
    ///         ctx.modules.merge_configured(CustomApi { pool }.into_rpc())?;
    ///
    ///         Ok(())
    ///     })
    ///     .build()?;
    /// ```
    pub fn extend_rpc_modules<F>(self, hook: F) -> Self
    where
        F: FnOnce(RpcContext<'_, NodeAdapter<T, CB::Components>, AO::EthApi>) -> eyre::Result<()>
            + Send
            + 'static,
    {
        Self { builder: self.builder.extend_rpc_modules(hook), task_executor: self.task_executor }
    }
    /// Installs an `ExEx` (Execution Extension) in the node.
    ///
    /// # Note
    ///
    /// The `ExEx` ID must be unique.
    pub fn install_exex<F, R, E>(self, exex_id: impl Into<String>, exex: F) -> Self
    where
        F: FnOnce(ExExContext<NodeAdapter<T, CB::Components>>) -> R + Send + 'static,
        R: Future<Output = eyre::Result<E>> + Send,
        E: Future<Output = eyre::Result<()>> + Send,
    {
        Self {
            builder: self.builder.install_exex(exex_id, exex),
            task_executor: self.task_executor,
        }
    }
    /// Installs an `ExEx` (Execution Extension) in the node if the condition is true.
    ///
    /// # Note
    ///
    /// The `ExEx` ID must be unique.
    pub fn install_exex_if<F, R, E>(self, cond: bool, exex_id: impl Into<String>, exex: F) -> Self
    where
        F: FnOnce(ExExContext<NodeAdapter<T, CB::Components>>) -> R + Send + 'static,
        R: Future<Output = eyre::Result<E>> + Send,
        E: Future<Output = eyre::Result<()>> + Send,
    {
        if cond {
            self.install_exex(exex_id, exex)
        } else {
            self
        }
    }
    /// Launches the node with the given launcher.
    pub async fn launch_with<L>(self, launcher: L) -> eyre::Result<L::Node>
    where
        L: LaunchNode<NodeBuilderWithComponents<T, CB, AO>>,
    {
        launcher.launch_node(self.builder).await
    }
    /// Launches the node with the given closure.
    pub fn launch_with_fn<L, R>(self, launcher: L) -> R
    where
        L: FnOnce(Self) -> R,
    {
        launcher(self)
    }
    /// Check that the builder can be launched
    ///
    /// This is useful when writing tests to ensure that the builder is configured correctly.
    pub const fn check_launch(self) -> Self {
        // Intentionally a no-op: reaching this state type-checks that launch is possible.
        self
    }
    /// Launches the node with the [`EngineNodeLauncher`] that sets up engine API consensus and rpc
    pub async fn launch(
        self,
    ) -> eyre::Result<<EngineNodeLauncher as LaunchNode<NodeBuilderWithComponents<T, CB, AO>>>::Node>
    where
        EngineNodeLauncher: LaunchNode<NodeBuilderWithComponents<T, CB, AO>>,
    {
        let launcher = self.engine_api_launcher();
        self.builder.launch_with(launcher).await
    }
    /// Launches the node with the [`DebugNodeLauncher`].
    ///
    /// This is equivalent to [`WithLaunchContext::launch`], but will enable the debugging features,
    /// if they are configured.
    pub async fn launch_with_debug_capabilities(
        self,
    ) -> eyre::Result<<DebugNodeLauncher as LaunchNode<NodeBuilderWithComponents<T, CB, AO>>>::Node>
    where
        T::Types: DebugNode<NodeAdapter<T, CB::Components>>,
        DebugNodeLauncher: LaunchNode<NodeBuilderWithComponents<T, CB, AO>>,
    {
        let Self { builder, task_executor } = self;
        // Wrap the engine launcher so the debug layer can hook into the launch process.
        let engine_tree_config = builder.config.engine.tree_config();
        let launcher = DebugNodeLauncher::new(EngineNodeLauncher::new(
            task_executor,
            builder.config.datadir(),
            engine_tree_config,
        ));
        builder.launch_with(launcher).await
    }
    /// Returns an [`EngineNodeLauncher`] that can be used to launch the node with engine API
    /// support.
    pub fn engine_api_launcher(&self) -> EngineNodeLauncher {
        // The engine tree config is derived from the node config captured at build time.
        let engine_tree_config = self.builder.config.engine.tree_config();
        EngineNodeLauncher::new(
            self.task_executor.clone(),
            self.builder.config.datadir(),
            engine_tree_config,
        )
    }
}
/// Captures the necessary context for building the components of the node.
pub struct BuilderContext<Node: FullNodeTypes> {
    /// The current head of the blockchain at launch.
    pub(crate) head: Head,
    /// The configured provider to interact with the blockchain.
    pub(crate) provider: Node::Provider,
    /// The executor of the node, used to spawn component tasks.
    pub(crate) executor: TaskExecutor,
    /// Config container bundling the node config and the loaded `reth.toml` config.
    pub(crate) config_container: WithConfigs<<Node::Types as NodeTypes>::ChainSpec>,
}
impl<Node: FullNodeTypes> BuilderContext<Node> {
    /// Create a new instance of [`BuilderContext`]
    pub const fn new(
        head: Head,
        provider: Node::Provider,
        executor: TaskExecutor,
        config_container: WithConfigs<<Node::Types as NodeTypes>::ChainSpec>,
    ) -> Self {
        Self { head, provider, executor, config_container }
    }
    /// Returns the configured provider to interact with the blockchain.
    pub const fn provider(&self) -> &Node::Provider {
        &self.provider
    }
    /// Returns the current head of the blockchain at launch.
    pub const fn head(&self) -> Head {
        self.head
    }
    /// Returns the config of the node.
    pub const fn config(&self) -> &NodeConfig<<Node::Types as NodeTypes>::ChainSpec> {
        &self.config_container.config
    }
    /// Returns the loaded reth.toml config.
    pub const fn reth_config(&self) -> &reth_config::Config {
        &self.config_container.toml_config
    }
    /// Returns the executor of the node.
    ///
    /// This can be used to execute async tasks or functions during the setup.
    pub const fn task_executor(&self) -> &TaskExecutor {
        &self.executor
    }
    /// Returns the chain spec of the node.
    pub fn chain_spec(&self) -> Arc<<Node::Types as NodeTypes>::ChainSpec> {
        self.provider().chain_spec()
    }
    /// Returns true if the node is configured as --dev
    pub const fn is_dev(&self) -> bool {
        self.config().dev.dev
    }
    /// Returns the transaction pool config of the node.
    pub fn pool_config(&self) -> PoolConfig {
        self.config().txpool.pool_config()
    }
    /// Loads `EnvKzgSettings::Default`.
    // Infallible today; the Result return type is kept for interface stability.
    pub const fn kzg_settings(&self) -> eyre::Result<EnvKzgSettings> {
        Ok(EnvKzgSettings::Default)
    }
    /// Returns the config for payload building.
    pub fn payload_builder_config(&self) -> impl PayloadBuilderConfig {
        self.config().builder.clone()
    }
    /// Convenience function to start the network tasks.
    ///
    /// Spawns the configured network and associated tasks and returns the [`NetworkHandle`]
    /// connected to that network.
    pub fn start_network<N, Pool>(
        &self,
        builder: NetworkBuilder<(), (), N>,
        pool: Pool,
    ) -> NetworkHandle<N>
    where
        N: NetworkPrimitives,
        Pool: TransactionPool<
                Transaction: PoolTransaction<
                    Consensus = N::BroadcastedTransaction,
                    Pooled = N::PooledTransaction,
                >,
            > + Unpin
            + 'static,
        Node::Provider: BlockReaderFor<N>,
    {
        // Uses the transaction-manager config and propagation policy from the node config.
        self.start_network_with(
            builder,
            pool,
            self.config().network.transactions_manager_config(),
            self.config().network.tx_propagation_policy,
        )
    }
    /// Convenience function to start the network tasks.
    ///
    /// Accepts the config for the transaction task and the policy for propagation.
    ///
    /// Spawns the configured network and associated tasks and returns the [`NetworkHandle`]
    /// connected to that network.
    pub fn start_network_with<Pool, N, Policy>(
        &self,
        builder: NetworkBuilder<(), (), N>,
        pool: Pool,
        tx_config: TransactionsManagerConfig,
        propagation_policy: Policy,
    ) -> NetworkHandle<N>
    where
        N: NetworkPrimitives,
        Pool: TransactionPool<
                Transaction: PoolTransaction<
                    Consensus = N::BroadcastedTransaction,
                    Pooled = N::PooledTransaction,
                >,
            > + Unpin
            + 'static,
        Node::Provider: BlockReaderFor<N>,
        Policy: TransactionPropagationPolicy + Debug,
    {
        // Split the network stack into its three long-running tasks plus the handle.
        let (handle, network, txpool, eth) = builder
            .transactions_with_policy(pool, tx_config, propagation_policy)
            .request_handler(self.provider().clone())
            .split_with_handle();
        self.executor.spawn_critical("p2p txpool", Box::pin(txpool));
        self.executor.spawn_critical("p2p eth request handler", Box::pin(eth));
        let default_peers_path = self.config().datadir().known_peers();
        let known_peers_file = self.config().network.persistent_peers_file(default_peers_path);
        // On graceful shutdown, persist the currently known peers (best-effort: a write
        // failure is logged but does not abort shutdown).
        self.executor.spawn_critical_with_graceful_shutdown_signal(
            "p2p network task",
            |shutdown| {
                Box::pin(network.run_until_graceful_shutdown(shutdown, |network| {
                    if let Some(peers_file) = known_peers_file {
                        let num_known_peers = network.num_known_peers();
                        trace!(target: "reth::cli", peers_file=?peers_file, num_peers=%num_known_peers, "Saving current peers");
                        match network.write_peers_to_file(peers_file.as_path()) {
                            Ok(_) => {
                                info!(target: "reth::cli", peers_file=?peers_file, "Wrote network peers to file");
                            }
                            Err(err) => {
                                warn!(target: "reth::cli", %err, "Failed to write network peers to file");
                            }
                        }
                    }
                }))
            },
        );
        handle
    }
    /// Get the network secret from the given data dir
    fn network_secret(&self, data_dir: &ChainPath<DataDirPath>) -> eyre::Result<SecretKey> {
        // Prefer an explicitly configured p2p secret key path; otherwise use the datadir default.
        let network_secret_path =
            self.config().network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
        let secret_key = get_secret_key(&network_secret_path)?;
        Ok(secret_key)
    }
    /// Builds the [`NetworkConfig`].
    pub fn build_network_config<N>(
        &self,
        network_builder: NetworkConfigBuilder<N>,
    ) -> NetworkConfig<Node::Provider, N>
    where
        N: NetworkPrimitives,
        Node::Types: NodeTypes<ChainSpec: Hardforks>,
    {
        network_builder.build(self.provider.clone())
    }
}
impl<Node: FullNodeTypes<Types: NodeTypes<ChainSpec: Hardforks>>> BuilderContext<Node> {
/// Creates the [`NetworkBuilder`] for the node.
pub async fn network_builder<N>(&self) -> eyre::Result<NetworkBuilder<(), (), N>>
where
N: NetworkPrimitives,
{
let network_config = self.network_config()?;
let builder = NetworkManager::builder(network_config).await?;
Ok(builder)
}
/// Returns the default network config for the node.
pub fn network_config<N>(&self) -> eyre::Result<NetworkConfig<Node::Provider, N>>
where
N: NetworkPrimitives,
{
let network_builder = self.network_config_builder();
Ok(self.build_network_config(network_builder?))
}
/// Get the [`NetworkConfigBuilder`].
pub fn network_config_builder<N>(&self) -> eyre::Result<NetworkConfigBuilder<N>>
where
N: NetworkPrimitives,
{
let secret_key = self.network_secret(&self.config().datadir())?;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/builder/states.rs | crates/node/builder/src/builder/states.rs | //! Node builder states and helper traits.
//!
//! Keeps track of the current state of the node builder.
//!
//! The node builder process is essentially a state machine that transitions through various states
//! before the node can be launched.
use crate::{
components::{NodeComponents, NodeComponentsBuilder},
hooks::NodeHooks,
launch::LaunchNode,
rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext},
AddOns, ComponentsFor, FullNode,
};
use reth_exex::ExExContext;
use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes};
use reth_node_core::node_config::NodeConfig;
use reth_tasks::TaskExecutor;
use std::{fmt, fmt::Debug, future::Future};
/// A node builder that also has the configured types.
pub struct NodeBuilderWithTypes<T: FullNodeTypes> {
    /// All settings for how the node should be configured.
    config: NodeConfig<<T::Types as NodeTypes>::ChainSpec>,
    /// Adapter holding the database configured for the node.
    adapter: NodeTypesAdapter<T>,
}
impl<T: FullNodeTypes> NodeBuilderWithTypes<T> {
    /// Creates a new instance of the node builder with the given configuration and types.
    pub const fn new(
        config: NodeConfig<<T::Types as NodeTypes>::ChainSpec>,
        database: T::DB,
    ) -> Self {
        Self { config, adapter: NodeTypesAdapter::new(database) }
    }
    /// Advances the state of the node builder to the next state where all components are configured
    pub fn with_components<CB>(self, components_builder: CB) -> NodeBuilderWithComponents<T, CB, ()>
    where
        CB: NodeComponentsBuilder<T>,
    {
        // Add-ons start out empty: default hooks, no ExExs, unit add-ons placeholder.
        let add_ons = AddOns { hooks: NodeHooks::default(), exexs: Vec::new(), add_ons: () };
        NodeBuilderWithComponents {
            config: self.config,
            adapter: self.adapter,
            components_builder,
            add_ons,
        }
    }
}
/// Container for the node's types and the database the node uses.
pub struct NodeTypesAdapter<T: FullNodeTypes> {
    /// The database instance used by the node.
    pub database: T::DB,
}
impl<T: FullNodeTypes> NodeTypesAdapter<T> {
    /// Create a new adapter from the given node types, wrapping the database.
    pub(crate) const fn new(database: T::DB) -> Self {
        Self { database }
    }
}
impl<T: FullNodeTypes> fmt::Debug for NodeTypesAdapter<T> {
    /// Formats the adapter, eliding the database and type details.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut repr = f.debug_struct("NodeTypesAdapter");
        repr.field("db", &"...");
        repr.field("types", &"...");
        repr.finish()
    }
}
/// Container for the node's types and the components and other internals that can be used by
/// addons of the node.
#[derive(Debug)]
pub struct NodeAdapter<T: FullNodeTypes, C: NodeComponents<T> = ComponentsFor<T>> {
    /// The components of the node.
    pub components: C,
    /// The task executor for the node.
    pub task_executor: TaskExecutor,
    /// The provider of the node.
    pub provider: T::Provider,
}
impl<T: FullNodeTypes, C: NodeComponents<T>> FullNodeTypes for NodeAdapter<T, C> {
    // Forwards the wrapped node's type configuration unchanged.
    type Types = T::Types;
    type DB = T::DB;
    type Provider = T::Provider;
}
impl<T: FullNodeTypes, C: NodeComponents<T>> FullNodeComponents for NodeAdapter<T, C> {
    type Pool = C::Pool;
    type Evm = C::Evm;
    type Consensus = C::Consensus;
    type Network = C::Network;
    // Component accessors all delegate to the inner components container.
    fn pool(&self) -> &Self::Pool {
        self.components.pool()
    }
    fn evm_config(&self) -> &Self::Evm {
        self.components.evm_config()
    }
    fn consensus(&self) -> &Self::Consensus {
        self.components.consensus()
    }
    fn network(&self) -> &Self::Network {
        self.components.network()
    }
    fn payload_builder_handle(
        &self,
    ) -> &reth_payload_builder::PayloadBuilderHandle<
        <Self::Types as reth_node_api::NodeTypes>::Payload,
    > {
        self.components.payload_builder_handle()
    }
    // Provider and executor are owned by the adapter itself.
    fn provider(&self) -> &Self::Provider {
        &self.provider
    }
    fn task_executor(&self) -> &TaskExecutor {
        &self.task_executor
    }
}
impl<T: FullNodeTypes, C: NodeComponents<T>> Clone for NodeAdapter<T, C> {
    /// Clones every field individually; cheapness depends on the component types.
    fn clone(&self) -> Self {
        let components = self.components.clone();
        let task_executor = self.task_executor.clone();
        let provider = self.provider.clone();
        Self { components, task_executor, provider }
    }
}
/// A fully type configured node builder.
///
/// Supports adding additional addons to the node.
pub struct NodeBuilderWithComponents<
    T: FullNodeTypes,
    CB: NodeComponentsBuilder<T>,
    AO: NodeAddOns<NodeAdapter<T, CB::Components>>,
> {
    /// All settings for how the node should be configured.
    pub config: NodeConfig<<T::Types as NodeTypes>::ChainSpec>,
    /// Adapter for the underlying node types and database
    pub adapter: NodeTypesAdapter<T>,
    /// Builder that creates the type-specific components during launch.
    pub components_builder: CB,
    /// Additional node extensions (hooks, ExExs, and captured add-ons).
    pub add_ons: AddOns<NodeAdapter<T, CB::Components>, AO>,
}
impl<T, CB> NodeBuilderWithComponents<T, CB, ()>
where
    T: FullNodeTypes,
    CB: NodeComponentsBuilder<T>,
{
    /// Advances the state of the node builder to the next state where all customizable
    /// [`NodeAddOns`] types are configured.
    ///
    /// The given add-ons are installed with empty hooks and no `ExEx`s; the previous
    /// (unit) add-ons slot is discarded.
    pub fn with_add_ons<AO>(self, add_ons: AO) -> NodeBuilderWithComponents<T, CB, AO>
    where
        AO: NodeAddOns<NodeAdapter<T, CB::Components>>,
    {
        NodeBuilderWithComponents {
            config: self.config,
            adapter: self.adapter,
            components_builder: self.components_builder,
            add_ons: AddOns { hooks: NodeHooks::default(), exexs: Vec::new(), add_ons },
        }
    }
}
impl<T, CB, AO> NodeBuilderWithComponents<T, CB, AO>
where
    T: FullNodeTypes,
    CB: NodeComponentsBuilder<T>,
    AO: NodeAddOns<NodeAdapter<T, CB::Components>>,
{
    /// Sets the hook that is run once the node's components are initialized.
    pub fn on_component_initialized<F>(mut self, hook: F) -> Self
    where
        F: FnOnce(NodeAdapter<T, CB::Components>) -> eyre::Result<()> + Send + 'static,
    {
        self.add_ons.hooks.set_on_component_initialized(hook);
        self
    }
    /// Sets the hook that is run once the node has started.
    pub fn on_node_started<F>(mut self, hook: F) -> Self
    where
        F: FnOnce(FullNode<NodeAdapter<T, CB::Components>, AO>) -> eyre::Result<()>
            + Send
            + 'static,
    {
        self.add_ons.hooks.set_on_node_started(hook);
        self
    }
    /// Installs an `ExEx` (Execution Extension) in the node.
    ///
    /// # Note
    ///
    /// The `ExEx` ID must be unique.
    pub fn install_exex<F, R, E>(mut self, exex_id: impl Into<String>, exex: F) -> Self
    where
        F: FnOnce(ExExContext<NodeAdapter<T, CB::Components>>) -> R + Send + 'static,
        R: Future<Output = eyre::Result<E>> + Send,
        E: Future<Output = eyre::Result<()>> + Send,
    {
        // The launcher resolves duplicate IDs; here the pair is just recorded.
        self.add_ons.exexs.push((exex_id.into(), Box::new(exex)));
        self
    }
    /// Launches the node with the given closure.
    pub fn launch_with_fn<L, R>(self, launcher: L) -> R
    where
        L: FnOnce(Self) -> R,
    {
        launcher(self)
    }
    /// Check that the builder can be launched
    ///
    /// This is useful when writing tests to ensure that the builder is configured correctly.
    // Intentionally a no-op: reaching this call proves the builder type-checks end to end.
    pub const fn check_launch(self) -> Self {
        self
    }
    /// Modifies the addons with the given closure.
    pub fn map_add_ons<F>(mut self, f: F) -> Self
    where
        F: FnOnce(AO) -> AO,
    {
        self.add_ons.add_ons = f(self.add_ons.add_ons);
        self
    }
}
// RPC-specific builder methods; only available when the add-ons expose an RPC server.
impl<T, CB, AO> NodeBuilderWithComponents<T, CB, AO>
where
    T: FullNodeTypes,
    CB: NodeComponentsBuilder<T>,
    AO: RethRpcAddOns<NodeAdapter<T, CB::Components>>,
{
    /// Launches the node with the given launcher.
    pub async fn launch_with<L>(self, launcher: L) -> eyre::Result<L::Node>
    where
        L: LaunchNode<Self>,
    {
        launcher.launch_node(self).await
    }
    /// Sets the hook that is run once the rpc server is started.
    pub fn on_rpc_started<F>(self, hook: F) -> Self
    where
        F: FnOnce(
                RpcContext<'_, NodeAdapter<T, CB::Components>, AO::EthApi>,
                RethRpcServerHandles,
            ) -> eyre::Result<()>
            + Send
            + 'static,
    {
        self.map_add_ons(|mut add_ons| {
            add_ons.hooks_mut().set_on_rpc_started(hook);
            add_ons
        })
    }
    /// Sets the hook that is run to configure the rpc modules.
    pub fn extend_rpc_modules<F>(self, hook: F) -> Self
    where
        F: FnOnce(RpcContext<'_, NodeAdapter<T, CB::Components>, AO::EthApi>) -> eyre::Result<()>
            + Send
            + 'static,
    {
        self.map_add_ons(|mut add_ons| {
            add_ons.hooks_mut().set_extend_rpc_modules(hook);
            add_ons
        })
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::components::Components;
    use reth_consensus::noop::NoopConsensus;
    use reth_db_api::mock::DatabaseMock;
    use reth_ethereum_engine_primitives::EthEngineTypes;
    use reth_evm::noop::NoopEvmConfig;
    use reth_evm_ethereum::MockEvmConfig;
    use reth_network::EthNetworkPrimitives;
    use reth_network_api::noop::NoopNetwork;
    use reth_node_api::FullNodeTypesAdapter;
    use reth_node_ethereum::EthereumNode;
    use reth_payload_builder::PayloadBuilderHandle;
    use reth_provider::noop::NoopProvider;
    use reth_tasks::TaskManager;
    use reth_transaction_pool::noop::NoopTransactionPool;
    #[test]
    fn test_noop_components() {
        // Assemble a fully no-op component set to type-check the adapter wiring.
        let components = Components::<
            FullNodeTypesAdapter<EthereumNode, DatabaseMock, NoopProvider>,
            NoopNetwork<EthNetworkPrimitives>,
            _,
            NoopEvmConfig<MockEvmConfig>,
            _,
        > {
            transaction_pool: NoopTransactionPool::default(),
            evm_config: NoopEvmConfig::default(),
            consensus: NoopConsensus::default(),
            network: NoopNetwork::default(),
            payload_builder_handle: PayloadBuilderHandle::<EthEngineTypes>::noop(),
        };
        let task_executor = {
            // NOTE(review): the runtime is dropped at the end of this block while the
            // executor outlives it — acceptable here since nothing is spawned on it.
            let runtime = tokio::runtime::Runtime::new().unwrap();
            let handle = runtime.handle().clone();
            let manager = TaskManager::new(handle);
            manager.executor()
        };
        let node = NodeAdapter { components, task_executor, provider: NoopProvider::default() };
        // Test that the node implements `FullNodeComponents`.
        <NodeAdapter<_, _> as FullNodeComponents>::pool(&node);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/components/builder.rs | crates/node/builder/src/components/builder.rs | //! A generic [`NodeComponentsBuilder`]
use crate::{
components::{
Components, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, NodeComponents,
PayloadServiceBuilder, PoolBuilder,
},
BuilderContext, ConfigureEvm, FullNodeTypes,
};
use reth_consensus::{noop::NoopConsensus, ConsensusError, FullConsensus};
use reth_network::{types::NetPrimitivesFor, EthNetworkPrimitives, NetworkPrimitives};
use reth_network_api::{noop::NoopNetwork, FullNetwork};
use reth_node_api::{BlockTy, BodyTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy};
use reth_payload_builder::PayloadBuilderHandle;
use reth_transaction_pool::{
noop::NoopTransactionPool, EthPoolTransaction, EthPooledTransaction, PoolPooledTx,
PoolTransaction, TransactionPool,
};
use std::{future::Future, marker::PhantomData};
/// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation.
///
/// This type is stateful and captures the configuration of the node's components.
///
/// ## Component dependencies:
///
/// The components of the node depend on each other:
/// - The payload builder service depends on the transaction pool.
/// - The network depends on the transaction pool.
///
/// We distinguish between different kind of components:
/// - Components that are standalone, such as the transaction pool.
/// - Components that are spawned as a service, such as the payload builder service or the network.
///
/// ## Builder lifecycle:
///
/// First all standalone components are built. Then the service components are spawned.
/// All component builders are captured in the builder state and will be consumed once the node is
/// launched.
#[derive(Debug)]
pub struct ComponentsBuilder<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB> {
    /// Builds the transaction pool.
    pool_builder: PoolB,
    /// Spawns the payload builder service.
    payload_builder: PayloadB,
    /// Builds and launches the network stack.
    network_builder: NetworkB,
    /// Builds the EVM configuration.
    executor_builder: ExecB,
    /// Builds the consensus implementation.
    consensus_builder: ConsB,
    /// Ties the builder to a specific node type without storing one.
    _marker: PhantomData<Node>,
}
impl<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB>
ComponentsBuilder<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB>
{
/// Configures the node types.
pub fn node_types<Types>(
self,
) -> ComponentsBuilder<Types, PoolB, PayloadB, NetworkB, ExecB, ConsB>
where
Types: FullNodeTypes,
{
let Self {
pool_builder,
payload_builder,
network_builder,
executor_builder: evm_builder,
consensus_builder,
_marker,
} = self;
ComponentsBuilder {
executor_builder: evm_builder,
pool_builder,
payload_builder,
network_builder,
consensus_builder,
_marker: Default::default(),
}
}
/// Apply a function to the pool builder.
pub fn map_pool(self, f: impl FnOnce(PoolB) -> PoolB) -> Self {
Self {
pool_builder: f(self.pool_builder),
payload_builder: self.payload_builder,
network_builder: self.network_builder,
executor_builder: self.executor_builder,
consensus_builder: self.consensus_builder,
_marker: self._marker,
}
}
/// Apply a function to the payload builder.
pub fn map_payload(self, f: impl FnOnce(PayloadB) -> PayloadB) -> Self {
Self {
pool_builder: self.pool_builder,
payload_builder: f(self.payload_builder),
network_builder: self.network_builder,
executor_builder: self.executor_builder,
consensus_builder: self.consensus_builder,
_marker: self._marker,
}
}
/// Apply a function to the network builder.
pub fn map_network(self, f: impl FnOnce(NetworkB) -> NetworkB) -> Self {
Self {
pool_builder: self.pool_builder,
payload_builder: self.payload_builder,
network_builder: f(self.network_builder),
executor_builder: self.executor_builder,
consensus_builder: self.consensus_builder,
_marker: self._marker,
}
}
/// Apply a function to the executor builder.
pub fn map_executor(self, f: impl FnOnce(ExecB) -> ExecB) -> Self {
Self {
pool_builder: self.pool_builder,
payload_builder: self.payload_builder,
network_builder: self.network_builder,
executor_builder: f(self.executor_builder),
consensus_builder: self.consensus_builder,
_marker: self._marker,
}
}
/// Apply a function to the consensus builder.
pub fn map_consensus(self, f: impl FnOnce(ConsB) -> ConsB) -> Self {
Self {
pool_builder: self.pool_builder,
payload_builder: self.payload_builder,
network_builder: self.network_builder,
executor_builder: self.executor_builder,
consensus_builder: f(self.consensus_builder),
_marker: self._marker,
}
}
}
impl<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB>
    ComponentsBuilder<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB>
where
    Node: FullNodeTypes,
{
    /// Configures the pool builder.
    ///
    /// This accepts a [`PoolBuilder`] instance that will be used to create the node's transaction
    /// pool.
    pub fn pool<PB>(
        self,
        pool_builder: PB,
    ) -> ComponentsBuilder<Node, PB, PayloadB, NetworkB, ExecB, ConsB>
    where
        PB: PoolBuilder<Node>,
    {
        // The previous pool builder slot is discarded; all other builders carry over.
        let Self {
            pool_builder: _,
            payload_builder,
            network_builder,
            executor_builder: evm_builder,
            consensus_builder,
            _marker,
        } = self;
        ComponentsBuilder {
            pool_builder,
            payload_builder,
            network_builder,
            executor_builder: evm_builder,
            consensus_builder,
            _marker,
        }
    }
    /// Sets [`NoopTransactionPoolBuilder`].
    pub fn noop_pool<Tx>(
        self,
    ) -> ComponentsBuilder<Node, NoopTransactionPoolBuilder<Tx>, PayloadB, NetworkB, ExecB, ConsB>
    {
        ComponentsBuilder {
            pool_builder: NoopTransactionPoolBuilder::<Tx>::default(),
            payload_builder: self.payload_builder,
            network_builder: self.network_builder,
            executor_builder: self.executor_builder,
            consensus_builder: self.consensus_builder,
            _marker: self._marker,
        }
    }
}
impl<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB>
    ComponentsBuilder<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB>
where
    Node: FullNodeTypes,
    PoolB: PoolBuilder<Node>,
{
    /// Configures the network builder.
    ///
    /// This accepts a [`NetworkBuilder`] instance that will be used to create the node's network
    /// stack.
    pub fn network<NB>(
        self,
        network_builder: NB,
    ) -> ComponentsBuilder<Node, PoolB, PayloadB, NB, ExecB, ConsB>
    where
        NB: NetworkBuilder<Node, PoolB::Pool>,
    {
        let Self {
            pool_builder,
            payload_builder,
            network_builder: _,
            executor_builder: evm_builder,
            consensus_builder,
            _marker,
        } = self;
        ComponentsBuilder {
            pool_builder,
            payload_builder,
            network_builder,
            executor_builder: evm_builder,
            consensus_builder,
            _marker,
        }
    }
    /// Configures the payload builder.
    ///
    /// This accepts a [`PayloadServiceBuilder`] instance that will be used to create the node's
    /// payload builder service.
    pub fn payload<PB>(
        self,
        payload_builder: PB,
    ) -> ComponentsBuilder<Node, PoolB, PB, NetworkB, ExecB, ConsB>
    where
        // `ExecB` must be known here because the payload service builder is parameterized
        // over the EVM type it produces.
        ExecB: ExecutorBuilder<Node>,
        PB: PayloadServiceBuilder<Node, PoolB::Pool, ExecB::EVM>,
    {
        let Self {
            pool_builder,
            payload_builder: _,
            network_builder,
            executor_builder: evm_builder,
            consensus_builder,
            _marker,
        } = self;
        ComponentsBuilder {
            pool_builder,
            payload_builder,
            network_builder,
            executor_builder: evm_builder,
            consensus_builder,
            _marker,
        }
    }
    /// Configures the executor builder.
    ///
    /// This accepts a [`ExecutorBuilder`] instance that will be used to create the node's
    /// components for execution.
    pub fn executor<EB>(
        self,
        executor_builder: EB,
    ) -> ComponentsBuilder<Node, PoolB, PayloadB, NetworkB, EB, ConsB>
    where
        EB: ExecutorBuilder<Node>,
    {
        let Self {
            pool_builder,
            payload_builder,
            network_builder,
            executor_builder: _,
            consensus_builder,
            _marker,
        } = self;
        ComponentsBuilder {
            pool_builder,
            payload_builder,
            network_builder,
            executor_builder,
            consensus_builder,
            _marker,
        }
    }
    /// Configures the consensus builder.
    ///
    /// This accepts a [`ConsensusBuilder`] instance that will be used to create the node's
    /// components for consensus.
    pub fn consensus<CB>(
        self,
        consensus_builder: CB,
    ) -> ComponentsBuilder<Node, PoolB, PayloadB, NetworkB, ExecB, CB>
    where
        CB: ConsensusBuilder<Node>,
    {
        let Self {
            pool_builder,
            payload_builder,
            network_builder,
            executor_builder,
            consensus_builder: _,
            _marker,
        } = self;
        ComponentsBuilder {
            pool_builder,
            payload_builder,
            network_builder,
            executor_builder,
            consensus_builder,
            _marker,
        }
    }
    /// Sets [`NoopNetworkBuilder`].
    pub fn noop_network<Net>(
        self,
    ) -> ComponentsBuilder<Node, PoolB, PayloadB, NoopNetworkBuilder<Net>, ExecB, ConsB> {
        ComponentsBuilder {
            pool_builder: self.pool_builder,
            payload_builder: self.payload_builder,
            network_builder: NoopNetworkBuilder::<Net>::default(),
            executor_builder: self.executor_builder,
            consensus_builder: self.consensus_builder,
            _marker: self._marker,
        }
    }
    /// Sets [`NoopPayloadBuilder`].
    pub fn noop_payload(
        self,
    ) -> ComponentsBuilder<Node, PoolB, NoopPayloadBuilder, NetworkB, ExecB, ConsB> {
        ComponentsBuilder {
            pool_builder: self.pool_builder,
            payload_builder: NoopPayloadBuilder,
            network_builder: self.network_builder,
            executor_builder: self.executor_builder,
            consensus_builder: self.consensus_builder,
            _marker: self._marker,
        }
    }
    /// Sets [`NoopConsensusBuilder`].
    pub fn noop_consensus(
        self,
    ) -> ComponentsBuilder<Node, PoolB, PayloadB, NetworkB, ExecB, NoopConsensusBuilder> {
        ComponentsBuilder {
            pool_builder: self.pool_builder,
            payload_builder: self.payload_builder,
            network_builder: self.network_builder,
            executor_builder: self.executor_builder,
            consensus_builder: NoopConsensusBuilder,
            _marker: self._marker,
        }
    }
}
impl<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB> NodeComponentsBuilder<Node>
    for ComponentsBuilder<Node, PoolB, PayloadB, NetworkB, ExecB, ConsB>
where
    Node: FullNodeTypes,
    PoolB: PoolBuilder<Node, Pool: TransactionPool>,
    NetworkB: NetworkBuilder<
        Node,
        PoolB::Pool,
        Network: FullNetwork<
            Primitives: NetPrimitivesFor<
                PrimitivesTy<Node::Types>,
                PooledTransaction = PoolPooledTx<PoolB::Pool>,
            >,
        >,
    >,
    PayloadB: PayloadServiceBuilder<Node, PoolB::Pool, ExecB::EVM>,
    ExecB: ExecutorBuilder<Node>,
    ConsB: ConsensusBuilder<Node>,
{
    type Components =
        Components<Node, NetworkB::Network, PoolB::Pool, ExecB::EVM, ConsB::Consensus>;
    /// Builds each component in dependency order: EVM config, then pool, then the
    /// network and payload service (both of which consume a pool clone), then consensus.
    async fn build_components(
        self,
        context: &BuilderContext<Node>,
    ) -> eyre::Result<Self::Components> {
        let Self {
            pool_builder,
            payload_builder,
            network_builder,
            executor_builder: evm_builder,
            consensus_builder,
            _marker,
        } = self;
        let evm_config = evm_builder.build_evm(context).await?;
        let pool = pool_builder.build_pool(context).await?;
        let network = network_builder.build_network(context, pool.clone()).await?;
        let payload_builder_handle = payload_builder
            .spawn_payload_builder_service(context, pool.clone(), evm_config.clone())
            .await?;
        let consensus = consensus_builder.build_consensus(context).await?;
        Ok(Components {
            transaction_pool: pool,
            evm_config,
            network,
            payload_builder_handle,
            consensus,
        })
    }
}
impl Default for ComponentsBuilder<(), (), (), (), (), ()> {
fn default() -> Self {
Self {
pool_builder: (),
payload_builder: (),
network_builder: (),
executor_builder: (),
consensus_builder: (),
_marker: Default::default(),
}
}
}
/// A type that configures all the customizable components of the node and knows how to build them.
///
/// Implementers of this trait are responsible for building all the components of the node: See
/// [`NodeComponents`].
///
/// The [`ComponentsBuilder`] is a generic, general purpose implementation of this trait that can be
/// used to customize certain components of the node using the builder pattern and defaults, e.g.
/// Ethereum and Optimism.
pub trait NodeComponentsBuilder<Node: FullNodeTypes>: Send {
    /// The components for the node with the given types
    type Components: NodeComponents<Node>;
    /// Consumes the type and returns the created components.
    fn build_components(
        self,
        ctx: &BuilderContext<Node>,
    ) -> impl Future<Output = eyre::Result<Self::Components>> + Send;
}
// Allows a plain async closure over the builder context to serve as a components builder.
impl<Node, Net, F, Fut, Pool, EVM, Cons> NodeComponentsBuilder<Node> for F
where
    Net: FullNetwork<
        Primitives: NetPrimitivesFor<
            PrimitivesTy<Node::Types>,
            PooledTransaction = PoolPooledTx<Pool>,
        >,
    >,
    Node: FullNodeTypes,
    F: FnOnce(&BuilderContext<Node>) -> Fut + Send,
    Fut: Future<Output = eyre::Result<Components<Node, Net, Pool, EVM, Cons>>> + Send,
    Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>>
        + Unpin
        + 'static,
    EVM: ConfigureEvm<Primitives = PrimitivesTy<Node::Types>> + 'static,
    Cons:
        FullConsensus<PrimitivesTy<Node::Types>, Error = ConsensusError> + Clone + Unpin + 'static,
{
    type Components = Components<Node, Net, Pool, EVM, Cons>;
    fn build_components(
        self,
        ctx: &BuilderContext<Node>,
    ) -> impl Future<Output = eyre::Result<Self::Components>> + Send {
        self(ctx)
    }
}
/// Builds [`NoopTransactionPool`].
#[derive(Debug, Clone)]
pub struct NoopTransactionPoolBuilder<Tx = EthPooledTransaction>(PhantomData<Tx>);
impl<N, Tx> PoolBuilder<N> for NoopTransactionPoolBuilder<Tx>
where
    N: FullNodeTypes,
    Tx: EthPoolTransaction<Consensus = TxTy<N::Types>> + Unpin,
{
    type Pool = NoopTransactionPool<Tx>;
    /// Hands back a fresh no-op pool; the builder context is not consulted.
    async fn build_pool(self, _ctx: &BuilderContext<N>) -> eyre::Result<Self::Pool> {
        Ok(NoopTransactionPool::new())
    }
}
// Manual impl: `#[derive(Default)]` would require `Tx: Default`, which is
// unnecessary for a zero-sized marker.
impl<Tx> Default for NoopTransactionPoolBuilder<Tx> {
    fn default() -> Self {
        NoopTransactionPoolBuilder(PhantomData)
    }
}
/// Builds [`NoopNetwork`].
#[derive(Debug, Clone)]
pub struct NoopNetworkBuilder<Net = EthNetworkPrimitives>(PhantomData<Net>);
impl NoopNetworkBuilder {
    /// Returns the instance with ethereum types.
    ///
    /// `const` for consistency with the other zero-cost constructors in this
    /// module (e.g. [`BasicPayloadServiceBuilder::new`]); constructs the marker
    /// directly instead of going through `Default`.
    pub const fn eth() -> Self {
        Self(PhantomData)
    }
}
impl<N, Pool, Net> NetworkBuilder<N, Pool> for NoopNetworkBuilder<Net>
where
    N: FullNodeTypes,
    Pool: TransactionPool,
    Net: NetworkPrimitives<
        BlockHeader = HeaderTy<N::Types>,
        BlockBody = BodyTy<N::Types>,
        Block = BlockTy<N::Types>,
        Receipt = ReceiptTy<N::Types>,
    >,
{
    type Network = NoopNetwork<Net>;
    /// Returns a no-op network handle; both the context and the pool are ignored.
    async fn build_network(
        self,
        _ctx: &BuilderContext<N>,
        _pool: Pool,
    ) -> eyre::Result<Self::Network> {
        Ok(NoopNetwork::new())
    }
}
// Manual impl: `#[derive(Default)]` would require `Net: Default`, which is
// unnecessary for a zero-sized marker.
impl<Net> Default for NoopNetworkBuilder<Net> {
    fn default() -> Self {
        Self(PhantomData)
    }
}
/// Builds [`NoopConsensus`].
#[derive(Debug, Clone, Default)]
pub struct NoopConsensusBuilder;
impl<N> ConsensusBuilder<N> for NoopConsensusBuilder
where
    N: FullNodeTypes,
{
    type Consensus = NoopConsensus;
    /// Returns a no-op consensus implementation; the builder context is ignored.
    async fn build_consensus(self, _ctx: &BuilderContext<N>) -> eyre::Result<Self::Consensus> {
        Ok(NoopConsensus::default())
    }
}
/// Builds [`PayloadBuilderHandle::noop`].
#[derive(Debug, Clone, Default)]
pub struct NoopPayloadBuilder;
impl<N, Pool, EVM> PayloadServiceBuilder<N, Pool, EVM> for NoopPayloadBuilder
where
    N: FullNodeTypes,
    Pool: TransactionPool,
    EVM: ConfigureEvm<Primitives = PrimitivesTy<N::Types>> + 'static,
{
    /// Spawns no service at all and returns a handle whose requests go nowhere.
    async fn spawn_payload_builder_service(
        self,
        _ctx: &BuilderContext<N>,
        _pool: Pool,
        _evm_config: EVM,
    ) -> eyre::Result<PayloadBuilderHandle<<N::Types as NodeTypes>::Payload>> {
        Ok(PayloadBuilderHandle::<<N::Types as NodeTypes>::Payload>::noop())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/components/network.rs | crates/node/builder/src/components/network.rs | //! Network component for the node builder.
use crate::{BuilderContext, FullNodeTypes};
use reth_network::types::NetPrimitivesFor;
use reth_network_api::FullNetwork;
use reth_node_api::PrimitivesTy;
use reth_transaction_pool::TransactionPool;
use std::future::Future;
/// A type that knows how to build the network implementation.
pub trait NetworkBuilder<Node: FullNodeTypes, Pool: TransactionPool>: Send {
    /// The network built.
    type Network: FullNetwork<Primitives: NetPrimitivesFor<PrimitivesTy<Node::Types>>>;
    /// Launches the network implementation and returns the handle to it.
    ///
    /// The pool is handed in so the network can propagate transactions.
    fn build_network(
        self,
        ctx: &BuilderContext<Node>,
        pool: Pool,
    ) -> impl Future<Output = eyre::Result<Self::Network>> + Send;
}
// Allows a plain async closure over `(context, pool)` to serve as a network builder.
impl<Node, Net, F, Fut, Pool> NetworkBuilder<Node, Pool> for F
where
    Node: FullNodeTypes,
    Net: FullNetwork<Primitives: NetPrimitivesFor<PrimitivesTy<Node::Types>>>,
    Pool: TransactionPool,
    F: Fn(&BuilderContext<Node>, Pool) -> Fut + Send,
    Fut: Future<Output = eyre::Result<Net>> + Send,
{
    type Network = Net;
    fn build_network(
        self,
        ctx: &BuilderContext<Node>,
        pool: Pool,
    ) -> impl Future<Output = eyre::Result<Net>> + Send {
        self(ctx, pool)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/components/execute.rs | crates/node/builder/src/components/execute.rs | //! EVM component for the node builder.
use crate::{BuilderContext, ConfigureEvm, FullNodeTypes};
use reth_node_api::PrimitivesTy;
use std::future::Future;
/// A type that knows how to build the executor types.
pub trait ExecutorBuilder<Node: FullNodeTypes>: Send {
    /// The EVM config to use.
    ///
    /// This provides the node with the necessary configuration to configure an EVM.
    type EVM: ConfigureEvm<Primitives = PrimitivesTy<Node::Types>> + 'static;
    /// Creates the EVM config.
    fn build_evm(
        self,
        ctx: &BuilderContext<Node>,
    ) -> impl Future<Output = eyre::Result<Self::EVM>> + Send;
}
// Allows a plain async closure over the builder context to serve as an executor builder.
// The returned future is `Send` via the `Fut: Send` bound even though the impl's return
// type does not restate it.
impl<Node, F, Fut, EVM> ExecutorBuilder<Node> for F
where
    Node: FullNodeTypes,
    EVM: ConfigureEvm<Primitives = PrimitivesTy<Node::Types>> + 'static,
    F: FnOnce(&BuilderContext<Node>) -> Fut + Send,
    Fut: Future<Output = eyre::Result<EVM>> + Send,
{
    type EVM = EVM;
    fn build_evm(
        self,
        ctx: &BuilderContext<Node>,
    ) -> impl Future<Output = eyre::Result<Self::EVM>> {
        self(ctx)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/components/mod.rs | crates/node/builder/src/components/mod.rs | //! Support for configuring the components of a node.
//!
//! Customizable components of the node include:
//! - The transaction pool.
//! - The network implementation.
//! - The payload builder service.
//!
//! Components depend on a fully type configured node: [FullNodeTypes](crate::node::FullNodeTypes).
mod builder;
mod consensus;
mod execute;
mod network;
mod payload;
mod pool;
pub use builder::*;
pub use consensus::*;
pub use execute::*;
pub use network::*;
pub use payload::*;
pub use pool::*;
use crate::{ConfigureEvm, FullNodeTypes};
use reth_consensus::{ConsensusError, FullConsensus};
use reth_network::types::NetPrimitivesFor;
use reth_network_api::FullNetwork;
use reth_node_api::{NodeTypes, PrimitivesTy, TxTy};
use reth_payload_builder::PayloadBuilderHandle;
use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool};
use std::fmt::Debug;
/// An abstraction over the components of a node, consisting of:
/// - evm and executor
/// - transaction pool
/// - network
/// - payload builder.
pub trait NodeComponents<T: FullNodeTypes>: Clone + Debug + Unpin + Send + Sync + 'static {
    /// The transaction pool of the node.
    type Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<T::Types>>> + Unpin;
    /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine.
    type Evm: ConfigureEvm<Primitives = <T::Types as NodeTypes>::Primitives>;
    /// The consensus type of the node.
    type Consensus: FullConsensus<<T::Types as NodeTypes>::Primitives, Error = ConsensusError>
        + Clone
        + Unpin
        + 'static;
    /// Network API.
    type Network: FullNetwork<Primitives: NetPrimitivesFor<<T::Types as NodeTypes>::Primitives>>;
    /// Returns the transaction pool of the node.
    fn pool(&self) -> &Self::Pool;
    /// Returns the node's evm config.
    fn evm_config(&self) -> &Self::Evm;
    /// Returns the node's consensus type.
    fn consensus(&self) -> &Self::Consensus;
    /// Returns the handle to the network
    fn network(&self) -> &Self::Network;
    /// Returns the handle to the payload builder service handling payload building requests from
    /// the engine.
    fn payload_builder_handle(&self) -> &PayloadBuilderHandle<<T::Types as NodeTypes>::Payload>;
}
/// All the components of the node.
///
/// This provides access to all the components of the node.
#[derive(Debug)]
pub struct Components<Node: FullNodeTypes, Network, Pool, EVM, Consensus> {
    /// The transaction pool of the node.
    pub transaction_pool: Pool,
    /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine.
    pub evm_config: EVM,
    /// The consensus implementation of the node.
    pub consensus: Consensus,
    /// The network implementation of the node.
    pub network: Network,
    /// The handle to the payload builder service.
    pub payload_builder_handle: PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload>,
}
// Each accessor simply borrows the corresponding field of the bundle.
impl<Node, Pool, EVM, Cons, Network> NodeComponents<Node>
    for Components<Node, Network, Pool, EVM, Cons>
where
    Node: FullNodeTypes,
    Network: FullNetwork<
        Primitives: NetPrimitivesFor<
            PrimitivesTy<Node::Types>,
            PooledTransaction = PoolPooledTx<Pool>,
        >,
    >,
    Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>>
        + Unpin
        + 'static,
    EVM: ConfigureEvm<Primitives = PrimitivesTy<Node::Types>> + 'static,
    Cons:
        FullConsensus<PrimitivesTy<Node::Types>, Error = ConsensusError> + Clone + Unpin + 'static,
{
    type Pool = Pool;
    type Evm = EVM;
    type Consensus = Cons;
    type Network = Network;
    fn pool(&self) -> &Self::Pool {
        &self.transaction_pool
    }
    fn evm_config(&self) -> &Self::Evm {
        &self.evm_config
    }
    fn consensus(&self) -> &Self::Consensus {
        &self.consensus
    }
    fn network(&self) -> &Self::Network {
        &self.network
    }
    fn payload_builder_handle(&self) -> &PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload> {
        &self.payload_builder_handle
    }
}
// Manual `Clone` with explicit bounds: a derive would also demand `Node: Clone`,
// which is only a type-level marker here.
impl<Node, N, Pool, EVM, Cons> Clone for Components<Node, N, Pool, EVM, Cons>
where
    N: Clone,
    Node: FullNodeTypes,
    Pool: TransactionPool,
    EVM: ConfigureEvm,
    Cons: Clone,
{
    fn clone(&self) -> Self {
        Self {
            transaction_pool: self.transaction_pool.clone(),
            evm_config: self.evm_config.clone(),
            consensus: self.consensus.clone(),
            network: self.network.clone(),
            payload_builder_handle: self.payload_builder_handle.clone(),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/components/payload.rs | crates/node/builder/src/components/payload.rs | //! Payload service component for the node builder.
use crate::{BuilderContext, FullNodeTypes};
use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig};
use reth_chain_state::CanonStateSubscriptions;
use reth_node_api::{NodeTypes, PayloadBuilderFor};
use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand};
use reth_transaction_pool::TransactionPool;
use std::future::Future;
use tokio::sync::{broadcast, mpsc};
use tracing::warn;
/// A type that knows how to spawn the payload service.
pub trait PayloadServiceBuilder<Node: FullNodeTypes, Pool: TransactionPool, EvmConfig>:
    Send + Sized
{
    /// Spawns the [`PayloadBuilderService`] and returns the handle to it for use by the engine.
    ///
    /// We provide default implementation via [`BasicPayloadJobGenerator`] but it can be overridden
    /// for custom job orchestration logic,
    fn spawn_payload_builder_service(
        self,
        ctx: &BuilderContext<Node>,
        pool: Pool,
        evm_config: EvmConfig,
    ) -> impl Future<Output = eyre::Result<PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload>>>
           + Send;
}
// Allows a plain async closure over `(context, pool, evm_config)` to serve as a
// payload service builder.
impl<Node, F, Fut, Pool, EvmConfig> PayloadServiceBuilder<Node, Pool, EvmConfig> for F
where
    Node: FullNodeTypes,
    Pool: TransactionPool,
    F: Fn(&BuilderContext<Node>, Pool, EvmConfig) -> Fut + Send,
    Fut: Future<Output = eyre::Result<PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload>>>
        + Send,
{
    fn spawn_payload_builder_service(
        self,
        ctx: &BuilderContext<Node>,
        pool: Pool,
        evm_config: EvmConfig,
    ) -> impl Future<Output = eyre::Result<PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload>>>
    {
        self(ctx, pool, evm_config)
    }
}
/// A type that knows how to build a payload builder to plug into [`BasicPayloadServiceBuilder`].
pub trait PayloadBuilderBuilder<Node: FullNodeTypes, Pool: TransactionPool, EvmConfig>:
    Send + Sized
{
    /// Payload builder implementation.
    type PayloadBuilder: PayloadBuilderFor<Node::Types> + Unpin + 'static;
    /// Spawns the payload service and returns the handle to it.
    ///
    /// The [`BuilderContext`] is provided to allow access to the node's configuration.
    fn build_payload_builder(
        self,
        ctx: &BuilderContext<Node>,
        pool: Pool,
        evm_config: EvmConfig,
    ) -> impl Future<Output = eyre::Result<Self::PayloadBuilder>> + Send;
}
/// Basic payload service builder that spawns a [`BasicPayloadJobGenerator`]
#[derive(Debug, Default, Clone)]
pub struct BasicPayloadServiceBuilder<PB>(PB);
impl<PB> BasicPayloadServiceBuilder<PB> {
    /// Create a new [`BasicPayloadServiceBuilder`].
    pub const fn new(payload_builder_builder: PB) -> Self {
        Self(payload_builder_builder)
    }
}
impl<Node, Pool, PB, EvmConfig> PayloadServiceBuilder<Node, Pool, EvmConfig>
    for BasicPayloadServiceBuilder<PB>
where
    Node: FullNodeTypes,
    Pool: TransactionPool,
    EvmConfig: Send,
    PB: PayloadBuilderBuilder<Node, Pool, EvmConfig>,
{
    async fn spawn_payload_builder_service(
        self,
        ctx: &BuilderContext<Node>,
        pool: Pool,
        evm_config: EvmConfig,
    ) -> eyre::Result<PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload>> {
        // Build the inner payload builder first, then wire it into a job generator.
        let payload_builder = self.0.build_payload_builder(ctx, pool, evm_config).await?;
        // Job generator settings come from the node's configured builder args.
        let conf = ctx.config().builder.clone();
        let payload_job_config = BasicPayloadJobGeneratorConfig::default()
            .interval(conf.interval)
            .deadline(conf.deadline)
            .max_payload_tasks(conf.max_payload_tasks);
        let payload_generator = BasicPayloadJobGenerator::with_builder(
            ctx.provider().clone(),
            ctx.task_executor().clone(),
            payload_job_config,
            payload_builder,
        );
        // The service is driven by canonical-state notifications from the provider.
        let (payload_service, payload_service_handle) =
            PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream());
        ctx.task_executor().spawn_critical("payload builder service", Box::pin(payload_service));
        Ok(payload_service_handle)
    }
}
/// A `NoopPayloadServiceBuilder` useful for node implementations that are not implementing
/// validating/sequencing logic.
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct NoopPayloadServiceBuilder;
impl<Node, Pool, Evm> PayloadServiceBuilder<Node, Pool, Evm> for NoopPayloadServiceBuilder
where
    Node: FullNodeTypes,
    Pool: TransactionPool,
    Evm: Send,
{
    /// Spawns a stub service that answers `Subscribe` requests with live (but never-fed)
    /// event channels and logs every other command.
    async fn spawn_payload_builder_service(
        self,
        ctx: &BuilderContext<Node>,
        _pool: Pool,
        _evm_config: Evm,
    ) -> eyre::Result<PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload>> {
        let (tx, mut rx) = mpsc::unbounded_channel();
        ctx.task_executor().spawn_critical("payload builder", async move {
            #[allow(clippy::collection_is_never_read)]
            let mut subscriptions = Vec::new();
            while let Some(message) = rx.recv().await {
                match message {
                    PayloadServiceCommand::Subscribe(tx) => {
                        let (events_tx, events_rx) = broadcast::channel(100);
                        // Retain senders to make sure that channels are not getting closed
                        subscriptions.push(events_tx);
                        let _ = tx.send(events_rx);
                    }
                    message => warn!(?message, "Noop payload service received a message"),
                }
            }
        });
        Ok(PayloadBuilderHandle::new(tx))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/components/consensus.rs | crates/node/builder/src/components/consensus.rs | //! Consensus component for the node builder.
use reth_consensus::{ConsensusError, FullConsensus};
use reth_node_api::PrimitivesTy;
use crate::{BuilderContext, FullNodeTypes};
use std::future::Future;
/// A type that knows how to build the consensus implementation.
pub trait ConsensusBuilder<Node: FullNodeTypes>: Send {
    /// The consensus implementation to build.
    type Consensus: FullConsensus<PrimitivesTy<Node::Types>, Error = ConsensusError>
        + Clone
        + Unpin
        + 'static;
    /// Creates the consensus implementation.
    ///
    /// The returned future resolves to the consensus instance the node will use, or an
    /// error if construction fails.
    fn build_consensus(
        self,
        ctx: &BuilderContext<Node>,
    ) -> impl Future<Output = eyre::Result<Self::Consensus>> + Send;
}
// Blanket impl: any `FnOnce(&BuilderContext<Node>) -> impl Future<Output = eyre::Result<C>>`
// closure can be used directly as a `ConsensusBuilder`.
impl<Node, F, Fut, Consensus> ConsensusBuilder<Node> for F
where
    Node: FullNodeTypes,
    Consensus:
        FullConsensus<PrimitivesTy<Node::Types>, Error = ConsensusError> + Clone + Unpin + 'static,
    F: FnOnce(&BuilderContext<Node>) -> Fut + Send,
    Fut: Future<Output = eyre::Result<Consensus>> + Send,
{
    type Consensus = Consensus;
    fn build_consensus(
        self,
        ctx: &BuilderContext<Node>,
    ) -> impl Future<Output = eyre::Result<Self::Consensus>> {
        // Delegate straight to the closure.
        self(ctx)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/components/pool.rs | crates/node/builder/src/components/pool.rs | //! Pool component for the node builder.
use alloy_primitives::Address;
use reth_chain_state::CanonStateSubscriptions;
use reth_node_api::TxTy;
use reth_transaction_pool::{
blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit,
TransactionPool, TransactionValidationTaskExecutor, TransactionValidator,
};
use std::{collections::HashSet, future::Future};
use crate::{BuilderContext, FullNodeTypes};
/// A type that knows how to build the transaction pool.
pub trait PoolBuilder<Node: FullNodeTypes>: Send {
    /// The transaction pool to build.
    type Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>>
        + Unpin
        + 'static;
    /// Creates the transaction pool.
    ///
    /// The returned future resolves to the pool instance the node will use, or an error
    /// if construction fails.
    fn build_pool(
        self,
        ctx: &BuilderContext<Node>,
    ) -> impl Future<Output = eyre::Result<Self::Pool>> + Send;
}
// Blanket impl: any `FnOnce(&BuilderContext<Node>) -> impl Future<Output = eyre::Result<P>>`
// closure can be used directly as a `PoolBuilder`.
impl<Node, F, Fut, Pool> PoolBuilder<Node> for F
where
    Node: FullNodeTypes,
    Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>>
        + Unpin
        + 'static,
    F: FnOnce(&BuilderContext<Node>) -> Fut + Send,
    Fut: Future<Output = eyre::Result<Pool>> + Send,
{
    type Pool = Pool;
    fn build_pool(
        self,
        ctx: &BuilderContext<Node>,
    ) -> impl Future<Output = eyre::Result<Self::Pool>> {
        // Delegate straight to the closure.
        self(ctx)
    }
}
/// Convenience type to override cli or default pool configuration during build.
#[derive(Debug, Clone, Default)]
pub struct PoolBuilderConfigOverrides {
    /// Max number of transaction in the pending sub-pool
    pub pending_limit: Option<SubPoolLimit>,
    /// Max number of transaction in the basefee sub-pool
    pub basefee_limit: Option<SubPoolLimit>,
    /// Max number of transaction in the queued sub-pool
    pub queued_limit: Option<SubPoolLimit>,
    /// Max number of transactions in the blob sub-pool
    pub blob_limit: Option<SubPoolLimit>,
    /// Max number of executable transaction slots guaranteed per account
    pub max_account_slots: Option<usize>,
    /// Minimum base fee required by the protocol.
    pub minimal_protocol_basefee: Option<u64>,
    /// Addresses that will be considered as local. Above exemptions apply.
    pub local_addresses: HashSet<Address>,
    /// Additional tasks to validate new transactions.
    pub additional_validation_tasks: Option<usize>,
}
impl PoolBuilderConfigOverrides {
    /// Applies the configured overrides to the given [`PoolConfig`].
    ///
    /// Any override left as `None` keeps the corresponding value from `config` unchanged.
    /// `local_addresses` are merged into the existing local-transaction exemptions.
    /// `additional_validation_tasks` is not part of [`PoolConfig`] and is therefore not
    /// applied here.
    pub fn apply(self, mut config: PoolConfig) -> PoolConfig {
        if let Some(limit) = self.pending_limit {
            config.pending_limit = limit;
        }
        if let Some(limit) = self.basefee_limit {
            config.basefee_limit = limit;
        }
        if let Some(limit) = self.queued_limit {
            config.queued_limit = limit;
        }
        if let Some(limit) = self.blob_limit {
            config.blob_limit = limit;
        }
        if let Some(slots) = self.max_account_slots {
            config.max_account_slots = slots;
        }
        if let Some(min_basefee) = self.minimal_protocol_basefee {
            config.minimal_protocol_basefee = min_basefee;
        }
        config.local_transactions_config.local_addresses.extend(self.local_addresses);
        config
    }
}
/// A builder for creating transaction pools with common configuration options.
///
/// Provides a fluent API for assembling a transaction pool from a blob store, a
/// validator, and maintenance tasks.
pub struct TxPoolBuilder<'a, Node: FullNodeTypes, V = ()> {
    ctx: &'a BuilderContext<Node>,
    validator: V,
}
impl<'a, Node: FullNodeTypes> TxPoolBuilder<'a, Node> {
    /// Creates a new `TxPoolBuilder` with the given context and no validator yet.
    pub const fn new(ctx: &'a BuilderContext<Node>) -> Self {
        Self { ctx, validator: () }
    }
}
impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, V> {
    /// Configure the validator for the transaction pool, replacing any previous one.
    pub fn with_validator<NewV>(self, validator: NewV) -> TxPoolBuilder<'a, Node, NewV> {
        TxPoolBuilder { ctx: self.ctx, validator }
    }
}
impl<'a, Node: FullNodeTypes, V> TxPoolBuilder<'a, Node, TransactionValidationTaskExecutor<V>>
where
    V: TransactionValidator + 'static,
    V::Transaction:
        PoolTransaction<Consensus = TxTy<Node::Types>> + reth_transaction_pool::EthPoolTransaction,
{
    /// Build the transaction pool and spawn its maintenance tasks.
    ///
    /// Constructs the pool from the configured validator, the given blob store and pool
    /// configuration, then spawns the backup and maintenance tasks before returning it.
    pub fn build_and_spawn_maintenance_task(
        self,
        blob_store: DiskFileBlobStore,
        pool_config: PoolConfig,
    ) -> eyre::Result<
        reth_transaction_pool::Pool<
            TransactionValidationTaskExecutor<V>,
            CoinbaseTipOrdering<V::Transaction>,
            DiskFileBlobStore,
        >,
    > {
        // Copy out the context reference before moving the validator out of `self`.
        let ctx = self.ctx;
        let pool = reth_transaction_pool::Pool::new(
            self.validator,
            CoinbaseTipOrdering::default(),
            blob_store,
            pool_config.clone(),
        );
        // Backup + canonical-state maintenance tasks keep the pool consistent with the chain.
        spawn_maintenance_tasks(ctx, pool.clone(), &pool_config)?;
        Ok(pool)
    }
}
/// Create blob store with default configuration.
///
/// The store lives under the node datadir's `blobstore` directory.
pub fn create_blob_store<Node: FullNodeTypes>(
    ctx: &BuilderContext<Node>,
) -> eyre::Result<DiskFileBlobStore> {
    let blobstore_path = ctx.config().datadir().blobstore();
    let store = reth_transaction_pool::blobstore::DiskFileBlobStore::open(
        blobstore_path,
        Default::default(),
    )?;
    Ok(store)
}
/// Create blob store with custom cache size configuration.
///
/// When `cache_size` is `None`, the default blob store configuration is used.
pub fn create_blob_store_with_cache<Node: FullNodeTypes>(
    ctx: &BuilderContext<Node>,
    cache_size: Option<u32>,
) -> eyre::Result<DiskFileBlobStore> {
    // Build the store config up front: either the default, or one with a custom
    // maximum number of cached entries.
    let config = cache_size
        .map(|size| {
            reth_transaction_pool::blobstore::DiskFileBlobStoreConfig::default()
                .with_max_cached_entries(size)
        })
        .unwrap_or_default();
    let blobstore_path = ctx.config().datadir().blobstore();
    Ok(reth_transaction_pool::blobstore::DiskFileBlobStore::open(blobstore_path, config)?)
}
/// Spawn local transaction backup task if enabled.
///
/// Persists local transactions to disk on shutdown so they survive restarts. A no-op
/// when backups are disabled via the txpool configuration.
fn spawn_local_backup_task<Node, Pool>(ctx: &BuilderContext<Node>, pool: Pool) -> eyre::Result<()>
where
    Node: FullNodeTypes,
    Pool: TransactionPool + Clone + 'static,
{
    // Guard clause: nothing to spawn when backups are explicitly disabled.
    if ctx.config().txpool.disable_transactions_backup {
        return Ok(())
    }
    let data_dir = ctx.config().datadir();
    // Prefer a user-configured backup path; otherwise use the datadir default.
    let backup_path = ctx
        .config()
        .txpool
        .transactions_backup_path
        .clone()
        .unwrap_or_else(|| data_dir.txpool_transactions());
    let backup_config =
        reth_transaction_pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(
            backup_path,
        );
    // Graceful-shutdown-aware task: the backup is written when the node shuts down.
    ctx.task_executor().spawn_critical_with_graceful_shutdown_signal(
        "local transactions backup task",
        |shutdown| {
            reth_transaction_pool::maintain::backup_local_transactions_task(
                shutdown,
                pool,
                backup_config,
            )
        },
    );
    Ok(())
}
/// Spawn the main maintenance task for transaction pool.
///
/// The task follows canonical chain events and evicts/revalidates pool content
/// accordingly.
fn spawn_pool_maintenance_task<Node, Pool>(
    ctx: &BuilderContext<Node>,
    pool: Pool,
    pool_config: &PoolConfig,
) -> eyre::Result<()>
where
    Node: FullNodeTypes,
    Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static,
    Pool::Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>,
{
    let canonical_events = ctx.provider().canonical_state_stream();
    let provider = ctx.provider().clone();
    ctx.task_executor().spawn_critical(
        "txpool maintenance task",
        reth_transaction_pool::maintain::maintain_transaction_pool_future(
            provider,
            pool,
            canonical_events,
            ctx.task_executor().clone(),
            reth_transaction_pool::maintain::MaintainPoolConfig {
                max_tx_lifetime: pool_config.max_queued_lifetime,
                no_local_exemptions: pool_config.local_transactions_config.no_exemptions,
                ..Default::default()
            },
        ),
    );
    Ok(())
}
/// Spawn all maintenance tasks for a transaction pool (backup + main maintenance).
fn spawn_maintenance_tasks<Node, Pool>(
    ctx: &BuilderContext<Node>,
    pool: Pool,
    pool_config: &PoolConfig,
) -> eyre::Result<()>
where
    Node: FullNodeTypes,
    Pool: reth_transaction_pool::TransactionPoolExt + Clone + 'static,
    Pool::Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>,
{
    spawn_local_backup_task(ctx, pool.clone())?;
    // Tail call: propagate the result of the main maintenance spawn directly.
    spawn_pool_maintenance_task(ctx, pool, pool_config)
}
// Manual `Debug` impl: `BuilderContext` is not `Debug`, so only the validator is shown.
impl<Node: FullNodeTypes, V: std::fmt::Debug> std::fmt::Debug for TxPoolBuilder<'_, Node, V> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("TxPoolBuilder").field("validator", &self.validator).finish()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_transaction_pool::PoolConfig;
    /// Overrides set to `Some` must replace the corresponding config values.
    #[test]
    fn test_pool_builder_config_overrides_apply() {
        let overrides = PoolBuilderConfigOverrides {
            pending_limit: Some(SubPoolLimit::default()),
            max_account_slots: Some(100),
            minimal_protocol_basefee: Some(1000),
            ..Default::default()
        };
        let applied = overrides.apply(PoolConfig::default());
        assert_eq!(applied.max_account_slots, 100);
        assert_eq!(applied.minimal_protocol_basefee, 1000);
    }
    /// Default overrides must be empty, i.e. apply nothing.
    #[test]
    fn test_pool_builder_config_overrides_default() {
        let defaults = PoolBuilderConfigOverrides::default();
        assert!(defaults.pending_limit.is_none());
        assert!(defaults.max_account_slots.is_none());
        assert!(defaults.local_addresses.is_empty());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/launch/engine.rs | crates/node/builder/src/launch/engine.rs | //! Engine node related functionality.
use crate::{
common::{Attached, LaunchContextWith, WithConfigs},
hooks::NodeHooks,
rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcHandle},
setup::build_networked_pipeline,
AddOns, AddOnsContext, FullNode, LaunchContext, LaunchNode, NodeAdapter,
NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter,
};
use alloy_consensus::BlockHeader;
use futures::{stream_select, StreamExt};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_db_api::{database_metrics::DatabaseMetrics, Database};
use reth_engine_service::service::{ChainEvent, EngineService};
use reth_engine_tree::{
engine::{EngineApiRequest, EngineRequestHandler},
tree::TreeConfig,
};
use reth_engine_util::EngineMessageStreamExt;
use reth_exex::ExExManagerHandle;
use reth_network::{types::BlockRangeUpdate, NetworkSyncUpdater, SyncState};
use reth_network_api::BlockDownloaderProvider;
use reth_node_api::{
BuiltPayload, ConsensusEngineHandle, FullNodeTypes, NodeTypes, NodeTypesWithDBAdapter,
};
use reth_node_core::{
dirs::{ChainPath, DataDirPath},
exit::NodeExitFuture,
primitives::Head,
};
use reth_node_events::node;
use reth_provider::{
providers::{BlockchainProvider, NodeTypesForProvider},
BlockNumReader,
};
use reth_tasks::TaskExecutor;
use reth_tokio_util::EventSender;
use reth_tracing::tracing::{debug, error, info};
use std::sync::Arc;
use tokio::sync::{mpsc::unbounded_channel, oneshot};
use tokio_stream::wrappers::UnboundedReceiverStream;
/// The engine node launcher.
#[derive(Debug)]
pub struct EngineNodeLauncher {
    /// The task executor for the node.
    pub ctx: LaunchContext,
    /// Temporary configuration for engine tree.
    /// After engine is stabilized, this should be configured through node builder.
    pub engine_tree_config: TreeConfig,
}
impl EngineNodeLauncher {
    /// Create a new instance of the engine node launcher.
    pub const fn new(
        task_executor: TaskExecutor,
        data_dir: ChainPath<DataDirPath>,
        engine_tree_config: TreeConfig,
    ) -> Self {
        Self { ctx: LaunchContext::new(task_executor, data_dir), engine_tree_config }
    }
}
impl<Types, DB, T, CB, AO> LaunchNode<NodeBuilderWithComponents<T, CB, AO>> for EngineNodeLauncher
where
    Types: NodeTypesForProvider + NodeTypes,
    DB: Database + DatabaseMetrics + Clone + Unpin + 'static,
    T: FullNodeTypes<
        Types = Types,
        DB = DB,
        Provider = BlockchainProvider<NodeTypesWithDBAdapter<Types, DB>>,
    >,
    CB: NodeComponentsBuilder<T>,
    AO: RethRpcAddOns<NodeAdapter<T, CB::Components>>
        + EngineValidatorAddOn<NodeAdapter<T, CB::Components>>,
{
    type Node = NodeHandle<NodeAdapter<T, CB::Components>, AO>;
    // Full node bring-up: builds the launch context, pipeline, pruner, engine service and
    // RPC add-ons, spawns the consensus-engine driver task, and returns a handle whose
    // exit future resolves when that task finishes. The ordering of steps below matters;
    // do not reorder without understanding the dependencies between them.
    async fn launch_node(
        self,
        target: NodeBuilderWithComponents<T, CB, AO>,
    ) -> eyre::Result<Self::Node> {
        let Self { ctx, engine_tree_config } = self;
        let NodeBuilderWithComponents {
            adapter: NodeTypesAdapter { database },
            components_builder,
            add_ons: AddOns { hooks, exexs: installed_exex, add_ons },
            config,
        } = target;
        let NodeHooks { on_component_initialized, on_node_started, .. } = hooks;
        // setup the launch context
        let ctx = ctx
            .with_configured_globals(engine_tree_config.reserved_cpu_cores())
            // load the toml config
            .with_loaded_toml_config(config)?
            // add resolved peers
            .with_resolved_peers()?
            // attach the database
            .attach(database.clone())
            // ensure certain settings take effect
            .with_adjusted_configs()
            // Create the provider factory
            .with_provider_factory::<_, <CB::Components as NodeComponents<T>>::Evm>().await?
            .inspect(|_| {
                info!(target: "reth::cli", "Database opened");
            })
            .with_prometheus_server().await?
            .inspect(|this| {
                debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis");
            })
            .with_genesis()?
            .inspect(|this: &LaunchContextWith<Attached<WithConfigs<Types::ChainSpec>, _>>| {
                info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks());
            })
            .with_metrics_task()
            // passing FullNodeTypes as type parameter here so that we can build
            // later the components.
            .with_blockchain_db::<T, _>(move |provider_factory| {
                Ok(BlockchainProvider::new(provider_factory)?)
            })?
            .with_components(components_builder, on_component_initialized).await?;
        // Try to expire pre-merge transaction history if configured
        ctx.expire_pre_merge_transactions()?;
        // spawn exexs if any
        let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?;
        // create pipeline
        let network_handle = ctx.components().network().clone();
        let network_client = network_handle.fetch_client().await?;
        // Channel through which engine-API messages reach the engine service below.
        let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel();
        let node_config = ctx.node_config();
        // We always assume that node is syncing after a restart
        network_handle.update_sync_state(SyncState::Syncing);
        let max_block = ctx.max_block(network_client.clone()).await?;
        let static_file_producer = ctx.static_file_producer();
        let static_file_producer_events = static_file_producer.lock().events();
        info!(target: "reth::cli", "StaticFileProducer initialized");
        let consensus = Arc::new(ctx.components().consensus().clone());
        // Backfill (staged-sync) pipeline driven by the network client.
        let pipeline = build_networked_pipeline(
            &ctx.toml_config().stages,
            network_client.clone(),
            consensus.clone(),
            ctx.provider_factory().clone(),
            ctx.task_executor(),
            ctx.sync_metrics_tx(),
            ctx.prune_config(),
            max_block,
            static_file_producer,
            ctx.components().evm_config().clone(),
            maybe_exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty),
            ctx.era_import_source(),
        )?;
        // The new engine writes directly to static files. This ensures that they're up to the tip.
        pipeline.move_to_static_files()?;
        let pipeline_events = pipeline.events();
        let mut pruner_builder = ctx.pruner_builder();
        if let Some(exex_manager_handle) = &maybe_exex_manager_handle {
            // Never prune past the height ExExes have finished processing.
            pruner_builder =
                pruner_builder.finished_exex_height(exex_manager_handle.finished_height());
        }
        let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone());
        let pruner_events = pruner.events();
        info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized");
        let event_sender = EventSender::default();
        let beacon_engine_handle = ConsensusEngineHandle::new(consensus_engine_tx.clone());
        // extract the jwt secret from the args if possible
        let jwt_secret = ctx.auth_jwt_secret()?;
        let add_ons_ctx = AddOnsContext {
            node: ctx.node_adapter().clone(),
            config: ctx.node_config(),
            beacon_engine_handle: beacon_engine_handle.clone(),
            jwt_secret,
            engine_events: event_sender.clone(),
        };
        let validator_builder = add_ons.engine_validator_builder();
        // Build the engine validator with all required components
        let engine_validator = validator_builder
            .clone()
            .build_tree_validator(&add_ons_ctx, engine_tree_config.clone())
            .await?;
        // Create the consensus engine stream with optional reorg
        let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx)
            .maybe_skip_fcu(node_config.debug.skip_fcu)
            .maybe_skip_new_payload(node_config.debug.skip_new_payload)
            .maybe_reorg(
                ctx.blockchain_db().clone(),
                ctx.components().evm_config().clone(),
                || validator_builder.build_tree_validator(&add_ons_ctx, engine_tree_config.clone()),
                node_config.debug.reorg_frequency,
                node_config.debug.reorg_depth,
            )
            .await?
            // Store messages _after_ skipping so that `replay-engine` command
            // would replay only the messages that were observed by the engine
            // during this run.
            .maybe_store_messages(node_config.debug.engine_api_store.clone());
        let mut engine_service = EngineService::new(
            consensus.clone(),
            ctx.chain_spec(),
            network_client.clone(),
            Box::pin(consensus_engine_stream),
            pipeline,
            Box::new(ctx.task_executor().clone()),
            ctx.provider_factory().clone(),
            ctx.blockchain_db().clone(),
            pruner,
            ctx.components().payload_builder_handle().clone(),
            engine_validator,
            engine_tree_config,
            ctx.sync_metrics_tx(),
            ctx.components().evm_config().clone(),
            ctx.data_dir().clone(),
        );
        info!(target: "reth::cli", "Consensus engine initialized");
        // Merge every component's event stream into one feed for the events task.
        let events = stream_select!(
            event_sender.new_listener().map(Into::into),
            pipeline_events.map(Into::into),
            ctx.consensus_layer_events(),
            pruner_events.map(Into::into),
            static_file_producer_events.map(Into::into),
        );
        ctx.task_executor().spawn_critical(
            "events task",
            Box::pin(node::handle_events(
                Some(Box::new(ctx.components().network().clone())),
                Some(ctx.head().number),
                events,
            )),
        );
        let RpcHandle { rpc_server_handles, rpc_registry, engine_events, beacon_engine_handle } =
            add_ons.launch_add_ons(add_ons_ctx).await?;
        // Run consensus engine to completion
        let initial_target = ctx.initial_backfill_target()?;
        let mut built_payloads = ctx
            .components()
            .payload_builder_handle()
            .subscribe()
            .await
            .map_err(|e| eyre::eyre!("Failed to subscribe to payload builder events: {:?}", e))?
            .into_built_payload_stream()
            .fuse();
        let chainspec = ctx.chain_spec();
        let provider = ctx.blockchain_db().clone();
        let (exit, rx) = oneshot::channel();
        let terminate_after_backfill = ctx.terminate_after_initial_backfill();
        info!(target: "reth::cli", "Starting consensus engine");
        ctx.task_executor().spawn_critical("consensus engine", Box::pin(async move {
            if let Some(initial_target) = initial_target {
                debug!(target: "reth::cli", %initial_target, "start backfill sync");
                engine_service.orchestrator_mut().start_backfill_sync(initial_target);
            }
            let mut res = Ok(());
            // advance the chain and await payloads built locally to add into the engine api tree handler to prevent re-execution if that block is received as payload from the CL
            loop {
                tokio::select! {
                    payload = built_payloads.select_next_some() => {
                        if let Some(executed_block) = payload.executed_block() {
                            debug!(target: "reth::cli", block=?executed_block.recovered_block().num_hash(), "inserting built payload");
                            engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into());
                        }
                    }
                    event = engine_service.next() => {
                        // Stream exhausted: the engine service has shut down.
                        let Some(event) = event else { break };
                        // debug!(target: "reth::cli", "ChainEvent: {event}");
                        match event {
                            ChainEvent::BackfillSyncFinished => {
                                if terminate_after_backfill {
                                    debug!(target: "reth::cli", "Terminating after initial backfill");
                                    break
                                }
                            }
                            ChainEvent::BackfillSyncStarted => {
                                network_handle.update_sync_state(SyncState::Syncing);
                            }
                            ChainEvent::FatalError => {
                                error!(target: "reth::cli", "Fatal error in consensus engine");
                                res = Err(eyre::eyre!("Fatal error in consensus engine"));
                                break
                            }
                            ChainEvent::Handler(ev) => {
                                if let Some(head) = ev.canonical_header() {
                                    // Once we're progressing via live sync, we can consider the node is not syncing anymore
                                    network_handle.update_sync_state(SyncState::Idle);
                                    let head_block = Head {
                                        number: head.number(),
                                        hash: head.hash(),
                                        difficulty: head.difficulty(),
                                        timestamp: head.timestamp(),
                                        total_difficulty: chainspec.final_paris_total_difficulty().filter(|_| chainspec.is_paris_active_at_block(head.number())).unwrap_or_default(),
                                    };
                                    // Keep the p2p status and advertised block range in sync with the new head.
                                    network_handle.update_status(head_block);
                                    let updated = BlockRangeUpdate {
                                        earliest: provider.earliest_block_number().unwrap_or_default(),
                                        latest:head.number(),
                                        latest_hash:head.hash()
                                    };
                                    network_handle.update_block_range(updated);
                                }
                                event_sender.notify(ev);
                            }
                        }
                    }
                }
            }
            let _ = exit.send(res);
        }));
        // Assemble the handle exposed to callers and add-ons.
        let full_node = FullNode {
            evm_config: ctx.components().evm_config().clone(),
            pool: ctx.components().pool().clone(),
            network: ctx.components().network().clone(),
            provider: ctx.node_adapter().provider.clone(),
            payload_builder_handle: ctx.components().payload_builder_handle().clone(),
            task_executor: ctx.task_executor().clone(),
            config: ctx.node_config().clone(),
            data_dir: ctx.data_dir().clone(),
            add_ons_handle: RpcHandle {
                rpc_server_handles,
                rpc_registry,
                engine_events,
                beacon_engine_handle,
            },
        };
        // Notify on node started
        on_node_started.on_event(FullNode::clone(&full_node))?;
        ctx.spawn_ethstats().await?;
        let handle = NodeHandle {
            node_exit_future: NodeExitFuture::new(
                async { rx.await? },
                full_node.config.debug.terminate,
            ),
            node: full_node,
        };
        Ok(handle)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/launch/invalid_block_hook.rs | crates/node/builder/src/launch/invalid_block_hook.rs | //! Invalid block hook helpers for the node builder.
use crate::AddOnsContext;
use alloy_rpc_types::{Block, Header, Receipt, Transaction, TransactionRequest};
use eyre::OptionExt;
use reth_chainspec::EthChainSpec;
use reth_engine_primitives::InvalidBlockHook;
use reth_node_api::{FullNodeComponents, NodeTypes};
use reth_node_core::{
args::InvalidBlockHookType,
dirs::{ChainPath, DataDirPath},
node_config::NodeConfig,
};
use reth_primitives_traits::NodePrimitives;
use reth_provider::ChainSpecProvider;
use reth_rpc_api::EthApiClient;
/// Extension trait for [`AddOnsContext`] to create invalid block hooks.
pub trait InvalidBlockHookExt {
    /// Node primitives type.
    type Primitives: NodePrimitives;
    /// Creates an invalid block hook based on the node configuration.
    ///
    /// Hook outputs (e.g. witness dumps) are written under `data_dir`.
    fn create_invalid_block_hook(
        &self,
        data_dir: &ChainPath<DataDirPath>,
    ) -> impl std::future::Future<Output = eyre::Result<Box<dyn InvalidBlockHook<Self::Primitives>>>>
    + Send;
}
impl<N> InvalidBlockHookExt for AddOnsContext<'_, N>
where
    N: FullNodeComponents,
{
    type Primitives = <N::Types as NodeTypes>::Primitives;
    async fn create_invalid_block_hook(
        &self,
        data_dir: &ChainPath<DataDirPath>,
    ) -> eyre::Result<Box<dyn InvalidBlockHook<Self::Primitives>>> {
        // Delegate to the free function, wiring provider, evm config and chain id
        // from the node components available on this context.
        create_invalid_block_hook(
            self.config,
            data_dir,
            self.node.provider().clone(),
            self.node.evm_config().clone(),
            self.node.provider().chain_spec().chain().id(),
        )
        .await
    }
}
/// Creates an invalid block hook based on the node configuration.
///
/// This function constructs the appropriate [`InvalidBlockHook`] based on the debug
/// configuration in the node config. It supports:
/// - Witness hooks for capturing block witness data
/// - Healthy node verification via RPC
///
/// # Arguments
/// * `config` - The node configuration containing debug settings
/// * `data_dir` - The data directory for storing hook outputs
/// * `provider` - The blockchain database provider
/// * `evm_config` - The EVM configuration
/// * `chain_id` - The chain ID for verification
///
/// # Errors
/// Fails if the per-hook output directory cannot be created, if the optional healthy-node
/// RPC client cannot be built or is on a different chain, or if a configured hook type is
/// not implemented yet (`PreState`, `Opcode`).
pub async fn create_invalid_block_hook<N, P, E>(
    config: &NodeConfig<P::ChainSpec>,
    data_dir: &ChainPath<DataDirPath>,
    provider: P,
    evm_config: E,
    chain_id: u64,
) -> eyre::Result<Box<dyn InvalidBlockHook<N>>>
where
    N: NodePrimitives,
    P: reth_provider::StateProviderFactory
        + reth_provider::ChainSpecProvider
        + Clone
        + Send
        + Sync
        + 'static,
    E: reth_evm::ConfigureEvm<Primitives = N> + Clone + 'static,
{
    use reth_engine_primitives::{InvalidBlockHooks, NoopInvalidBlockHook};
    use reth_invalid_block_hooks::InvalidBlockWitnessHook;
    // No hooks configured: return a no-op implementation.
    let Some(ref hook) = config.debug.invalid_block_hook else {
        return Ok(Box::new(NoopInvalidBlockHook::default()))
    };
    let healthy_node_rpc_client = get_healthy_node_client(config, chain_id).await?;
    let output_directory = data_dir.invalid_block_hooks();
    let hooks = hook
        .iter()
        .copied()
        .map(|hook| {
            // Each hook variant writes into its own subdirectory, created eagerly here.
            let output_directory = output_directory.join(hook.to_string());
            std::fs::create_dir_all(&output_directory)?;
            Ok(match hook {
                InvalidBlockHookType::Witness => Box::new(InvalidBlockWitnessHook::new(
                    provider.clone(),
                    evm_config.clone(),
                    output_directory,
                    healthy_node_rpc_client.clone(),
                )),
                InvalidBlockHookType::PreState | InvalidBlockHookType::Opcode => {
                    eyre::bail!("invalid block hook {hook:?} is not implemented yet")
                }
            } as Box<dyn InvalidBlockHook<_>>)
        })
        .collect::<Result<_, _>>()?;
    Ok(Box::new(InvalidBlockHooks(hooks)))
}
/// Returns an RPC client for the healthy node, if configured in the node config.
///
/// When a healthy-node URL is configured, the returned client is verified to be on the
/// same chain as this node; a mismatch is an error.
async fn get_healthy_node_client<C>(
    config: &NodeConfig<C>,
    chain_id: u64,
) -> eyre::Result<Option<jsonrpsee::http_client::HttpClient>>
where
    C: EthChainSpec,
{
    let url = match config.debug.healthy_node_rpc_url.as_ref() {
        Some(url) => url,
        // Not configured: nothing to build.
        None => return Ok(None),
    };
    let rpc_client = jsonrpsee::http_client::HttpClientBuilder::default().build(url)?;
    // Sanity check: the healthy node must serve the same chain as the current node.
    let remote_chain_id =
        EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::chain_id(
            &rpc_client,
        )
        .await?
        .ok_or_eyre("healthy node rpc client didn't return a chain id")?;
    if remote_chain_id.to::<u64>() != chain_id {
        eyre::bail!("Invalid chain ID. Expected {}, got {}", chain_id, remote_chain_id);
    }
    Ok(Some(rpc_client))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/launch/debug.rs | crates/node/builder/src/launch/debug.rs | use super::LaunchNode;
use crate::{rpc::RethRpcAddOns, EngineNodeLauncher, Node, NodeHandle};
use alloy_provider::network::AnyNetwork;
use jsonrpsee::core::{DeserializeOwned, Serialize};
use reth_chainspec::EthChainSpec;
use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider};
use reth_engine_local::LocalMiner;
use reth_node_api::{BlockTy, FullNodeComponents, PayloadAttributesBuilder, PayloadTypes};
use std::sync::Arc;
use tracing::info;
/// [`Node`] extension with support for debugging utilities.
///
/// This trait provides additional necessary conversion from RPC block type to the node's
/// primitive block type, e.g. `alloy_rpc_types_eth::Block` to the node's internal block
/// representation.
///
/// This is used in conjunction with the [`DebugNodeLauncher`] to enable debugging features such as:
///
/// - **Etherscan Integration**: Use Etherscan as a consensus client to follow the chain and submit
/// blocks to the local engine.
/// - **RPC Consensus Client**: Connect to an external RPC endpoint to fetch blocks and submit them
/// to the local engine to follow the chain.
///
/// See [`DebugNodeLauncher`] for the launcher that enables these features.
///
/// # Implementation
///
/// To implement this trait, you need to:
/// 1. Define the RPC block type (typically `alloy_rpc_types_eth::Block`)
/// 2. Implement the conversion from RPC format to your primitive block type
///
/// # Example
///
/// ```ignore
/// impl<N: FullNodeComponents<Types = Self>> DebugNode<N> for MyNode {
/// type RpcBlock = alloy_rpc_types_eth::Block;
///
/// fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> BlockTy<Self> {
/// // Convert from RPC format to primitive format by converting the transactions
/// rpc_block.into_consensus().convert_transactions()
/// }
/// }
/// ```
pub trait DebugNode<N: FullNodeComponents>: Node<N> {
/// RPC block type. Used by [`DebugConsensusClient`] to fetch blocks and submit them to the
/// engine. This is intended to match the block format returned by the external RPC endpoint.
type RpcBlock: Serialize + DeserializeOwned + 'static;
/// Converts an RPC block to a primitive block.
///
/// This method handles the conversion between the RPC block format and the internal primitive
/// block format used by the node's consensus engine.
///
/// # Example
///
/// For Ethereum nodes, this typically converts from `alloy_rpc_types_eth::Block`
/// to the node's internal block representation.
fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> BlockTy<Self>;
/// Creates a payload attributes builder for local mining in dev mode.
///
/// It will be used by the `LocalMiner` when dev mode is enabled.
///
/// The builder is responsible for creating the payload attributes that define how blocks should
/// be constructed during local mining.
fn local_payload_attributes_builder(
chain_spec: &Self::ChainSpec,
) -> impl PayloadAttributesBuilder<
<<Self as reth_node_api::NodeTypes>::Payload as PayloadTypes>::PayloadAttributes,
>;
}
/// Node launcher with support for launching various debugging utilities.
///
/// This launcher wraps an existing launcher and adds debugging capabilities when
/// certain debug flags are enabled. It provides two main debugging features:
///
/// ## RPC Consensus Client
///
/// When `--debug.rpc-consensus-ws <URL>` is provided, the launcher will:
/// - Connect to an external RPC `WebSocket` endpoint
/// - Fetch blocks from that endpoint
/// - Submit them to the local engine for execution
/// - Useful for testing engine behavior with real network data
///
/// ## Etherscan Consensus Client
///
/// When `--debug.etherscan [URL]` is provided, the launcher will:
/// - Use Etherscan API as a consensus client
/// - Fetch recent blocks from Etherscan
/// - Submit them to the local engine
/// - Requires `ETHERSCAN_API_KEY` environment variable
/// - Falls back to default Etherscan URL for the chain if URL not provided
#[derive(Debug, Clone)]
pub struct DebugNodeLauncher<L = EngineNodeLauncher> {
    /// The wrapped launcher that performs the actual node launch; debug
    /// utilities are attached to the handle it returns.
    inner: L,
}
impl<L> DebugNodeLauncher<L> {
    /// Creates a new instance of the [`DebugNodeLauncher`] wrapping the given inner launcher.
    pub const fn new(inner: L) -> Self {
        Self { inner }
    }
}
impl<L, Target, N, AddOns> LaunchNode<Target> for DebugNodeLauncher<L>
where
    N: FullNodeComponents<Types: DebugNode<N>>,
    AddOns: RethRpcAddOns<N>,
    L: LaunchNode<Target, Node = NodeHandle<N, AddOns>>,
{
    type Node = NodeHandle<N, AddOns>;
    async fn launch_node(self, target: Target) -> eyre::Result<Self::Node> {
        // Launch the inner node first; every debug utility below attaches to the
        // resulting handle and is spawned as a background task.
        let handle = self.inner.launch_node(target).await?;
        let config = &handle.node.config;
        if let Some(ws_url) = config.debug.rpc_consensus_ws.clone() {
            info!(target: "reth::cli", "Using RPC WebSocket consensus client: {}", ws_url);
            let block_provider =
                RpcBlockProvider::<AnyNetwork, _>::new(ws_url.as_str(), |block_response| {
                    // Round-trip through JSON to convert the generic network block response
                    // into the node's `RpcBlock` type before mapping it to a primitive block.
                    let json = serde_json::to_value(block_response)
                        .expect("Block serialization cannot fail");
                    let rpc_block =
                        serde_json::from_value(json).expect("Block deserialization cannot fail");
                    N::Types::rpc_to_primitive_block(rpc_block)
                })
                .await?;
            let rpc_consensus_client = DebugConsensusClient::new(
                handle.node.add_ons_handle.beacon_engine_handle.clone(),
                Arc::new(block_provider),
            );
            handle.node.task_executor.spawn_critical("rpc-ws consensus client", async move {
                rpc_consensus_client.run().await
            });
        }
        if let Some(maybe_custom_etherscan_url) = config.debug.etherscan.clone() {
            info!(target: "reth::cli", "Using etherscan as consensus client");
            let chain = config.chain.chain();
            let etherscan_url = maybe_custom_etherscan_url.map(Ok).unwrap_or_else(|| {
                // If URL isn't provided, use default Etherscan URL for the chain if it is known
                chain
                    .etherscan_urls()
                    .map(|urls| urls.0.to_string())
                    .ok_or_else(|| eyre::eyre!("failed to get etherscan url for chain: {chain}"))
            })?;
            let block_provider = EtherscanBlockProvider::new(
                etherscan_url,
                chain.etherscan_api_key().ok_or_else(|| {
                    eyre::eyre!(
                        "etherscan api key not found for rpc consensus client for chain: {chain}"
                    )
                })?,
                chain.id(),
                N::Types::rpc_to_primitive_block,
            );
            let rpc_consensus_client = DebugConsensusClient::new(
                handle.node.add_ons_handle.beacon_engine_handle.clone(),
                Arc::new(block_provider),
            );
            handle.node.task_executor.spawn_critical("etherscan consensus client", async move {
                rpc_consensus_client.run().await
            });
        }
        if config.dev.dev {
            info!(target: "reth::cli", "Using local payload attributes builder for dev mode");
            // Clone everything the miner task needs out of the handle before moving
            // it into the spawned future.
            let blockchain_db = handle.node.provider.clone();
            let chain_spec = config.chain.clone();
            let beacon_engine_handle = handle.node.add_ons_handle.beacon_engine_handle.clone();
            let pool = handle.node.pool.clone();
            let payload_builder_handle = handle.node.payload_builder_handle.clone();
            let dev_mining_mode = handle.node.config.dev_mining_mode(pool);
            handle.node.task_executor.spawn_critical("local engine", async move {
                LocalMiner::new(
                    blockchain_db,
                    N::Types::local_payload_attributes_builder(&chain_spec),
                    beacon_engine_handle,
                    dev_mining_mode,
                    payload_builder_handle,
                )
                .run()
                .await
            });
        }
        Ok(handle)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/launch/mod.rs | crates/node/builder/src/launch/mod.rs | //! Abstraction for launching a node.
pub mod common;
mod exex;
pub mod invalid_block_hook;
pub(crate) mod debug;
pub(crate) mod engine;
pub use common::LaunchContext;
pub use exex::ExExLauncher;
use std::future::Future;
/// A general purpose trait that launches a new node of any kind.
///
/// Acts as a node factory that targets a certain node configuration and returns a handle to the
/// node.
///
/// This is essentially the launch logic for a node.
///
/// See also [`EngineNodeLauncher`](crate::EngineNodeLauncher) and
/// [`NodeBuilderWithComponents::launch_with`](crate::NodeBuilderWithComponents)
pub trait LaunchNode<Target> {
    /// The node type that is created.
    type Node;
    /// Create and return a new node asynchronously.
    ///
    /// Consumes `self`: a launcher is single-use.
    fn launch_node(self, target: Target) -> impl Future<Output = eyre::Result<Self::Node>>;
}
/// Blanket implementation: any `FnOnce(Target)` closure returning a future of
/// `eyre::Result<Node>` can be used directly as a [`LaunchNode`].
impl<F, Target, Fut, Node> LaunchNode<Target> for F
where
    F: FnOnce(Target) -> Fut + Send,
    Fut: Future<Output = eyre::Result<Node>> + Send,
{
    type Node = Node;
    fn launch_node(self, target: Target) -> impl Future<Output = eyre::Result<Self::Node>> {
        self(target)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/launch/exex.rs | crates/node/builder/src/launch/exex.rs | //! Support for launching execution extensions.
use alloy_eips::{eip2124::Head, BlockNumHash};
use futures::future;
use reth_chain_state::ForkChoiceSubscriptions;
use reth_chainspec::EthChainSpec;
use reth_exex::{
ExExContext, ExExHandle, ExExManager, ExExManagerHandle, ExExNotificationSource, Wal,
DEFAULT_EXEX_MANAGER_CAPACITY,
};
use reth_node_api::{FullNodeComponents, NodeTypes, PrimitivesTy};
use reth_provider::CanonStateSubscriptions;
use reth_tracing::tracing::{debug, info};
use std::{fmt, fmt::Debug};
use tracing::Instrument;
use crate::{common::WithConfigs, exex::BoxedLaunchExEx};
/// Can launch execution extensions.
pub struct ExExLauncher<Node: FullNodeComponents> {
    /// Chain head at launch time; passed to every extension as its starting point.
    head: Head,
    /// Installed extensions as `(identifier, boxed launch function)` pairs.
    extensions: Vec<(String, Box<dyn BoxedLaunchExEx<Node>>)>,
    /// Node components cloned into each extension's [`ExExContext`].
    components: Node,
    /// Node + toml configuration shared with the extensions.
    config_container: WithConfigs<<Node::Types as NodeTypes>::ChainSpec>,
}
impl<Node: FullNodeComponents + Clone> ExExLauncher<Node> {
    /// Create a new `ExExLauncher` with the given extensions.
    pub const fn new(
        head: Head,
        components: Node,
        extensions: Vec<(String, Box<dyn BoxedLaunchExEx<Node>>)>,
        config_container: WithConfigs<<Node::Types as NodeTypes>::ChainSpec>,
    ) -> Self {
        Self { head, extensions, components, config_container }
    }
    /// Launches all execution extensions.
    ///
    /// Spawns all extensions and returns the handle to the exex manager if any extensions are
    /// installed.
    ///
    /// Returns `Ok(None)` when no extensions are configured; in that case nothing is
    /// spawned and no WAL is opened.
    pub async fn launch(
        self,
    ) -> eyre::Result<Option<ExExManagerHandle<PrimitivesTy<Node::Types>>>> {
        let Self { head, extensions, components, config_container } = self;
        let head = BlockNumHash::new(head.number, head.hash);
        if extensions.is_empty() {
            // nothing to launch
            return Ok(None)
        }
        info!(target: "reth::cli", "Loading ExEx Write-Ahead Log...");
        let exex_wal = Wal::new(
            config_container
                .config
                .datadir
                .clone()
                .resolve_datadir(config_container.config.chain.chain())
                .exex_wal(),
        )?;
        let mut exex_handles = Vec::with_capacity(extensions.len());
        let mut exexes = Vec::with_capacity(extensions.len());
        for (id, exex) in extensions {
            // create a new exex handle
            let (handle, events, notifications) = ExExHandle::new(
                id.clone(),
                head,
                components.provider().clone(),
                components.evm_config().clone(),
                exex_wal.handle(),
            );
            exex_handles.push(handle);
            // create the launch context for the exex
            let context = ExExContext {
                head,
                config: config_container.config.clone(),
                reth_config: config_container.toml_config.clone(),
                components: components.clone(),
                events,
                notifications,
            };
            let executor = components.task_executor().clone();
            // Defer the actual launch into a future so all extensions can be
            // initialized concurrently via `try_join_all` below.
            exexes.push(async move {
                debug!(target: "reth::cli", id, "spawning exex");
                let span = reth_tracing::tracing::info_span!("exex", id);
                // init the exex
                let exex = exex.launch(context).instrument(span.clone()).await?;
                // spawn it as a crit task
                executor.spawn_critical(
                    "exex",
                    async move {
                        info!(target: "reth::cli", "ExEx started");
                        match exex.await {
                            Ok(_) => panic!("ExEx {id} finished. ExExes should run indefinitely"),
                            Err(err) => panic!("ExEx {id} crashed: {err}"),
                        }
                    }
                    .instrument(span),
                );
                Ok::<(), eyre::Error>(())
            });
        }
        future::try_join_all(exexes).await?;
        // spawn exex manager
        debug!(target: "reth::cli", "spawning exex manager");
        let exex_manager = ExExManager::new(
            components.provider().clone(),
            exex_handles,
            DEFAULT_EXEX_MANAGER_CAPACITY,
            exex_wal,
            components.provider().finalized_block_stream(),
        );
        let exex_manager_handle = exex_manager.handle();
        components.task_executor().spawn_critical("exex manager", async move {
            exex_manager.await.expect("exex manager crashed");
        });
        // send notifications from the blockchain tree to exex manager
        let mut canon_state_notifications = components.provider().subscribe_to_canonical_state();
        let mut handle = exex_manager_handle.clone();
        components.task_executor().spawn_critical(
            "exex manager blockchain tree notifications",
            async move {
                while let Ok(notification) = canon_state_notifications.recv().await {
                    handle
                        .send_async(ExExNotificationSource::BlockchainTree, notification.into())
                        .await
                        .expect("blockchain tree notification could not be sent to exex manager");
                }
            },
        );
        info!(target: "reth::cli", "ExEx Manager started");
        Ok(Some(exex_manager_handle))
    }
}
impl<Node: FullNodeComponents> Debug for ExExLauncher<Node> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the extension identifiers are printable; the boxed launch closures and
        // node components have no meaningful `Debug` representation.
        let extension_ids: Vec<_> = self.extensions.iter().map(|(id, _)| id).collect();
        f.debug_struct("ExExLauncher")
            .field("head", &self.head)
            .field("extensions", &extension_ids)
            .field("components", &"...")
            .field("config_container", &self.config_container)
            .finish()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/builder/src/launch/common.rs | crates/node/builder/src/launch/common.rs | //! Helper types that can be used by launchers.
//!
//! ## Launch Context Type System
//!
//! The node launch process uses a type-state pattern to ensure correct initialization
//! order at compile time. Methods are only available when their prerequisites are met.
//!
//! ### Core Types
//!
//! - [`LaunchContext`]: Base context with executor and data directory
//! - [`LaunchContextWith<T>`]: Context with an attached value of type `T`
//! - [`Attached<L, R>`]: Pairs values, preserving both previous (L) and new (R) state
//!
//! ### Helper Attachments
//!
//! - [`WithConfigs`]: Node config + TOML config
//! - [`WithMeteredProvider`]: Provider factory with metrics
//! - [`WithMeteredProviders`]: Provider factory + blockchain provider
//! - [`WithComponents`]: Final form with all components
//!
//! ### Method Availability
//!
//! Methods are implemented on specific type combinations:
//! - `impl<T> LaunchContextWith<T>`: Generic methods available for any attachment
//! - `impl LaunchContextWith<WithConfigs>`: Config-specific methods
//! - `impl LaunchContextWith<Attached<WithConfigs, DB>>`: Database operations
//! - `impl LaunchContextWith<Attached<WithConfigs, ProviderFactory>>`: Provider operations
//! - etc.
//!
//! This ensures correct initialization order without runtime checks.
use crate::{
components::{NodeComponents, NodeComponentsBuilder},
hooks::OnComponentInitializedHook,
BuilderContext, ExExLauncher, NodeAdapter, PrimitivesTy,
};
use alloy_consensus::BlockHeader as _;
use alloy_eips::eip2124::Head;
use alloy_primitives::{BlockNumber, B256};
use eyre::Context;
use rayon::ThreadPoolBuilder;
use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks};
use reth_config::{config::EtlConfig, PruneConfig};
use reth_consensus::noop::NoopConsensus;
use reth_db_api::{database::Database, database_metrics::DatabaseMetrics};
use reth_db_common::init::{init_genesis, InitStorageError};
use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
use reth_engine_local::MiningMode;
use reth_evm::{noop::NoopEvmConfig, ConfigureEvm};
use reth_exex::ExExManagerHandle;
use reth_fs_util as fs;
use reth_network_p2p::headers::client::HeadersClient;
use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter};
use reth_node_core::{
args::DefaultEraHost,
dirs::{ChainPath, DataDirPath},
node_config::NodeConfig,
primitives::BlockHeader,
version::version_metadata,
};
use reth_node_metrics::{
chain::ChainSpecInfo,
hooks::Hooks,
recorder::install_prometheus_recorder,
server::{MetricServer, MetricServerConfig},
version::VersionInfo,
};
use reth_provider::{
providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider},
BlockHashReader, BlockNumReader, BlockReaderIdExt, ChainSpecProvider, ProviderError,
ProviderFactory, ProviderResult, StageCheckpointReader, StateProviderFactory,
StaticFileProviderFactory,
};
use reth_prune::{PruneModes, PrunerBuilder};
use reth_rpc_builder::config::RethRpcServerConfig;
use reth_rpc_layer::JwtSecret;
use reth_stages::{
sets::DefaultStages, stages::EraImportSource, MetricEvent, PipelineBuilder, PipelineTarget,
StageId,
};
use reth_static_file::StaticFileProducer;
use reth_tasks::TaskExecutor;
use reth_tracing::tracing::{debug, error, info, warn};
use reth_transaction_pool::TransactionPool;
use std::{sync::Arc, thread::available_parallelism};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedSender},
oneshot, watch,
};
use futures::{future::Either, stream, Stream, StreamExt};
use reth_node_ethstats::EthStatsService;
use reth_node_events::{cl::ConsensusLayerHealthEvents, node::NodeEvent};
/// Reusable setup for launching a node.
///
/// This is the entry point for the node launch process. It implements a builder
/// pattern using type-state programming to enforce correct initialization order.
///
/// ## Type Evolution
///
/// Starting from `LaunchContext`, each method transforms the type to reflect
/// accumulated state:
///
/// ```text
/// LaunchContext
///   └─> LaunchContextWith<WithConfigs>
///       └─> LaunchContextWith<Attached<WithConfigs, DB>>
///           └─> LaunchContextWith<Attached<WithConfigs, ProviderFactory>>
///               └─> LaunchContextWith<Attached<WithConfigs, WithMeteredProviders>>
///                   └─> LaunchContextWith<Attached<WithConfigs, WithComponents>>
/// ```
#[derive(Debug, Clone)]
pub struct LaunchContext {
    /// The task executor for the node.
    pub task_executor: TaskExecutor,
    /// The data directory for the node.
    pub data_dir: ChainPath<DataDirPath>,
}
impl LaunchContext {
    /// Create a new instance of the default node launcher.
    pub const fn new(task_executor: TaskExecutor, data_dir: ChainPath<DataDirPath>) -> Self {
        Self { task_executor, data_dir }
    }
    /// Create launch context with attachment.
    pub const fn with<T>(self, attachment: T) -> LaunchContextWith<T> {
        LaunchContextWith { inner: self, attachment }
    }
    /// Loads the reth config with the configured `data_dir` and overrides settings according to the
    /// `config`.
    ///
    /// Attaches both the `NodeConfig` and the loaded `reth.toml` config to the launch context.
    pub fn with_loaded_toml_config<ChainSpec>(
        self,
        config: NodeConfig<ChainSpec>,
    ) -> eyre::Result<LaunchContextWith<WithConfigs<ChainSpec>>>
    where
        ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks,
    {
        let toml_config = self.load_toml_config(&config)?;
        Ok(self.with(WithConfigs { config, toml_config }))
    }
    /// Loads the reth config with the configured `data_dir` and overrides settings according to the
    /// `config`.
    ///
    /// The config file is read from `--config` if provided, otherwise from the default
    /// location inside the data directory.
    pub fn load_toml_config<ChainSpec>(
        &self,
        config: &NodeConfig<ChainSpec>,
    ) -> eyre::Result<reth_config::Config>
    where
        ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks,
    {
        let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config());
        let mut toml_config = reth_config::Config::from_path(&config_path)
            .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?;
        Self::save_pruning_config_if_full_node(&mut toml_config, config, &config_path)?;
        info!(target: "reth::cli", path = ?config_path, "Configuration loaded");
        // Update the config with the command line arguments
        toml_config.peers.trusted_nodes_only = config.network.trusted_only;
        Ok(toml_config)
    }
    /// Save prune config to the toml file if node is a full node.
    fn save_pruning_config_if_full_node<ChainSpec>(
        reth_config: &mut reth_config::Config,
        config: &NodeConfig<ChainSpec>,
        config_path: impl AsRef<std::path::Path>,
    ) -> eyre::Result<()>
    where
        ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks,
    {
        if reth_config.prune.is_none() {
            // Persist the CLI-derived prune config so subsequent runs pick it up from the file.
            if let Some(prune_config) = config.prune_config() {
                reth_config.update_prune_config(prune_config);
                info!(target: "reth::cli", "Saving prune config to toml file");
                reth_config.save(config_path.as_ref())?;
            }
        } else if config.prune_config().is_none() {
            warn!(target: "reth::cli", "Prune configs present in config file but --full not provided. Running as a Full node");
        }
        Ok(())
    }
    /// Convenience function to [`Self::configure_globals`]
    pub fn with_configured_globals(self, reserved_cpu_cores: usize) -> Self {
        self.configure_globals(reserved_cpu_cores);
        self
    }
    /// Configure global settings this includes:
    ///
    /// - Raising the file descriptor limit
    /// - Configuring the global rayon thread pool with available parallelism. Honoring
    ///   engine.reserved-cpu-cores to reserve given number of cores for the OS while using at
    ///   least 1 core for the rayon thread pool
    pub fn configure_globals(&self, reserved_cpu_cores: usize) {
        // Raise the fd limit of the process.
        // Does not do anything on windows.
        match fdlimit::raise_fd_limit() {
            Ok(fdlimit::Outcome::LimitRaised { from, to }) => {
                debug!(from, to, "Raised file descriptor limit");
            }
            Ok(fdlimit::Outcome::Unsupported) => {}
            Err(err) => warn!(%err, "Failed to raise file descriptor limit"),
        }
        // Reserving the given number of CPU cores for the rest of OS.
        // Users can reserve more cores by setting engine.reserved-cpu-cores
        // Note: The global rayon thread pool will use at least one core.
        let num_threads = available_parallelism()
            .map_or(0, |num| num.get().saturating_sub(reserved_cpu_cores).max(1));
        if let Err(err) = ThreadPoolBuilder::new()
            .num_threads(num_threads)
            .thread_name(|i| format!("reth-rayon-{i}"))
            .build_global()
        {
            // Building the global pool can fail if one was already installed; this is
            // non-fatal, so only warn.
            warn!(%err, "Failed to build global thread pool")
        }
    }
}
/// A [`LaunchContext`] along with an additional value.
///
/// The type parameter `T` represents the current state of the launch process.
/// Methods are conditionally implemented based on `T`, ensuring operations
/// are only available when their prerequisites are met.
///
/// For example:
/// - Config methods when `T = WithConfigs<ChainSpec>`
/// - Database operations when `T = Attached<WithConfigs<ChainSpec>, DB>`
/// - Provider operations when `T = Attached<WithConfigs<ChainSpec>, ProviderFactory<N>>`
#[derive(Debug, Clone)]
pub struct LaunchContextWith<T> {
    /// The wrapped launch context.
    pub inner: LaunchContext,
    /// The additional attached value.
    pub attachment: T,
}
impl<T> LaunchContextWith<T> {
    /// Configure global settings this includes:
    ///
    /// - Raising the file descriptor limit
    /// - Configuring the global rayon thread pool
    pub fn configure_globals(&self, reserved_cpu_cores: u64) {
        // u64 -> usize conversion only fails on targets where usize is narrower than
        // 64 bits and the value is out of range; panicking there is acceptable since
        // the value comes from CLI configuration.
        self.inner.configure_globals(reserved_cpu_cores.try_into().unwrap());
    }
    /// Returns the data directory.
    pub const fn data_dir(&self) -> &ChainPath<DataDirPath> {
        &self.inner.data_dir
    }
    /// Returns the task executor.
    pub const fn task_executor(&self) -> &TaskExecutor {
        &self.inner.task_executor
    }
    /// Attaches another value to the launch context.
    pub fn attach<A>(self, attachment: A) -> LaunchContextWith<Attached<T, A>> {
        LaunchContextWith {
            inner: self.inner,
            attachment: Attached::new(self.attachment, attachment),
        }
    }
    /// Consumes the type and calls a function with a reference to the context.
    /// Returns the context again.
    pub fn inspect<F>(self, f: F) -> Self
    where
        F: FnOnce(&Self),
    {
        f(&self);
        self
    }
}
impl<ChainSpec> LaunchContextWith<WithConfigs<ChainSpec>> {
    /// Resolves the trusted peers and adds them to the toml config.
    pub fn with_resolved_peers(mut self) -> eyre::Result<Self> {
        // Copy the CLI-provided trusted peers into the toml config's peer list.
        let trusted_peers = self.attachment.config.network.trusted_peers.clone();
        if !trusted_peers.is_empty() {
            info!(target: "reth::cli", "Adding trusted nodes");
            self.attachment.toml_config.peers.trusted_nodes.extend(trusted_peers);
        }
        Ok(self)
    }
}
/// Accessors for the two halves of an [`Attached`] pair.
impl<L, R> LaunchContextWith<Attached<L, R>> {
    /// Get a reference to the left value.
    pub const fn left(&self) -> &L {
        &self.attachment.left
    }
    /// Get a reference to the right value.
    pub const fn right(&self) -> &R {
        &self.attachment.right
    }
    /// Get a mutable reference to the left value.
    pub const fn left_mut(&mut self) -> &mut L {
        &mut self.attachment.left
    }
    /// Get a mutable reference to the right value.
    pub const fn right_mut(&mut self) -> &mut R {
        &mut self.attachment.right
    }
}
impl<R, ChainSpec: EthChainSpec> LaunchContextWith<Attached<WithConfigs<ChainSpec>, R>> {
    /// Adjust certain settings in the config to make sure they are set correctly
    ///
    /// This includes:
    /// - Making sure the ETL dir is set to the datadir
    /// - RPC settings are adjusted to the correct port
    pub fn with_adjusted_configs(self) -> Self {
        self.ensure_etl_datadir().with_adjusted_instance_ports()
    }
    /// Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
    pub fn ensure_etl_datadir(mut self) -> Self {
        if self.toml_config_mut().stages.etl.dir.is_none() {
            let etl_path = EtlConfig::from_datadir(self.data_dir().data_dir());
            if etl_path.exists() {
                // Remove etl-path files on launch; failure here is non-fatal.
                if let Err(err) = fs::remove_dir_all(&etl_path) {
                    warn!(target: "reth::cli", ?etl_path, %err, "Failed to remove ETL path on launch");
                }
            }
            self.toml_config_mut().stages.etl.dir = Some(etl_path);
        }
        self
    }
    /// Change rpc port numbers based on the instance number.
    pub fn with_adjusted_instance_ports(mut self) -> Self {
        self.node_config_mut().adjust_instance_ports();
        self
    }
    /// Returns the container for all config types
    pub const fn configs(&self) -> &WithConfigs<ChainSpec> {
        self.attachment.left()
    }
    /// Returns the attached [`NodeConfig`].
    pub const fn node_config(&self) -> &NodeConfig<ChainSpec> {
        &self.left().config
    }
    /// Returns a mutable reference to the attached [`NodeConfig`].
    pub const fn node_config_mut(&mut self) -> &mut NodeConfig<ChainSpec> {
        &mut self.left_mut().config
    }
    /// Returns the attached toml config [`reth_config::Config`].
    pub const fn toml_config(&self) -> &reth_config::Config {
        &self.left().toml_config
    }
    /// Returns a mutable reference to the attached toml config [`reth_config::Config`].
    pub const fn toml_config_mut(&mut self) -> &mut reth_config::Config {
        &mut self.left_mut().toml_config
    }
    /// Returns the configured chain spec.
    pub fn chain_spec(&self) -> Arc<ChainSpec> {
        self.node_config().chain.clone()
    }
    /// Get the hash of the genesis block.
    pub fn genesis_hash(&self) -> B256 {
        self.node_config().chain.genesis_hash()
    }
    /// Returns the chain identifier of the node.
    pub fn chain_id(&self) -> Chain {
        self.node_config().chain.chain()
    }
    /// Returns true if the node is configured as --dev
    pub const fn is_dev(&self) -> bool {
        self.node_config().dev.dev
    }
    /// Returns the configured [`PruneConfig`]
    ///
    /// Any configuration set in CLI will take precedence over those set in toml
    pub fn prune_config(&self) -> Option<PruneConfig>
    where
        ChainSpec: reth_chainspec::EthereumHardforks,
    {
        let Some(mut node_prune_config) = self.node_config().prune_config() else {
            // No CLI config is set, use the toml config.
            return self.toml_config().prune.clone();
        };
        // Otherwise, use the CLI configuration and merge with toml config.
        node_prune_config.merge(self.toml_config().prune.clone());
        Some(node_prune_config)
    }
    /// Returns the configured [`PruneModes`], returning the default if no config was available.
    pub fn prune_modes(&self) -> PruneModes
    where
        ChainSpec: reth_chainspec::EthereumHardforks,
    {
        self.prune_config().map(|config| config.segments).unwrap_or_default()
    }
    /// Returns an initialized [`PrunerBuilder`] based on the configured [`PruneConfig`]
    pub fn pruner_builder(&self) -> PrunerBuilder
    where
        ChainSpec: reth_chainspec::EthereumHardforks,
    {
        PrunerBuilder::new(self.prune_config().unwrap_or_default())
            .delete_limit(self.chain_spec().prune_delete_limit())
            .timeout(PrunerBuilder::DEFAULT_TIMEOUT)
    }
    /// Loads the JWT secret for the engine API
    pub fn auth_jwt_secret(&self) -> eyre::Result<JwtSecret> {
        let default_jwt_path = self.data_dir().jwt();
        let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?;
        Ok(secret)
    }
    /// Returns the [`MiningMode`] intended for --dev mode.
    pub fn dev_mining_mode<Pool>(&self, pool: Pool) -> MiningMode<Pool>
    where
        Pool: TransactionPool + Unpin,
    {
        // A fixed block time takes precedence; otherwise mine instantly on new transactions.
        if let Some(interval) = self.node_config().dev.block_time {
            MiningMode::interval(interval)
        } else {
            MiningMode::instant(pool, self.node_config().dev.block_max_transactions)
        }
    }
}
impl<DB, ChainSpec> LaunchContextWith<Attached<WithConfigs<ChainSpec>, DB>>
where
    DB: Database + Clone + 'static,
    ChainSpec: EthChainSpec + EthereumHardforks + 'static,
{
    /// Returns the [`ProviderFactory`] for the attached storage after executing a consistent check
    /// between the database and static files. **It may execute a pipeline unwind if it fails this
    /// check.**
    pub async fn create_provider_factory<N, Evm>(&self) -> eyre::Result<ProviderFactory<N>>
    where
        N: ProviderNodeTypes<DB = DB, ChainSpec = ChainSpec>,
        Evm: ConfigureEvm<Primitives = N::Primitives> + 'static,
    {
        let factory = ProviderFactory::new(
            self.right().clone(),
            self.chain_spec(),
            StaticFileProvider::read_write(self.data_dir().static_files())?,
        )
        .with_prune_modes(self.prune_modes())
        .with_static_files_metrics();
        let has_receipt_pruning =
            self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning());
        // Check for consistency between database and static files. If it fails, it unwinds to
        // the first block that's consistent between database and static files.
        if let Some(unwind_target) = factory
            .static_file_provider()
            .check_consistency(&factory.provider()?, has_receipt_pruning)?
        {
            // Highly unlikely to happen, and given its destructive nature, it's better to panic
            // instead.
            assert_ne!(
                unwind_target,
                PipelineTarget::Unwind(0),
                "A static file <> database inconsistency was found that would trigger an unwind to block 0"
            );
            info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check.");
            let (_tip_tx, tip_rx) = watch::channel(B256::ZERO);
            // Builds an unwind-only pipeline: all downloaders/consensus/EVM are no-ops
            // because only the unwind direction is exercised.
            let pipeline = PipelineBuilder::default()
                .add_stages(DefaultStages::new(
                    factory.clone(),
                    tip_rx,
                    Arc::new(NoopConsensus::default()),
                    NoopHeaderDownloader::default(),
                    NoopBodiesDownloader::default(),
                    NoopEvmConfig::<Evm>::default(),
                    self.toml_config().stages.clone(),
                    self.prune_modes(),
                    None,
                ))
                .build(
                    factory.clone(),
                    StaticFileProducer::new(factory.clone(), self.prune_modes()),
                );
            // Unwinds to block
            let (tx, rx) = oneshot::channel();
            // Pipeline should be run as blocking and panic if it fails.
            self.task_executor().spawn_critical_blocking(
                "pipeline task",
                Box::pin(async move {
                    let (_, result) = pipeline.run_as_fut(Some(unwind_target)).await;
                    let _ = tx.send(result);
                }),
            );
            rx.await?.inspect_err(|err| {
                error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind")
            })?;
        }
        Ok(factory)
    }
    /// Creates a new [`ProviderFactory`] and attaches it to the launch context.
    pub async fn with_provider_factory<N, Evm>(
        self,
    ) -> eyre::Result<LaunchContextWith<Attached<WithConfigs<ChainSpec>, ProviderFactory<N>>>>
    where
        N: ProviderNodeTypes<DB = DB, ChainSpec = ChainSpec>,
        Evm: ConfigureEvm<Primitives = N::Primitives> + 'static,
    {
        let factory = self.create_provider_factory::<N, Evm>().await?;
        let ctx = LaunchContextWith {
            inner: self.inner,
            attachment: self.attachment.map_right(|_| factory),
        };
        Ok(ctx)
    }
}
impl<T> LaunchContextWith<Attached<WithConfigs<T::ChainSpec>, ProviderFactory<T>>>
where
    T: ProviderNodeTypes,
{
    /// Returns access to the underlying database.
    pub const fn database(&self) -> &T::DB {
        self.right().db_ref()
    }
    /// Returns the configured `ProviderFactory`.
    pub const fn provider_factory(&self) -> &ProviderFactory<T> {
        self.right()
    }
    /// Returns the static file provider to interact with the static files.
    pub fn static_file_provider(&self) -> StaticFileProvider<T::Primitives> {
        self.right().static_file_provider()
    }
    /// This launches the prometheus endpoint.
    ///
    /// Convenience function to [`Self::start_prometheus_endpoint`]
    pub async fn with_prometheus_server(self) -> eyre::Result<Self> {
        self.start_prometheus_endpoint().await?;
        Ok(self)
    }
    /// Starts the prometheus endpoint.
    ///
    /// This is a no-op if no metrics listen address is configured.
    pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> {
        // ensure recorder runs upkeep periodically
        install_prometheus_recorder().spawn_upkeep();
        let listen_addr = self.node_config().metrics;
        if let Some(addr) = listen_addr {
            info!(target: "reth::cli", "Starting metrics endpoint at {}", addr);
            let config = MetricServerConfig::new(
                addr,
                VersionInfo {
                    version: version_metadata().cargo_pkg_version.as_ref(),
                    build_timestamp: version_metadata().vergen_build_timestamp.as_ref(),
                    cargo_features: version_metadata().vergen_cargo_features.as_ref(),
                    git_sha: version_metadata().vergen_git_sha.as_ref(),
                    target_triple: version_metadata().vergen_cargo_target_triple.as_ref(),
                    build_profile: version_metadata().build_profile_name.as_ref(),
                },
                ChainSpecInfo { name: self.left().config.chain.chain().to_string() },
                self.task_executor().clone(),
                // Hooks let the metrics server pull DB and static-file metrics on scrape.
                Hooks::builder()
                    .with_hook({
                        let db = self.database().clone();
                        move || db.report_metrics()
                    })
                    .with_hook({
                        let sfp = self.static_file_provider();
                        move || {
                            if let Err(error) = sfp.report_metrics() {
                                error!(%error, "Failed to report metrics for the static file provider");
                            }
                        }
                    })
                    .build(),
            );
            MetricServer::new(config).serve().await?;
        }
        Ok(())
    }
    /// Convenience function to [`Self::init_genesis`]
    pub fn with_genesis(self) -> Result<Self, InitStorageError> {
        init_genesis(self.provider_factory())?;
        Ok(self)
    }
    /// Write the genesis block and state if it has not already been written
    pub fn init_genesis(&self) -> Result<B256, InitStorageError> {
        init_genesis(self.provider_factory())
    }
    /// Creates a new `WithMeteredProvider` container and attaches it to the
    /// launch context.
    ///
    /// This spawns a metrics task that listens for metrics related events and updates metrics for
    /// prometheus.
    pub fn with_metrics_task(
        self,
    ) -> LaunchContextWith<Attached<WithConfigs<T::ChainSpec>, WithMeteredProvider<T>>> {
        let (metrics_sender, metrics_receiver) = unbounded_channel();
        let with_metrics =
            WithMeteredProvider { provider_factory: self.right().clone(), metrics_sender };
        debug!(target: "reth::cli", "Spawning stages metrics listener task");
        let sync_metrics_listener = reth_stages::MetricsListener::new(metrics_receiver);
        self.task_executor().spawn_critical("stages metrics listener task", sync_metrics_listener);
        LaunchContextWith {
            inner: self.inner,
            attachment: self.attachment.map_right(|_| with_metrics),
        }
    }
}
impl<N, DB>
    LaunchContextWith<
        Attached<WithConfigs<N::ChainSpec>, WithMeteredProvider<NodeTypesWithDBAdapter<N, DB>>>,
    >
where
    N: NodeTypes,
    DB: Database + DatabaseMetrics + Clone + Unpin + 'static,
{
    /// Returns the configured `ProviderFactory`.
    const fn provider_factory(&self) -> &ProviderFactory<NodeTypesWithDBAdapter<N, DB>> {
        &self.right().provider_factory
    }
    /// Returns the metrics sender.
    fn sync_metrics_tx(&self) -> UnboundedSender<MetricEvent> {
        self.right().metrics_sender.clone()
    }
    /// Creates a `BlockchainProvider` and attaches it to the launch context.
    ///
    /// The provider is built by the supplied closure from a clone of the metered
    /// provider factory; the metrics sender is carried over into the new attachment.
    #[expect(clippy::complexity)]
    pub fn with_blockchain_db<T, F>(
        self,
        create_blockchain_provider: F,
    ) -> eyre::Result<LaunchContextWith<Attached<WithConfigs<N::ChainSpec>, WithMeteredProviders<T>>>>
    where
        T: FullNodeTypes<Types = N, DB = DB>,
        F: FnOnce(ProviderFactory<NodeTypesWithDBAdapter<N, DB>>) -> eyre::Result<T::Provider>,
    {
        let blockchain_db = create_blockchain_provider(self.provider_factory().clone())?;
        let metered_providers = WithMeteredProviders {
            db_provider_container: WithMeteredProvider {
                provider_factory: self.provider_factory().clone(),
                metrics_sender: self.sync_metrics_tx(),
            },
            blockchain_db,
        };
        let ctx = LaunchContextWith {
            inner: self.inner,
            attachment: self.attachment.map_right(|_| metered_providers),
        };
        Ok(ctx)
    }
}
impl<T>
LaunchContextWith<
Attached<WithConfigs<<T::Types as NodeTypes>::ChainSpec>, WithMeteredProviders<T>>,
>
where
T: FullNodeTypes<Types: NodeTypesForProvider>,
{
/// Returns access to the underlying database.
pub const fn database(&self) -> &T::DB {
self.provider_factory().db_ref()
}
/// Returns the configured `ProviderFactory`.
pub const fn provider_factory(
&self,
) -> &ProviderFactory<NodeTypesWithDBAdapter<T::Types, T::DB>> {
&self.right().db_provider_container.provider_factory
}
/// Fetches the head block from the database.
///
/// If the database is empty, returns the genesis block.
pub fn lookup_head(&self) -> eyre::Result<Head> {
self.node_config()
.lookup_head(self.provider_factory())
.wrap_err("the head block is missing")
}
/// Returns the metrics sender.
pub fn sync_metrics_tx(&self) -> UnboundedSender<MetricEvent> {
self.right().db_provider_container.metrics_sender.clone()
}
/// Returns a reference to the blockchain provider.
pub const fn blockchain_db(&self) -> &T::Provider {
&self.right().blockchain_db
}
/// Creates a `NodeAdapter` and attaches it to the launch context.
pub async fn with_components<CB>(
self,
components_builder: CB,
on_component_initialized: Box<
dyn OnComponentInitializedHook<NodeAdapter<T, CB::Components>>,
>,
) -> eyre::Result<
LaunchContextWith<
Attached<WithConfigs<<T::Types as NodeTypes>::ChainSpec>, WithComponents<T, CB>>,
>,
>
where
CB: NodeComponentsBuilder<T>,
{
// fetch the head block from the database
let head = self.lookup_head()?;
let builder_ctx = BuilderContext::new(
head,
self.blockchain_db().clone(),
self.task_executor().clone(),
self.configs().clone(),
);
debug!(target: "reth::cli", "creating components");
let components = components_builder.build_components(&builder_ctx).await?;
let blockchain_db = self.blockchain_db().clone();
let node_adapter = NodeAdapter {
components,
task_executor: self.task_executor().clone(),
provider: blockchain_db,
};
debug!(target: "reth::cli", "calling on_component_initialized hook");
on_component_initialized.on_event(node_adapter.clone())?;
let components_container = WithComponents {
db_provider_container: WithMeteredProvider {
provider_factory: self.provider_factory().clone(),
metrics_sender: self.sync_metrics_tx(),
},
node_adapter,
head,
};
let ctx = LaunchContextWith {
inner: self.inner,
attachment: self.attachment.map_right(|_| components_container),
};
Ok(ctx)
}
}
impl<T, CB>
LaunchContextWith<
Attached<WithConfigs<<T::Types as NodeTypes>::ChainSpec>, WithComponents<T, CB>>,
>
where
T: FullNodeTypes<Types: NodeTypesForProvider>,
CB: NodeComponentsBuilder<T>,
{
/// Returns the configured `ProviderFactory`.
pub const fn provider_factory(
&self,
) -> &ProviderFactory<NodeTypesWithDBAdapter<T::Types, T::DB>> {
&self.right().db_provider_container.provider_factory
}
/// Returns the max block that the node should run to, looking it up from the network if
/// necessary
pub async fn max_block<C>(&self, client: C) -> eyre::Result<Option<BlockNumber>>
where
C: HeadersClient<Header: BlockHeader>,
{
self.node_config().max_block(client, self.provider_factory().clone()).await
}
/// Returns the static file provider to interact with the static files.
pub fn static_file_provider(&self) -> StaticFileProvider<<T::Types as NodeTypes>::Primitives> {
self.provider_factory().static_file_provider()
}
/// Creates a new [`StaticFileProducer`] with the attached database.
pub fn static_file_producer(
&self,
) -> StaticFileProducer<ProviderFactory<NodeTypesWithDBAdapter<T::Types, T::DB>>> {
StaticFileProducer::new(self.provider_factory().clone(), self.prune_modes())
}
/// Returns the current head block.
pub const fn head(&self) -> Head {
self.right().head
}
/// Returns the configured `NodeAdapter`.
pub const fn node_adapter(&self) -> &NodeAdapter<T, CB::Components> {
&self.right().node_adapter
}
/// Returns mutable reference to the configured `NodeAdapter`.
pub const fn node_adapter_mut(&mut self) -> &mut NodeAdapter<T, CB::Components> {
&mut self.right_mut().node_adapter
}
/// Returns a reference to the blockchain provider.
pub const fn blockchain_db(&self) -> &T::Provider {
&self.node_adapter().provider
}
/// Returns the initial backfill to sync to at launch.
///
/// This returns the configured `debug.tip` if set, otherwise it will check if backfill was
/// previously interrupted and returns the block hash of the last checkpoint, see also
/// [`Self::check_pipeline_consistency`]
pub fn initial_backfill_target(&self) -> ProviderResult<Option<B256>> {
let mut initial_target = self.node_config().debug.tip;
if initial_target.is_none() {
initial_target = self.check_pipeline_consistency()?;
}
Ok(initial_target)
}
/// Returns true if the node should terminate after the initial backfill run.
///
/// This is the case if any of these configs are set:
/// `--debug.max-block`
/// `--debug.terminate`
pub const fn terminate_after_initial_backfill(&self) -> bool {
self.node_config().debug.terminate || self.node_config().debug.max_block.is_some()
}
/// Ensures that the database matches chain-specific requirements.
///
/// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past
/// bedrock height)
fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> {
if self.chain_spec().is_optimism() &&
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/metrics/src/lib.rs | crates/node/metrics/src/lib.rs | //! Metrics utilities for the node.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod chain;
/// The metrics hooks for prometheus.
pub mod hooks;
pub mod recorder;
/// The metric server serving the metrics.
pub mod server;
pub mod version;
pub use metrics_exporter_prometheus::*;
pub use metrics_process::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/metrics/src/version.rs | crates/node/metrics/src/version.rs | //! This exposes reth's version information over prometheus.
use metrics::gauge;
/// Contains version information for the application.
#[derive(Debug, Clone)]
pub struct VersionInfo {
/// The version of the application.
pub version: &'static str,
/// The build timestamp of the application.
pub build_timestamp: &'static str,
/// The cargo features enabled for the build.
pub cargo_features: &'static str,
/// The Git SHA of the build.
pub git_sha: &'static str,
/// The target triple for the build.
pub target_triple: &'static str,
/// The build profile (e.g., debug or release).
pub build_profile: &'static str,
}
impl VersionInfo {
/// This exposes reth's version information over prometheus.
pub fn register_version_metrics(&self) {
let labels: [(&str, &str); 6] = [
("version", self.version),
("build_timestamp", self.build_timestamp),
("cargo_features", self.cargo_features),
("git_sha", self.git_sha),
("target_triple", self.target_triple),
("build_profile", self.build_profile),
];
let gauge = gauge!("info", &labels);
gauge.set(1);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/metrics/src/chain.rs | crates/node/metrics/src/chain.rs | //! This exposes reth's chain information over prometheus.
use metrics::{describe_gauge, gauge};
/// Contains chain information for the application.
#[derive(Debug, Clone)]
pub struct ChainSpecInfo {
/// The name of the chain.
pub name: String,
}
impl ChainSpecInfo {
/// This exposes reth's chain information over prometheus.
pub fn register_chain_spec_metrics(&self) {
let labels: [(&str, String); 1] = [("name", self.name.clone())];
describe_gauge!("chain_spec", "Information about the chain");
let _gauge = gauge!("chain_spec", &labels);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/metrics/src/recorder.rs | crates/node/metrics/src/recorder.rs | //! Prometheus recorder
use eyre::WrapErr;
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
use metrics_util::layers::{PrefixLayer, Stack};
use std::sync::{atomic::AtomicBool, LazyLock};
/// Installs the Prometheus recorder as the global recorder.
///
/// Note: This must be installed before any metrics are `described`.
///
/// Caution: This only configures the global recorder and does not spawn the exporter.
/// Callers must run [`PrometheusRecorder::spawn_upkeep`] manually.
pub fn install_prometheus_recorder() -> &'static PrometheusRecorder {
&PROMETHEUS_RECORDER_HANDLE
}
/// The default Prometheus recorder handle. We use a global static to ensure that it is only
/// installed once.
static PROMETHEUS_RECORDER_HANDLE: LazyLock<PrometheusRecorder> =
LazyLock::new(|| PrometheusRecorder::install().unwrap());
/// A handle to the Prometheus recorder.
///
/// This is intended to be used as the global recorder.
/// Callers must ensure that [`PrometheusRecorder::spawn_upkeep`] is called once.
#[derive(Debug)]
pub struct PrometheusRecorder {
handle: PrometheusHandle,
upkeep: AtomicBool,
}
impl PrometheusRecorder {
const fn new(handle: PrometheusHandle) -> Self {
Self { handle, upkeep: AtomicBool::new(false) }
}
/// Returns a reference to the [`PrometheusHandle`].
pub const fn handle(&self) -> &PrometheusHandle {
&self.handle
}
/// Spawns the upkeep task if there hasn't been one spawned already.
///
/// ## Panics
///
/// This method must be called from within an existing Tokio runtime or it will panic.
///
/// See also [`PrometheusHandle::run_upkeep`]
pub fn spawn_upkeep(&self) {
if self
.upkeep
.compare_exchange(
false,
true,
std::sync::atomic::Ordering::SeqCst,
std::sync::atomic::Ordering::Acquire,
)
.is_err()
{
return;
}
let handle = self.handle.clone();
tokio::spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
handle.run_upkeep();
}
});
}
/// Installs Prometheus as the metrics recorder.
///
/// Caution: This only configures the global recorder and does not spawn the exporter.
/// Callers must run [`Self::spawn_upkeep`] manually.
pub fn install() -> eyre::Result<Self> {
let recorder = PrometheusBuilder::new().build_recorder();
let handle = recorder.handle();
// Build metrics stack
Stack::new(recorder)
.push(PrefixLayer::new("reth"))
.install()
.wrap_err("Couldn't set metrics recorder.")?;
Ok(Self::new(handle))
}
}
#[cfg(test)]
mod tests {
use super::*;
// Dependencies using different version of the `metrics` crate (to be exact, 0.21 vs 0.22)
// may not be able to communicate with each other through the global recorder.
//
// This test ensures that `metrics-process` dependency plays well with the current
// `metrics-exporter-prometheus` dependency version.
#[test]
fn process_metrics() {
// initialize the lazy handle
let _ = &*PROMETHEUS_RECORDER_HANDLE;
let process = metrics_process::Collector::default();
process.describe();
process.collect();
let metrics = PROMETHEUS_RECORDER_HANDLE.handle.render();
assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}");
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/metrics/src/hooks.rs | crates/node/metrics/src/hooks.rs | use metrics_process::Collector;
use std::{fmt, sync::Arc};
/// The simple alias for function types that are `'static`, `Send`, and `Sync`.
pub trait Hook: Fn() + Send + Sync + 'static {}
impl<T: 'static + Fn() + Send + Sync> Hook for T {}
/// A builder-like type to create a new [`Hooks`] instance.
pub struct HooksBuilder {
hooks: Vec<Box<dyn Hook<Output = ()>>>,
}
impl HooksBuilder {
/// Registers a [`Hook`].
pub fn with_hook(self, hook: impl Hook) -> Self {
self.with_boxed_hook(Box::new(hook))
}
/// Registers a [`Hook`] by calling the provided closure.
pub fn install_hook<F, H>(self, f: F) -> Self
where
F: FnOnce() -> H,
H: Hook,
{
self.with_hook(f())
}
/// Registers a [`Hook`].
#[inline]
pub fn with_boxed_hook(mut self, hook: Box<dyn Hook<Output = ()>>) -> Self {
self.hooks.push(hook);
self
}
/// Builds the [`Hooks`] collection from the registered hooks.
pub fn build(self) -> Hooks {
Hooks { inner: Arc::new(self.hooks) }
}
}
impl Default for HooksBuilder {
fn default() -> Self {
Self {
hooks: vec![
Box::new(|| Collector::default().collect()),
Box::new(collect_memory_stats),
Box::new(collect_io_stats),
],
}
}
}
impl std::fmt::Debug for HooksBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("HooksBuilder")
.field("hooks", &format_args!("Vec<Box<dyn Hook>>, len: {}", self.hooks.len()))
.finish()
}
}
/// Helper type for managing hooks
#[derive(Clone)]
pub struct Hooks {
inner: Arc<Vec<Box<dyn Hook<Output = ()>>>>,
}
impl Hooks {
/// Creates a new [`HooksBuilder`] instance.
#[inline]
pub fn builder() -> HooksBuilder {
HooksBuilder::default()
}
pub(crate) fn iter(&self) -> impl Iterator<Item = &Box<dyn Hook<Output = ()>>> {
self.inner.iter()
}
}
impl fmt::Debug for Hooks {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let hooks_len = self.inner.len();
f.debug_struct("Hooks")
.field("inner", &format_args!("Arc<Vec<Box<dyn Hook>>>, len: {hooks_len}"))
.finish()
}
}
#[cfg(all(feature = "jemalloc", unix))]
fn collect_memory_stats() {
use metrics::gauge;
use tikv_jemalloc_ctl::{epoch, stats};
use tracing::error;
if epoch::advance().map_err(|error| error!(%error, "Failed to advance jemalloc epoch")).is_err()
{
return
}
if let Ok(value) = stats::active::read()
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.active"))
{
gauge!("jemalloc.active").set(value as f64);
}
if let Ok(value) = stats::allocated::read()
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.allocated"))
{
gauge!("jemalloc.allocated").set(value as f64);
}
if let Ok(value) = stats::mapped::read()
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.mapped"))
{
gauge!("jemalloc.mapped").set(value as f64);
}
if let Ok(value) = stats::metadata::read()
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.metadata"))
{
gauge!("jemalloc.metadata").set(value as f64);
}
if let Ok(value) = stats::resident::read()
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.resident"))
{
gauge!("jemalloc.resident").set(value as f64);
}
if let Ok(value) = stats::retained::read()
.map_err(|error| error!(%error, "Failed to read jemalloc.stats.retained"))
{
gauge!("jemalloc.retained").set(value as f64);
}
}
#[cfg(not(all(feature = "jemalloc", unix)))]
const fn collect_memory_stats() {}
#[cfg(target_os = "linux")]
fn collect_io_stats() {
use metrics::counter;
use tracing::error;
let Ok(process) = procfs::process::Process::myself()
.map_err(|error| error!(%error, "Failed to get currently running process"))
else {
return
};
let Ok(io) = process.io().map_err(
|error| error!(%error, "Failed to get IO stats for the currently running process"),
) else {
return
};
counter!("io.rchar").absolute(io.rchar);
counter!("io.wchar").absolute(io.wchar);
counter!("io.syscr").absolute(io.syscr);
counter!("io.syscw").absolute(io.syscw);
counter!("io.read_bytes").absolute(io.read_bytes);
counter!("io.write_bytes").absolute(io.write_bytes);
counter!("io.cancelled_write_bytes").absolute(io.cancelled_write_bytes);
}
#[cfg(not(target_os = "linux"))]
const fn collect_io_stats() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/metrics/src/server.rs | crates/node/metrics/src/server.rs | use crate::{
chain::ChainSpecInfo,
hooks::{Hook, Hooks},
recorder::install_prometheus_recorder,
version::VersionInfo,
};
use eyre::WrapErr;
use http::{header::CONTENT_TYPE, HeaderValue, Response};
use metrics::describe_gauge;
use metrics_process::Collector;
use reth_metrics::metrics::Unit;
use reth_tasks::TaskExecutor;
use std::{convert::Infallible, net::SocketAddr, sync::Arc};
/// Configuration for the [`MetricServer`]
#[derive(Debug)]
pub struct MetricServerConfig {
listen_addr: SocketAddr,
version_info: VersionInfo,
chain_spec_info: ChainSpecInfo,
task_executor: TaskExecutor,
hooks: Hooks,
}
impl MetricServerConfig {
/// Create a new [`MetricServerConfig`] with the given configuration
pub const fn new(
listen_addr: SocketAddr,
version_info: VersionInfo,
chain_spec_info: ChainSpecInfo,
task_executor: TaskExecutor,
hooks: Hooks,
) -> Self {
Self { listen_addr, hooks, task_executor, version_info, chain_spec_info }
}
}
/// [`MetricServer`] responsible for serving the metrics endpoint
#[derive(Debug)]
pub struct MetricServer {
config: MetricServerConfig,
}
impl MetricServer {
/// Create a new [`MetricServer`] with the given configuration
pub const fn new(config: MetricServerConfig) -> Self {
Self { config }
}
/// Spawns the metrics server
pub async fn serve(&self) -> eyre::Result<()> {
let MetricServerConfig { listen_addr, hooks, task_executor, version_info, chain_spec_info } =
&self.config;
let hooks = hooks.clone();
self.start_endpoint(
*listen_addr,
Arc::new(move || hooks.iter().for_each(|hook| hook())),
task_executor.clone(),
)
.await
.wrap_err_with(|| format!("Could not start Prometheus endpoint at {listen_addr}"))?;
// Describe metrics after recorder installation
describe_db_metrics();
describe_static_file_metrics();
Collector::default().describe();
describe_memory_stats();
describe_io_stats();
version_info.register_version_metrics();
chain_spec_info.register_chain_spec_metrics();
Ok(())
}
async fn start_endpoint<F: Hook + 'static>(
&self,
listen_addr: SocketAddr,
hook: Arc<F>,
task_executor: TaskExecutor,
) -> eyre::Result<()> {
let listener = tokio::net::TcpListener::bind(listen_addr)
.await
.wrap_err("Could not bind to address")?;
task_executor.spawn_with_graceful_shutdown_signal(|mut signal| {
Box::pin(async move {
loop {
let io = tokio::select! {
_ = &mut signal => break,
io = listener.accept() => {
match io {
Ok((stream, _remote_addr)) => stream,
Err(err) => {
tracing::error!(%err, "failed to accept connection");
continue;
}
}
}
};
let handle = install_prometheus_recorder();
let hook = hook.clone();
let service = tower::service_fn(move |_| {
(hook)();
let metrics = handle.handle().render();
let mut response = Response::new(metrics);
response
.headers_mut()
.insert(CONTENT_TYPE, HeaderValue::from_static("text/plain"));
async move { Ok::<_, Infallible>(response) }
});
let mut shutdown = signal.clone().ignore_guard();
tokio::task::spawn(async move {
let _ = jsonrpsee_server::serve_with_graceful_shutdown(
io,
service,
&mut shutdown,
)
.await
.inspect_err(|error| tracing::debug!(%error, "failed to serve request"));
});
}
})
});
Ok(())
}
}
fn describe_db_metrics() {
describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)");
describe_gauge!("db.table_pages", "The number of database pages for a table");
describe_gauge!("db.table_entries", "The number of entries for a table");
describe_gauge!("db.freelist", "The number of pages on the freelist");
describe_gauge!("db.page_size", Unit::Bytes, "The size of a database page (in bytes)");
describe_gauge!(
"db.timed_out_not_aborted_transactions",
"Number of timed out transactions that were not aborted by the user yet"
);
}
fn describe_static_file_metrics() {
describe_gauge!("static_files.segment_size", Unit::Bytes, "The size of a static file segment");
describe_gauge!("static_files.segment_files", "The number of files for a static file segment");
describe_gauge!(
"static_files.segment_entries",
"The number of entries for a static file segment"
);
}
#[cfg(all(feature = "jemalloc", unix))]
fn describe_memory_stats() {
describe_gauge!(
"jemalloc.active",
Unit::Bytes,
"Total number of bytes in active pages allocated by the application"
);
describe_gauge!(
"jemalloc.allocated",
Unit::Bytes,
"Total number of bytes allocated by the application"
);
describe_gauge!(
"jemalloc.mapped",
Unit::Bytes,
"Total number of bytes in active extents mapped by the allocator"
);
describe_gauge!(
"jemalloc.metadata",
Unit::Bytes,
"Total number of bytes dedicated to jemalloc metadata"
);
describe_gauge!(
"jemalloc.resident",
Unit::Bytes,
"Total number of bytes in physically resident data pages mapped by the allocator"
);
describe_gauge!(
"jemalloc.retained",
Unit::Bytes,
"Total number of bytes in virtual memory mappings that were retained rather than \
being returned to the operating system via e.g. munmap(2)"
);
}
#[cfg(not(all(feature = "jemalloc", unix)))]
const fn describe_memory_stats() {}
#[cfg(target_os = "linux")]
fn describe_io_stats() {
use metrics::describe_counter;
describe_counter!("io.rchar", "Characters read");
describe_counter!("io.wchar", "Characters written");
describe_counter!("io.syscr", "Read syscalls");
describe_counter!("io.syscw", "Write syscalls");
describe_counter!("io.read_bytes", Unit::Bytes, "Bytes read");
describe_counter!("io.write_bytes", Unit::Bytes, "Bytes written");
describe_counter!("io.cancelled_write_bytes", Unit::Bytes, "Cancelled write bytes");
}
#[cfg(not(target_os = "linux"))]
const fn describe_io_stats() {}
#[cfg(test)]
mod tests {
use super::*;
use reqwest::Client;
use reth_tasks::TaskManager;
use socket2::{Domain, Socket, Type};
use std::net::{SocketAddr, TcpListener};
fn get_random_available_addr() -> SocketAddr {
let addr = &"127.0.0.1:0".parse::<SocketAddr>().unwrap().into();
let socket = Socket::new(Domain::IPV4, Type::STREAM, None).unwrap();
socket.set_reuse_address(true).unwrap();
socket.bind(addr).unwrap();
socket.listen(1).unwrap();
let listener = TcpListener::from(socket);
listener.local_addr().unwrap()
}
#[tokio::test]
async fn test_metrics_endpoint() {
let chain_spec_info = ChainSpecInfo { name: "test".to_string() };
let version_info = VersionInfo {
version: "test",
build_timestamp: "test",
cargo_features: "test",
git_sha: "test",
target_triple: "test",
build_profile: "test",
};
let tasks = TaskManager::current();
let executor = tasks.executor();
let hooks = Hooks::builder().build();
let listen_addr = get_random_available_addr();
let config =
MetricServerConfig::new(listen_addr, version_info, chain_spec_info, executor, hooks);
MetricServer::new(config).serve().await.unwrap();
// Send request to the metrics endpoint
let url = format!("http://{listen_addr}");
let response = Client::new().get(&url).send().await.unwrap();
assert!(response.status().is_success());
// Check the response body
let body = response.text().await.unwrap();
assert!(body.contains("reth_process_cpu_seconds_total"));
assert!(body.contains("reth_process_start_time_seconds"));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/ethstats/src/lib.rs | crates/node/ethstats/src/lib.rs | //!
//! `EthStats` client support for Reth.
//!
//! This crate provides the necessary components to connect to, authenticate with, and report
//! node and network statistics to an `EthStats` server. It includes abstractions for `WebSocket`
//! connections, error handling, event/message types, and the main `EthStats` service logic.
//!
//! - `connection`: `WebSocket` connection management and utilities
//! - `error`: Error types for connection and `EthStats` operations
//! - `ethstats`: Main service logic for `EthStats` client
//! - `events`: Data structures for `EthStats` protocol messages
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod connection;
mod credentials;
mod error;
mod ethstats;
pub use ethstats::*;
mod events;
pub use events::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/ethstats/src/ethstats.rs | crates/node/ethstats/src/ethstats.rs | use crate::{
connection::ConnWrapper,
credentials::EthstatsCredentials,
error::EthStatsError,
events::{
AuthMsg, BlockMsg, BlockStats, HistoryMsg, LatencyMsg, NodeInfo, NodeStats, PendingMsg,
PendingStats, PingMsg, StatsMsg, TxStats, UncleStats,
},
};
use alloy_consensus::{BlockHeader, Sealable};
use alloy_primitives::U256;
use reth_chain_state::{CanonStateNotification, CanonStateSubscriptions};
use reth_network_api::{NetworkInfo, Peers};
use reth_primitives_traits::{Block, BlockBody};
use reth_storage_api::{BlockReader, BlockReaderIdExt, NodePrimitivesProvider};
use reth_transaction_pool::TransactionPool;
use chrono::Local;
use serde_json::Value;
use std::{
str::FromStr,
sync::Arc,
time::{Duration, Instant},
};
use tokio::{
sync::{mpsc, Mutex, RwLock},
time::{interval, sleep, timeout},
};
use tokio_stream::StreamExt;
use tokio_tungstenite::connect_async;
use tracing::{debug, info};
use url::Url;
/// Number of historical blocks to include in a history update sent to the `EthStats` server
const HISTORY_UPDATE_RANGE: u64 = 50;
/// Duration to wait before attempting to reconnect to the `EthStats` server
const RECONNECT_INTERVAL: Duration = Duration::from_secs(5);
/// Maximum time to wait for a ping response from the server
const PING_TIMEOUT: Duration = Duration::from_secs(5);
/// Interval between regular stats reports to the server
const REPORT_INTERVAL: Duration = Duration::from_secs(15);
/// Maximum time to wait for initial connection establishment
const CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
/// Maximum time to wait for reading messages from the server
const READ_TIMEOUT: Duration = Duration::from_secs(30);
/// Main service for interacting with an `EthStats` server
///
/// This service handles all communication with the `EthStats` server including
/// authentication, stats reporting, block notifications, and connection management.
/// It maintains a persistent `WebSocket` connection and automatically reconnects
/// when the connection is lost.
#[derive(Debug)]
pub struct EthStatsService<Network, Provider, Pool> {
/// Authentication credentials for the `EthStats` server
credentials: EthstatsCredentials,
/// `WebSocket` connection wrapper, wrapped in `Arc<RwLock>` for shared access
conn: Arc<RwLock<Option<ConnWrapper>>>,
/// Timestamp of the last ping sent to the server
last_ping: Arc<Mutex<Option<Instant>>>,
/// Network interface for getting peer and sync information
network: Network,
/// Blockchain provider for reading block data and state
provider: Provider,
/// Transaction pool for getting pending transaction statistics
pool: Pool,
}
impl<Network, Provider, Pool> EthStatsService<Network, Provider, Pool>
where
Network: NetworkInfo + Peers,
Provider: BlockReaderIdExt + CanonStateSubscriptions,
Pool: TransactionPool,
{
/// Create a new `EthStats` service and establish initial connection
///
/// # Arguments
/// * `url` - Connection string in format "`node_id:secret@host`"
/// * `network` - Network interface implementation
/// * `provider` - Blockchain provider implementation
/// * `pool` - Transaction pool implementation
pub async fn new(
url: &str,
network: Network,
provider: Provider,
pool: Pool,
) -> Result<Self, EthStatsError> {
let credentials = EthstatsCredentials::from_str(url)?;
let service = Self {
credentials,
conn: Arc::new(RwLock::new(None)),
last_ping: Arc::new(Mutex::new(None)),
network,
provider,
pool,
};
service.connect().await?;
Ok(service)
}
/// Establish `WebSocket` connection to the `EthStats` server
///
/// Attempts to connect to the server using the credentials and handles
/// connection timeouts and errors.
async fn connect(&self) -> Result<(), EthStatsError> {
debug!(
target: "ethstats",
"Attempting to connect to EthStats server at {}", self.credentials.host
);
let full_url = format!("ws://{}/api", self.credentials.host);
let url = Url::parse(&full_url)
.map_err(|e| EthStatsError::InvalidUrl(format!("Invalid URL: {full_url} - {e}")))?;
match timeout(CONNECT_TIMEOUT, connect_async(url.to_string())).await {
Ok(Ok((ws_stream, _))) => {
debug!(
target: "ethstats",
"Successfully connected to EthStats server at {}", self.credentials.host
);
let conn: ConnWrapper = ConnWrapper::new(ws_stream);
*self.conn.write().await = Some(conn.clone());
self.login().await?;
Ok(())
}
Ok(Err(e)) => Err(EthStatsError::InvalidUrl(e.to_string())),
Err(_) => {
debug!(target: "ethstats", "Connection to EthStats server timed out");
Err(EthStatsError::Timeout)
}
}
}
/// Authenticate with the `EthStats` server
///
/// Sends authentication credentials and node information to the server
/// and waits for a successful acknowledgment.
async fn login(&self) -> Result<(), EthStatsError> {
debug!(
target: "ethstats",
"Attempting to login to EthStats server as node_id {}", self.credentials.node_id
);
let conn = self.conn.read().await;
let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
let network_status = self
.network
.network_status()
.await
.map_err(|e| EthStatsError::AuthError(e.to_string()))?;
let id = &self.credentials.node_id;
let secret = &self.credentials.secret;
let protocol = network_status
.capabilities
.iter()
.map(|cap| format!("{}/{}", cap.name, cap.version))
.collect::<Vec<_>>()
.join(", ");
let port = self.network.local_addr().port() as u64;
let auth = AuthMsg {
id: id.clone(),
secret: secret.clone(),
info: NodeInfo {
name: id.clone(),
node: network_status.client_version.clone(),
port,
network: self.network.chain_id().to_string(),
protocol,
api: "No".to_string(),
os: std::env::consts::OS.into(),
os_ver: std::env::consts::ARCH.into(),
client: "0.1.1".to_string(),
history: true,
},
};
let message = auth.generate_login_message();
conn.write_json(&message).await?;
let response =
timeout(READ_TIMEOUT, conn.read_json()).await.map_err(|_| EthStatsError::Timeout)??;
if let Some(ack) = response.get("emit") {
if ack.get(0) == Some(&Value::String("ready".to_string())) {
info!(
target: "ethstats",
"Login successful to EthStats server as node_id {}", self.credentials.node_id
);
return Ok(());
}
}
debug!(target: "ethstats", "Login failed: Unauthorized or unexpected login response");
Err(EthStatsError::AuthError("Unauthorized or unexpected login response".into()))
}
/// Report current node statistics to the `EthStats` server
///
/// Sends information about the node's current state including sync status,
/// peer count, and uptime.
async fn report_stats(&self) -> Result<(), EthStatsError> {
    // Fail fast when there is no live connection.
    let guard = self.conn.read().await;
    let connection = guard.as_ref().ok_or(EthStatsError::NotConnected)?;

    // Snapshot the node state into the wire format.
    let stats = NodeStats {
        active: true,
        syncing: self.network.is_syncing(),
        peers: self.network.num_connected_peers() as u64,
        gas_price: 0, // TODO
        uptime: 100,
    };
    let stats_msg = StatsMsg { id: self.credentials.node_id.clone(), stats };

    connection.write_json(&stats_msg.generate_stats_message()).await?;
    Ok(())
}
/// Send a ping message to the `EthStats` server
///
/// Records the ping time and starts a timeout task to detect if the server
/// doesn't respond within the expected timeframe.
async fn send_ping(&self) -> Result<(), EthStatsError> {
let conn = self.conn.read().await;
let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
// Remember when the ping was sent; `report_latency` consumes this slot
// when the matching pong arrives.
let ping_time = Instant::now();
*self.last_ping.lock().await = Some(ping_time);
let client_time = Local::now().format("%Y-%m-%d %H:%M:%S%.f %:z %Z").to_string();
let ping_msg = PingMsg { id: self.credentials.node_id.clone(), client_time };
let message = ping_msg.generate_ping_message();
conn.write_json(&message).await?;
// Start ping timeout
let active_ping = self.last_ping.clone();
let conn_ref = self.conn.clone();
tokio::spawn(async move {
sleep(PING_TIMEOUT).await;
let mut active = active_ping.lock().await;
// Still `Some` means no pong cleared the slot within PING_TIMEOUT.
if active.is_some() {
debug!(target: "ethstats", "Ping timeout");
*active = None;
// Clear connection to trigger reconnect
if let Some(conn) = conn_ref.write().await.take() {
let _ = conn.close().await;
}
}
});
Ok(())
}
/// Report latency measurement to the `EthStats` server
///
/// Calculates the round-trip time from the last ping and sends it to
/// the server. This is called when a pong response is received.
async fn report_latency(&self) -> Result<(), EthStatsError> {
    let guard = self.conn.read().await;
    let connection = guard.as_ref().ok_or(EthStatsError::NotConnected)?;

    // `take` clears the slot, which also disarms the ping-timeout task.
    let mut pending = self.last_ping.lock().await;
    if let Some(sent_at) = pending.take() {
        // Half the round-trip time approximates one-way latency.
        let latency = sent_at.elapsed().as_millis() as u64 / 2;
        debug!(target: "ethstats", "Reporting latency: {}ms", latency);

        let latency_msg = LatencyMsg { id: self.credentials.node_id.clone(), latency };
        connection.write_json(&latency_msg.generate_latency_message()).await?
    }
    Ok(())
}
/// Report pending transaction count to the `EthStats` server
///
/// Gets the current number of pending transactions from the pool and
/// sends this information to the server.
async fn report_pending(&self) -> Result<(), EthStatsError> {
    let guard = self.conn.read().await;
    let connection = guard.as_ref().ok_or(EthStatsError::NotConnected)?;

    // Only the count is reported, not the transactions themselves.
    let pending = self.pool.pool_size().pending as u64;
    debug!(target: "ethstats", "Reporting pending txs: {}", pending);

    let pending_msg = PendingMsg {
        id: self.credentials.node_id.clone(),
        stats: PendingStats { pending },
    };
    connection.write_json(&pending_msg.generate_pending_message()).await?;
    Ok(())
}
/// Report block information to the `EthStats` server
///
/// Fetches block data either from a canonical state notification or
/// the current best block, converts it to stats format, and sends
/// it to the server.
///
/// # Arguments
/// * `head` - Optional canonical state notification containing new block info
async fn report_block(
&self,
head: Option<CanonStateNotification<<Provider as NodePrimitivesProvider>::Primitives>>,
) -> Result<(), EthStatsError> {
let conn = self.conn.read().await;
let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
// Prefer the notification's tip; otherwise fall back to the provider's
// best block number.
let block_number = if let Some(head) = head {
head.tip().header().number()
} else {
self.provider
.best_block_number()
.map_err(|e| EthStatsError::DataFetchError(e.to_string()))?
};
match self.provider.block_by_id(block_number.into()) {
Ok(Some(block)) => {
let block_msg = BlockMsg {
id: self.credentials.node_id.clone(),
block: self.block_to_stats(&block)?,
};
debug!(target: "ethstats", "Reporting block: {}", block_number);
let message = block_msg.generate_block_message();
conn.write_json(&message).await?;
}
Ok(None) => {
// Block not found, stop fetching
debug!(target: "ethstats", "Block {} not found", block_number);
return Err(EthStatsError::BlockNotFound(block_number));
}
Err(e) => {
debug!(target: "ethstats", "Error fetching block {}: {}", block_number, e);
return Err(EthStatsError::DataFetchError(e.to_string()));
}
};
Ok(())
}
/// Convert a block to `EthStats` block statistics format
///
/// Extracts relevant information from a block and formats it according
/// to the `EthStats` protocol specification.
///
/// # Arguments
/// * `block` - The block to convert
fn block_to_stats(
&self,
block: &<Provider as BlockReader>::Block,
) -> Result<BlockStats, EthStatsError> {
let body = block.body();
let header = block.header();
let txs = body.transaction_hashes_iter().copied().map(|hash| TxStats { hash }).collect();
Ok(BlockStats {
number: U256::from(header.number()),
hash: header.hash_slow(),
parent_hash: header.parent_hash(),
timestamp: U256::from(header.timestamp()),
miner: header.beneficiary(),
gas_used: header.gas_used(),
gas_limit: header.gas_limit(),
diff: header.difficulty().to_string(),
total_diff: "0".into(),
txs,
tx_root: header.transactions_root(),
root: header.state_root(),
uncles: UncleStats(vec![]),
})
}
/// Report historical block data to the `EthStats` server
///
/// Fetches multiple blocks by their numbers and sends their statistics
/// to the server. This is typically called in response to a history
/// request from the server.
///
/// # Arguments
/// * `list` - Vector of block numbers to fetch and report
async fn report_history(&self, list: Option<&Vec<u64>>) -> Result<(), EthStatsError> {
let conn = self.conn.read().await;
let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
let indexes = if let Some(list) = list {
list
} else {
// No explicit list: default to the most recent HISTORY_UPDATE_RANGE
// blocks ending at the best block.
let best_block_number = self
.provider
.best_block_number()
.map_err(|e| EthStatsError::DataFetchError(e.to_string()))?;
let start = best_block_number.saturating_sub(HISTORY_UPDATE_RANGE);
// Borrow of a temporary Vec; it stays alive for this `let` binding
// via temporary lifetime extension.
&(start..=best_block_number).collect()
};
let mut blocks = Vec::with_capacity(indexes.len());
for &block_number in indexes {
match self.provider.block_by_id(block_number.into()) {
Ok(Some(block)) => {
blocks.push(block);
}
Ok(None) => {
// Block not found, stop fetching
debug!(target: "ethstats", "Block {} not found", block_number);
break;
}
Err(e) => {
debug!(target: "ethstats", "Error fetching block {}: {}", block_number, e);
break;
}
}
}
// Convert what was fetched; a conversion error aborts the whole report.
let history: Vec<BlockStats> =
blocks.iter().map(|block| self.block_to_stats(block)).collect::<Result<_, _>>()?;
if history.is_empty() {
debug!(target: "ethstats", "No history to send to stats server");
} else {
debug!(
target: "ethstats",
"Sending historical blocks to ethstats, first: {}, last: {}",
history.first().unwrap().number,
history.last().unwrap().number
);
}
let history_msg = HistoryMsg { id: self.credentials.node_id.clone(), history };
let message = history_msg.generate_history_message();
conn.write_json(&message).await?;
Ok(())
}
/// Send a complete status report to the `EthStats` server
///
/// Performs all regular reporting tasks: ping, block info, pending
/// transactions, and general statistics.
async fn report(&self) -> Result<(), EthStatsError> {
    // Each step aborts the whole report on its first failure.
    self.send_ping().await?;
    self.report_block(None).await?;
    self.report_pending().await?;
    self.report_stats().await
}
/// Handle incoming messages from the `EthStats` server
///
/// # Expected Message Variants
///
/// This function expects messages in the following format:
///
/// ```json
/// { "emit": [<command: String>, <payload: Object>] }
/// ```
///
/// ## Supported Commands:
///
/// - `"node-pong"`: Indicates a pong response to a previously sent ping. The payload is
///   ignored. Triggers a latency report to the server.
///   - Example: ```json { "emit": [ "node-pong", { "clientTime": "2025-07-10 12:00:00.123
///     +00:00 UTC", "serverTime": "2025-07-10 12:00:01.456 +00:00 UTC" } ] } ```
///
/// - `"history"`: Requests historical block data. The payload may contain a `list` field with
///   block numbers to fetch. If `list` is not present, the default range is used.
///   - Example with list: `{ "emit": ["history", {"list": [1, 2, 3], "min": 1, "max": 3}] }`
///   - Example without list: `{ "emit": ["history", {}] }`
///
/// ## Other Commands:
///
/// Any other command is logged as unhandled and ignored.
async fn handle_message(&self, msg: Value) -> Result<(), EthStatsError> {
// Every server message must be wrapped in an "emit" envelope.
let emit = match msg.get("emit") {
Some(emit) => emit,
None => {
debug!(target: "ethstats", "Stats server sent non-broadcast, msg {}", msg);
return Err(EthStatsError::InvalidRequest);
}
};
// The first array element is the command name.
let command = match emit.get(0) {
Some(Value::String(command)) => command.as_str(),
_ => {
debug!(target: "ethstats", "Invalid stats server message type, msg {}", msg);
return Err(EthStatsError::InvalidRequest);
}
};
match command {
"node-pong" => {
self.report_latency().await?;
}
"history" => {
// Optional payload: {"list": [<u64>, ...]}.
let block_numbers = emit
.get(1)
.and_then(|v| v.as_object())
.and_then(|obj| obj.get("list"))
.and_then(|v| v.as_array());
if block_numbers.is_none() {
self.report_history(None).await?;
return Ok(());
}
// Any non-u64 entry invalidates the whole request.
let block_numbers = block_numbers
.unwrap()
.iter()
.map(|val| {
val.as_u64().ok_or_else(|| {
debug!(
target: "ethstats",
"Invalid stats history block number, msg {}", msg
);
EthStatsError::InvalidRequest
})
})
.collect::<Result<_, _>>()?;
self.report_history(Some(&block_numbers)).await?;
}
other => debug!(target: "ethstats", "Unhandled command: {}", other),
}
Ok(())
}
/// Main service loop that handles all `EthStats` communication
///
/// This method runs the main event loop that:
/// - Maintains the `WebSocket` connection
/// - Handles incoming messages from the server
/// - Reports statistics at regular intervals
/// - Processes new block notifications
/// - Automatically reconnects when the connection is lost
///
/// The service runs until explicitly shut down or an unrecoverable
/// error occurs.
pub async fn run(self) {
// Create channels for internal communication
let (shutdown_tx, mut shutdown_rx) = mpsc::channel(1);
let (message_tx, mut message_rx) = mpsc::channel(32);
let (head_tx, mut head_rx) = mpsc::channel(10);
// Start the read loop in a separate task
let read_handle = {
let conn = self.conn.clone();
let message_tx = message_tx.clone();
let shutdown_tx = shutdown_tx.clone();
tokio::spawn(async move {
loop {
// Take a read guard per iteration so a reconnect (which swaps the
// slot under the write lock) is picked up on the next pass.
let conn = conn.read().await;
if let Some(conn) = conn.as_ref() {
match conn.read_json().await {
Ok(msg) => {
if message_tx.send(msg).await.is_err() {
break;
}
}
Err(e) => {
debug!(target: "ethstats", "Read error: {}", e);
break;
}
}
} else {
// Not connected yet; back off before polling the slot again.
sleep(RECONNECT_INTERVAL).await;
}
}
let _ = shutdown_tx.send(()).await;
})
};
// Forward canonical chain updates into the main loop.
let canonical_stream_handle = {
let mut canonical_stream = self.provider.canonical_state_stream();
let head_tx = head_tx.clone();
let shutdown_tx = shutdown_tx.clone();
tokio::spawn(async move {
loop {
let head = canonical_stream.next().await;
if let Some(head) = head {
if head_tx.send(head).await.is_err() {
break;
}
}
}
let _ = shutdown_tx.send(()).await;
})
};
let mut pending_tx_receiver = self.pool.pending_transactions_listener();
// Set up intervals
let mut report_interval = interval(REPORT_INTERVAL);
let mut reconnect_interval = interval(RECONNECT_INTERVAL);
// Main event loop using select!
loop {
tokio::select! {
// Handle shutdown signal
_ = shutdown_rx.recv() => {
info!(target: "ethstats", "Shutting down ethstats service");
break;
}
// Handle messages from the read loop
Some(msg) = message_rx.recv() => {
if let Err(e) = self.handle_message(msg).await {
debug!(target: "ethstats", "Error handling message: {}", e);
self.disconnect().await;
}
}
// Handle new block
Some(head) = head_rx.recv() => {
if let Err(e) = self.report_block(Some(head)).await {
debug!(target: "ethstats", "Failed to report block: {}", e);
self.disconnect().await;
}
if let Err(e) = self.report_pending().await {
debug!(target: "ethstats", "Failed to report pending: {}", e);
self.disconnect().await;
}
}
// Handle new pending tx
_= pending_tx_receiver.recv() => {
if let Err(e) = self.report_pending().await {
debug!(target: "ethstats", "Failed to report pending: {}", e);
self.disconnect().await;
}
}
// Handle stats reporting
_ = report_interval.tick() => {
if let Err(e) = self.report().await {
debug!(target: "ethstats", "Failed to report: {}", e);
self.disconnect().await;
}
}
// Handle reconnection
// Guarded: only attempted while the connection slot is empty.
_ = reconnect_interval.tick(), if self.conn.read().await.is_none() => {
match self.connect().await {
Ok(_) => info!(target: "ethstats", "Reconnected successfully"),
Err(e) => debug!(target: "ethstats", "Reconnect failed: {}", e),
}
}
}
}
// Cleanup
self.disconnect().await;
// Cancel background tasks
read_handle.abort();
canonical_stream_handle.abort();
}
/// Gracefully close the `WebSocket` connection
///
/// Attempts to close the connection cleanly and logs any errors
/// that occur during the process.
async fn disconnect(&self) {
    // Empty the shared slot first so other tasks observe the disconnected
    // state, then close the taken connection best-effort.
    let Some(conn) = self.conn.write().await.take() else { return };
    if let Err(e) = conn.close().await {
        debug!(target: "ethstats", "Error closing connection: {}", e);
    }
}
/// Test helper to check connection status
#[cfg(test)]
pub async fn is_connected(&self) -> bool {
    let guard = self.conn.read().await;
    guard.is_some()
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures_util::{SinkExt, StreamExt};
use reth_network_api::noop::NoopNetwork;
use reth_storage_api::noop::NoopProvider;
use reth_transaction_pool::noop::NoopTransactionPool;
use serde_json::json;
use tokio::net::TcpListener;
use tokio_tungstenite::tungstenite::protocol::{frame::Utf8Bytes, Message};
const TEST_HOST: &str = "127.0.0.1";
const TEST_PORT: u16 = 0; // Let OS choose port
async fn setup_mock_server() -> (String, tokio::task::JoinHandle<()>) {
let listener = TcpListener::bind((TEST_HOST, TEST_PORT)).await.unwrap();
let addr = listener.local_addr().unwrap();
let handle = tokio::spawn(async move {
let (stream, _) = listener.accept().await.unwrap();
let mut ws_stream = tokio_tungstenite::accept_async(stream).await.unwrap();
// Handle login
if let Some(Ok(Message::Text(text))) = ws_stream.next().await {
let value: serde_json::Value = serde_json::from_str(&text).unwrap();
if value["emit"][0] == "hello" {
let response = json!({
"emit": ["ready", []]
});
ws_stream
.send(Message::Text(Utf8Bytes::from(response.to_string())))
.await
.unwrap();
}
}
// Handle ping
while let Some(Ok(msg)) = ws_stream.next().await {
if let Message::Text(text) = msg {
if text.contains("node-ping") {
let pong = json!({
"emit": ["node-pong", {"id": "test-node"}]
});
ws_stream
.send(Message::Text(Utf8Bytes::from(pong.to_string())))
.await
.unwrap();
}
}
}
});
(addr.to_string(), handle)
}
#[tokio::test]
async fn test_connection_and_login() {
let (server_url, server_handle) = setup_mock_server().await;
let ethstats_url = format!("test-node:test-secret@{server_url}");
let network = NoopNetwork::default();
let provider = NoopProvider::default();
let pool = NoopTransactionPool::default();
let service = EthStatsService::new(ðstats_url, network, provider, pool)
.await
.expect("Service should connect");
// Verify connection was established
assert!(service.is_connected().await, "Service should be connected");
// Clean up server
server_handle.abort();
}
#[tokio::test]
async fn test_history_command_handling() {
let (server_url, server_handle) = setup_mock_server().await;
let ethstats_url = format!("test-node:test-secret@{server_url}");
let network = NoopNetwork::default();
let provider = NoopProvider::default();
let pool = NoopTransactionPool::default();
let service = EthStatsService::new(ðstats_url, network, provider, pool)
.await
.expect("Service should connect");
// Simulate receiving a history command
let history_cmd = json!({
"emit": ["history", {"list": [1, 2, 3]}]
});
service.handle_message(history_cmd).await.expect("History command should be handled");
// Clean up server
server_handle.abort();
}
#[tokio::test]
async fn test_invalid_url_handling() {
let network = NoopNetwork::default();
let provider = NoopProvider::default();
let pool = NoopTransactionPool::default();
// Test missing secret
let result = EthStatsService::new(
"test-node@localhost",
network.clone(),
provider.clone(),
pool.clone(),
)
.await;
assert!(
matches!(result, Err(EthStatsError::InvalidUrl(_))),
"Should detect invalid URL format"
);
// Test invalid URL format
let result = EthStatsService::new("invalid-url", network, provider, pool).await;
assert!(
matches!(result, Err(EthStatsError::InvalidUrl(_))),
"Should detect invalid URL format"
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/ethstats/src/connection.rs | crates/node/ethstats/src/connection.rs | /// Abstractions for managing `WebSocket` connections in the ethstats service.
use crate::error::ConnectionError;
use futures_util::{
stream::{SplitSink, SplitStream},
SinkExt, StreamExt,
};
use serde_json::Value;
use std::sync::Arc;
use tokio::{net::TcpStream, sync::Mutex};
use tokio_tungstenite::{
tungstenite::protocol::{frame::Utf8Bytes, Message},
MaybeTlsStream, WebSocketStream,
};
/// Type alias for a `WebSocket` stream that may be TLS or plain TCP
pub(crate) type WsStream = WebSocketStream<MaybeTlsStream<TcpStream>>;
/// Wrapper for a thread-safe, asynchronously accessible `WebSocket` connection
///
/// The stream is split into independently lockable read and write halves so
/// that sends and receives do not block each other.
#[derive(Debug, Clone)]
pub(crate) struct ConnWrapper {
/// Write-only part of the `WebSocket` stream
writer: Arc<Mutex<SplitSink<WsStream, Message>>>,
/// Read-only part of the `WebSocket` stream
reader: Arc<Mutex<SplitStream<WsStream>>>,
}
impl ConnWrapper {
    /// Create a new connection wrapper from a `WebSocket` stream
    pub(crate) fn new(stream: WsStream) -> Self {
        // Split the stream so reads and writes take separate locks.
        let (sink, source) = stream.split();
        Self {
            writer: Arc::new(Mutex::new(sink)),
            reader: Arc::new(Mutex::new(source)),
        }
    }

    /// Write a JSON string as a text message to the `WebSocket`
    pub(crate) async fn write_json(&self, value: &str) -> Result<(), ConnectionError> {
        self.writer
            .lock()
            .await
            .send(Message::Text(Utf8Bytes::from(value)))
            .await?;
        Ok(())
    }

    /// Read the next JSON text message from the `WebSocket`
    ///
    /// Waits for the next text message, parses it as JSON, and returns the value.
    /// Ignores non-text messages. Returns an error if the connection is closed or if parsing fails.
    pub(crate) async fn read_json(&self) -> Result<Value, ConnectionError> {
        let mut reader = self.reader.lock().await;
        loop {
            match reader.next().await {
                // Stream exhausted: the peer went away.
                None => return Err(ConnectionError::ConnectionClosed),
                Some(frame) => match frame? {
                    Message::Text(text) => return Ok(serde_json::from_str(&text)?),
                    Message::Close(_) => return Err(ConnectionError::ConnectionClosed),
                    // Skip binary/ping/pong frames and keep waiting.
                    _ => {}
                },
            }
        }
    }

    /// Close the `WebSocket` connection gracefully
    pub(crate) async fn close(&self) -> Result<(), ConnectionError> {
        self.writer.lock().await.close().await?;
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/ethstats/src/error.rs | crates/node/ethstats/src/error.rs | use thiserror::Error;
/// Errors that can occur during `WebSocket` connection handling
#[derive(Debug, Error)]
pub enum ConnectionError {
/// The `WebSocket` connection was closed unexpectedly
#[error("Connection closed")]
ConnectionClosed,
/// Error occurred during JSON serialization/deserialization
// `#[from]` lets `?` convert serde_json errors into this variant directly.
#[error("Serialization error: {0}")]
Serialization(#[from] serde_json::Error),
/// Error occurred during `WebSocket` communication
#[error("WebSocket error: {0}")]
WebSocket(#[from] tokio_tungstenite::tungstenite::Error),
}
/// Main error type for the `EthStats` client
///
/// This enum covers all possible errors that can occur when interacting
/// with an `EthStats` server, including connection issues, authentication
/// problems, data fetching errors, and various I/O operations.
#[derive(Debug, Error)]
pub enum EthStatsError {
/// The provided URL is invalid or malformed
#[error("Invalid URL: {0}")]
InvalidUrl(String),
/// Error occurred during connection establishment or maintenance
#[error("Connection error: {0}")]
ConnectionError(#[from] ConnectionError),
/// Authentication failed with the `EthStats` server
#[error("Authentication error: {0}")]
AuthError(String),
/// Attempted to perform an operation while not connected to the server
#[error("Not connected to server")]
NotConnected,
/// Error occurred during JSON serialization or deserialization
#[error("Serialization error: {0}")]
Serialization(#[from] serde_json::Error),
/// Error occurred during `WebSocket` communication
#[error("WebSocket error: {0}")]
WebSocket(#[from] tokio_tungstenite::tungstenite::Error),
/// Operation timed out
#[error("Timeout error")]
Timeout,
/// Error occurred while parsing a URL
#[error("URL parsing error: {0}")]
Url(#[from] url::ParseError),
/// Requested block was not found in the blockchain
#[error("Block not found: {0}")]
BlockNotFound(u64),
/// Error occurred while fetching data from the blockchain or server
#[error("Data fetch error: {0}")]
DataFetchError(String),
/// The request sent to the server was invalid or malformed
#[error("Inivalid request")]
InvalidRequest,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/ethstats/src/credentials.rs | crates/node/ethstats/src/credentials.rs | use crate::error::EthStatsError;
use std::str::FromStr;
/// Credentials for connecting to an `EthStats` server
///
/// Contains the node identifier, authentication secret, and server host
/// information needed to establish a connection with the `EthStats` service.
/// Parsed from the "`node_id:secret@host`" form via the `FromStr` impl below.
#[derive(Debug, Clone)]
pub(crate) struct EthstatsCredentials {
/// Unique identifier for this node in the `EthStats` network
pub node_id: String,
/// Authentication secret for the `EthStats` server
pub secret: String,
/// Host address of the `EthStats` server
pub host: String,
}
impl FromStr for EthstatsCredentials {
    type Err = EthStatsError;

    /// Parse credentials from a string in the format "`node_id:secret@host`"
    ///
    /// The '@' split uses the last occurrence so the secret may itself contain
    /// '@'; the ':' split uses the first occurrence so the secret may contain
    /// ':' (the node id cannot). All previously-valid inputs parse identically.
    ///
    /// # Arguments
    /// * `s` - String containing credentials in the format "`node_id:secret@host`"
    ///
    /// # Returns
    /// * `Ok(EthstatsCredentials)` - Successfully parsed credentials
    /// * `Err(EthStatsError::InvalidUrl)` - Invalid format or missing separators
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Host is everything after the last '@'.
        let (creds, host) = s
            .rsplit_once('@')
            .ok_or_else(|| EthStatsError::InvalidUrl("Missing '@' separator".to_string()))?;

        // Node id is everything before the first ':'.
        let (node_id, secret) = creds.split_once(':').ok_or_else(|| {
            EthStatsError::InvalidUrl("Missing ':' separator in credentials".to_string())
        })?;

        Ok(Self {
            node_id: node_id.to_string(),
            secret: secret.to_string(),
            host: host.to_string(),
        })
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/ethstats/src/events.rs | crates/node/ethstats/src/events.rs | //! Types for ethstats event reporting.
//! These structures define the data format used to report blockchain events to ethstats servers.
use alloy_consensus::Header;
use alloy_primitives::{Address, B256, U256};
use serde::{Deserialize, Serialize};
/// Collection of meta information about a node that is displayed on the monitoring page.
/// This information is used to identify and display node details in the ethstats monitoring
/// interface.
///
/// Fields carrying `serde(rename)` attributes serialize under the wire names
/// shown in those attributes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeInfo {
/// The display name of the node in the monitoring interface
pub name: String,
/// The node's unique identifier
pub node: String,
/// The port number the node is listening on for P2P connections
pub port: u64,
/// The network ID the node is connected to (e.g. "1" for mainnet)
#[serde(rename = "net")]
pub network: String,
/// Comma-separated list of supported protocols and their versions
pub protocol: String,
/// API availability indicator ("Yes" or "No")
pub api: String,
/// Operating system the node is running on
pub os: String,
/// Operating system version/architecture
#[serde(rename = "os_v")]
pub os_ver: String,
/// Client software version
pub client: String,
/// Whether the node can provide historical block data
#[serde(rename = "canUpdateHistory")]
pub history: bool,
}
/// Authentication message used to login to the ethstats monitoring server.
/// Contains node identification and authentication information.
///
/// Sent as the payload of the "hello" command (see `generate_login_message`).
#[derive(Debug, Serialize, Deserialize)]
pub struct AuthMsg {
/// The node's unique identifier
pub id: String,
/// Detailed information about the node
pub info: NodeInfo,
/// Secret password for authentication with the monitoring server
pub secret: String,
}
impl AuthMsg {
    /// Generate a login message for the ethstats monitoring server.
    pub fn generate_login_message(&self) -> String {
        // Wire envelope: {"emit": [<command>, <payload>]}.
        let payload = serde_json::json!({ "emit": ["hello", self] });
        payload.to_string()
    }
}
/// Simplified transaction info, containing only the hash.
///
/// Used inside [`BlockStats::txs`] so block reports stay small.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TxStats {
/// Transaction hash
pub hash: B256,
}
/// Wrapper for uncle block headers.
/// This ensures empty lists serialize as `[]` instead of `null`.
// `serde(transparent)` serializes the inner Vec directly, with no wrapper object.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(transparent)]
pub struct UncleStats(pub Vec<Header>);
/// Information to report about individual blocks.
///
/// The `serde(rename)` attributes produce the camelCase field names the
/// ethstats wire format uses.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlockStats {
/// Block number (height in the chain).
pub number: U256,
/// Hash of this block.
pub hash: B256,
/// Hash of the parent block.
#[serde(rename = "parentHash")]
pub parent_hash: B256,
/// Timestamp of the block (Unix time).
pub timestamp: U256,
/// Address of the miner who produced this block.
pub miner: Address,
/// Total gas used by all transactions in the block.
#[serde(rename = "gasUsed")]
pub gas_used: u64,
/// Maximum gas allowed for this block.
#[serde(rename = "gasLimit")]
pub gas_limit: u64,
/// Difficulty for mining this block (as a decimal string).
#[serde(rename = "difficulty")]
pub diff: String,
/// Cumulative difficulty up to this block (as a decimal string).
#[serde(rename = "totalDifficulty")]
pub total_diff: String,
/// Simplified list of transactions in the block.
#[serde(rename = "transactions")]
pub txs: Vec<TxStats>,
/// Root hash of all transactions (Merkle root).
#[serde(rename = "transactionsRoot")]
pub tx_root: B256,
/// State root after applying this block.
#[serde(rename = "stateRoot")]
pub root: B256,
/// List of uncle block headers.
pub uncles: UncleStats,
}
/// Message containing a block to be reported to the ethstats monitoring server.
///
/// Sent as the payload of the "block" command (see `generate_block_message`).
#[derive(Debug, Serialize, Deserialize)]
pub struct BlockMsg {
/// The node's unique identifier
pub id: String,
/// The block to report
pub block: BlockStats,
}
impl BlockMsg {
    /// Generate a block message for the ethstats monitoring server.
    pub fn generate_block_message(&self) -> String {
        // Wire envelope: {"emit": [<command>, <payload>]}.
        let payload = serde_json::json!({ "emit": ["block", self] });
        payload.to_string()
    }
}
/// Message containing historical block data to be reported to the ethstats monitoring server.
///
/// Sent as the payload of the "history" command (see `generate_history_message`).
#[derive(Debug, Serialize, Deserialize)]
pub struct HistoryMsg {
/// The node's unique identifier
pub id: String,
/// The historical block data to report
pub history: Vec<BlockStats>,
}
impl HistoryMsg {
    /// Generate a history message for the ethstats monitoring server.
    pub fn generate_history_message(&self) -> String {
        // Wire envelope: {"emit": [<command>, <payload>]}.
        let payload = serde_json::json!({ "emit": ["history", self] });
        payload.to_string()
    }
}
/// Pending transaction statistics payload embedded in a [`PendingMsg`].
#[derive(Debug, Serialize, Deserialize)]
pub struct PendingStats {
/// Number of pending transactions
pub pending: u64,
}
/// Message containing pending transaction statistics to be reported to the ethstats monitoring
/// server.
///
/// Sent as the payload of the "pending" command (see `generate_pending_message`).
#[derive(Debug, Serialize, Deserialize)]
pub struct PendingMsg {
/// The node's unique identifier
pub id: String,
/// The pending transaction statistics to report
pub stats: PendingStats,
}
impl PendingMsg {
    /// Generate a pending message for the ethstats monitoring server.
    pub fn generate_pending_message(&self) -> String {
        // Wire envelope: {"emit": [<command>, <payload>]}.
        let payload = serde_json::json!({ "emit": ["pending", self] });
        payload.to_string()
    }
}
/// Information reported about the local node.
///
/// Embedded in a [`StatsMsg`] for the "stats" command.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeStats {
/// Whether the node is active
pub active: bool,
/// Whether the node is currently syncing
pub syncing: bool,
/// Number of connected peers
pub peers: u64,
/// Current gas price in wei
#[serde(rename = "gasPrice")]
pub gas_price: u64,
/// Node uptime percentage
pub uptime: u64,
}
/// Message containing node statistics to be reported to the ethstats monitoring server.
///
/// Sent as the payload of the "stats" command (see `generate_stats_message`).
#[derive(Debug, Serialize, Deserialize)]
pub struct StatsMsg {
/// The node's unique identifier
pub id: String,
/// The stats to report
pub stats: NodeStats,
}
impl StatsMsg {
    /// Generate a stats message for the ethstats monitoring server.
    pub fn generate_stats_message(&self) -> String {
        // Wire envelope: {"emit": [<command>, <payload>]}.
        let payload = serde_json::json!({ "emit": ["stats", self] });
        payload.to_string()
    }
}
/// Latency report message used to report network latency to the ethstats monitoring server.
///
/// Sent as the payload of the "latency" command (see `generate_latency_message`).
#[derive(Serialize, Deserialize, Debug)]
pub struct LatencyMsg {
/// The node's unique identifier
pub id: String,
/// The latency to report in milliseconds
pub latency: u64,
}
impl LatencyMsg {
    /// Generate a latency message for the ethstats monitoring server.
    pub fn generate_latency_message(&self) -> String {
        // Wire envelope: {"emit": [<command>, <payload>]}.
        let payload = serde_json::json!({ "emit": ["latency", self] });
        payload.to_string()
    }
}
/// Ping message sent to the ethstats monitoring server to initiate latency measurement.
///
/// Sent as the payload of the "node-ping" command (see `generate_ping_message`).
#[derive(Serialize, Deserialize, Debug)]
pub struct PingMsg {
/// The node's unique identifier
pub id: String,
/// Client timestamp when the ping was sent
#[serde(rename = "clientTime")]
pub client_time: String,
}
impl PingMsg {
    /// Generate a ping message for the ethstats monitoring server.
    pub fn generate_ping_message(&self) -> String {
        // Wire envelope: {"emit": [<command>, <payload>]}.
        let payload = serde_json::json!({ "emit": ["node-ping", self] });
        payload.to_string()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/api/src/node.rs | crates/node/api/src/node.rs | //! Traits for configuring a node.
use crate::PayloadTypes;
use alloy_rpc_types_engine::JwtSecret;
use reth_basic_payload_builder::PayloadBuilder;
use reth_consensus::{ConsensusError, FullConsensus};
use reth_db_api::{database_metrics::DatabaseMetrics, Database};
use reth_engine_primitives::{ConsensusEngineEvent, ConsensusEngineHandle};
use reth_evm::ConfigureEvm;
use reth_network_api::FullNetwork;
use reth_node_core::node_config::NodeConfig;
use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter, TxTy};
use reth_payload_builder::PayloadBuilderHandle;
use reth_provider::FullProvider;
use reth_tasks::TaskExecutor;
use reth_tokio_util::EventSender;
use reth_transaction_pool::{PoolTransaction, TransactionPool};
use std::{fmt::Debug, future::Future, marker::PhantomData};
/// A helper trait that is downstream of the [`NodeTypes`] trait and adds stateful
/// components to the node.
///
/// Its types are configured by node internally and are not intended to be user configurable.
pub trait FullNodeTypes: Clone + Debug + Send + Sync + Unpin + 'static {
/// Node's types with the database.
type Types: NodeTypes;
/// Underlying database type used by the node to store and retrieve data.
type DB: Database + DatabaseMetrics + Clone + Unpin + 'static;
/// The provider type used to interact with the node.
// Bound ties the provider to `Types` combined with `DB` via the adapter.
type Provider: FullProvider<NodeTypesWithDBAdapter<Self::Types, Self::DB>>;
}
/// An adapter type that adds the builtin provider type to the user configured node types.
///
/// Carries no runtime data; the generic parameters are tracked via [`PhantomData`].
#[derive(Clone, Debug)]
pub struct FullNodeTypesAdapter<Types, DB, Provider>(PhantomData<(Types, DB, Provider)>);
// Wires the adapter's three generic parameters straight through as the
// trait's associated types.
impl<Types, DB, Provider> FullNodeTypes for FullNodeTypesAdapter<Types, DB, Provider>
where
Types: NodeTypes,
DB: Database + DatabaseMetrics + Clone + Unpin + 'static,
Provider: FullProvider<NodeTypesWithDBAdapter<Types, DB>>,
{
type Types = Types;
type DB = DB;
type Provider = Provider;
}
/// Helper trait to bound [`PayloadBuilder`] to the node's engine types.
pub trait PayloadBuilderFor<N: NodeTypes>:
PayloadBuilder<
Attributes = <N::Payload as PayloadTypes>::PayloadBuilderAttributes,
BuiltPayload = <N::Payload as PayloadTypes>::BuiltPayload,
>
{
}
impl<T, N: NodeTypes> PayloadBuilderFor<N> for T where
T: PayloadBuilder<
Attributes = <N::Payload as PayloadTypes>::PayloadBuilderAttributes,
BuiltPayload = <N::Payload as PayloadTypes>::BuiltPayload,
>
{
}
/// Encapsulates all types and components of the node.
pub trait FullNodeComponents: FullNodeTypes + Clone + 'static {
/// The transaction pool of the node.
type Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Self::Types>>> + Unpin;
/// The node's EVM configuration, defining settings for the Ethereum Virtual Machine.
type Evm: ConfigureEvm<Primitives = <Self::Types as NodeTypes>::Primitives>;
/// The consensus type of the node.
type Consensus: FullConsensus<<Self::Types as NodeTypes>::Primitives, Error = ConsensusError>
+ Clone
+ Unpin
+ 'static;
/// Network API.
type Network: FullNetwork;
/// Returns the transaction pool of the node.
fn pool(&self) -> &Self::Pool;
/// Returns the node's evm config.
fn evm_config(&self) -> &Self::Evm;
/// Returns the node's consensus type.
fn consensus(&self) -> &Self::Consensus;
/// Returns the handle to the network
fn network(&self) -> &Self::Network;
/// Returns the handle to the payload builder service handling payload building requests from
/// the engine.
fn payload_builder_handle(&self) -> &PayloadBuilderHandle<<Self::Types as NodeTypes>::Payload>;
/// Returns the provider of the node.
fn provider(&self) -> &Self::Provider;
/// Returns an executor handle to spawn tasks.
///
/// This can be used to spawn critical, blocking tasks or register tasks that should be
/// terminated gracefully. See also [`TaskSpawner`](reth_tasks::TaskSpawner).
fn task_executor(&self) -> &TaskExecutor;
}
/// Context passed to [`NodeAddOns::launch_add_ons`],
#[derive(Debug, Clone)]
pub struct AddOnsContext<'a, N: FullNodeComponents> {
/// Node with all configured components.
pub node: N,
/// Node configuration.
pub config: &'a NodeConfig<<N::Types as NodeTypes>::ChainSpec>,
/// Handle to the beacon consensus engine.
pub beacon_engine_handle: ConsensusEngineHandle<<N::Types as NodeTypes>::Payload>,
/// Notification channel for engine API events
pub engine_events: EventSender<ConsensusEngineEvent<<N::Types as NodeTypes>::Primitives>>,
/// JWT secret for the node.
pub jwt_secret: JwtSecret,
}
/// Customizable node add-on types.
///
/// This trait defines the interface for extending a node with additional functionality beyond
/// the core [`FullNodeComponents`]. It provides a way to launch supplementary services such as
/// RPC servers, monitoring, external integrations, or any custom functionality that builds on
/// top of the core node components.
///
/// ## Purpose
///
/// The `NodeAddOns` trait serves as an extension point in the node builder architecture,
/// allowing developers to:
/// - Define custom services that run alongside the main node
/// - Access all node components and configuration during initialization
/// - Return a handle for managing the launched services (e.g. handle to rpc server)
///
/// ## How it fits into `NodeBuilder`
///
/// In the node builder pattern, add-ons are the final layer that gets applied after all core
/// components are configured and started. The builder flow typically follows:
///
/// 1. Configure [`NodeTypes`] (chain spec, database types, etc.)
/// 2. Build [`FullNodeComponents`] (consensus, networking, transaction pool, etc.)
/// 3. Launch [`NodeAddOns`] with access to all components via [`AddOnsContext`]
///
/// ## Primary Use Case
///
/// The primary use of this trait is to launch RPC servers that provide external API access to
/// the node. For Ethereum nodes, this typically includes two main servers: the regular RPC
/// server (HTTP/WS/IPC) that handles user requests and the authenticated Engine API server
/// that communicates with the consensus layer. The returned handle contains the necessary
/// endpoints and control mechanisms for these servers, allowing the node to serve JSON-RPC
/// requests and participate in consensus. While RPC is the main use case, the trait is
/// intentionally flexible to support other kinds of add-ons such as monitoring, indexing, or
/// custom protocol extensions.
///
/// ## Context Access
///
/// The [`AddOnsContext`] provides access to:
/// - All node components via the `node` field
/// - Node configuration
/// - Engine API handles for consensus layer communication
/// - JWT secrets for authenticated endpoints
///
/// This ensures add-ons can integrate deeply with the node while maintaining clean separation
/// of concerns.
pub trait NodeAddOns<N: FullNodeComponents>: Send {
/// Handle to add-ons.
///
/// This type is returned by [`launch_add_ons`](Self::launch_add_ons) and represents a
/// handle to the launched services. It must be `Clone` to allow multiple components to
/// hold references and should provide methods to interact with the running services.
///
/// For RPC add-ons, this typically includes:
/// - Server handles to access local addresses and shutdown methods
/// - RPC module registry for runtime inspection of available methods
/// - Configured middleware and transport-specific settings
/// - For Engine API implementations, this also includes handles for consensus layer
/// communication
type Handle: Send + Sync + Clone;
/// Configures and launches the add-ons.
///
/// This method is called once during node startup after all core components are initialized.
/// It receives an [`AddOnsContext`] that provides access to:
///
/// - The fully configured node with all its components
/// - Node configuration for reading settings
/// - Engine API handles for consensus layer communication
/// - JWT secrets for setting up authenticated endpoints (if any).
///
/// The implementation should:
/// 1. Use the context to configure the add-on services
/// 2. Launch any background tasks using the node's task executor
/// 3. Return a handle that allows interaction with the launched services
///
/// # Errors
///
/// This method may fail if the add-ons cannot be properly configured or launched,
/// for example due to port binding issues or invalid configuration.
fn launch_add_ons(
self,
ctx: AddOnsContext<'_, N>,
) -> impl Future<Output = eyre::Result<Self::Handle>> + Send;
}
impl<N: FullNodeComponents> NodeAddOns<N> for () {
type Handle = ();
async fn launch_add_ons(self, _components: AddOnsContext<'_, N>) -> eyre::Result<Self::Handle> {
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/api/src/lib.rs | crates/node/api/src/lib.rs | //! Standalone crate for Reth configuration traits and builder types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// Traits, validation methods, and helper types used to abstract over engine types.
pub use reth_engine_primitives as engine;
pub use reth_engine_primitives::*;
/// Traits and helper types used to abstract over payload types.
pub use reth_payload_primitives as payload;
pub use reth_payload_primitives::*;
/// Traits and helper types used to abstract over payload builder types.
pub use reth_payload_builder_primitives as payload_builder;
pub use reth_payload_builder_primitives::*;
/// Traits and helper types used to abstract over EVM methods and types.
pub use reth_evm::{ConfigureEvm, NextBlockEnvAttributes};
pub mod node;
pub use node::*;
// re-export for convenience
pub use reth_node_types::*;
pub use reth_provider::FullProvider;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/types/src/lib.rs | crates/node/types/src/lib.rs | //! Standalone crate for Reth configuration traits and builder types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
use core::{fmt::Debug, marker::PhantomData};
pub use reth_primitives_traits::{
Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives,
};
use reth_chainspec::EthChainSpec;
use reth_db_api::{database_metrics::DatabaseMetrics, Database};
use reth_engine_primitives::EngineTypes;
use reth_payload_primitives::{BuiltPayload, PayloadTypes};
/// The type that configures the essential types of an Ethereum-like node.
///
/// This includes the primitive types of a node and chain specification.
///
/// This trait is intended to be stateless and only define the types of the node.
pub trait NodeTypes: Clone + Debug + Send + Sync + Unpin + 'static {
/// The node's primitive types, defining basic operations and structures.
type Primitives: NodePrimitives;
/// The type used for configuration of the EVM.
type ChainSpec: EthChainSpec<Header = <Self::Primitives as NodePrimitives>::BlockHeader>;
/// The type responsible for writing chain primitives to storage.
type Storage: Default + Send + Sync + Unpin + Debug + 'static;
/// The node's engine types, defining the interaction with the consensus engine.
type Payload: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = Self::Primitives>>;
}
/// A helper trait that is downstream of the [`NodeTypes`] trait and adds database to the
/// node.
///
/// Its types are configured by node internally and are not intended to be user configurable.
pub trait NodeTypesWithDB: NodeTypes {
/// Underlying database type used by the node to store and retrieve data.
type DB: Database + DatabaseMetrics + Clone + Unpin + 'static;
}
/// An adapter type combining [`NodeTypes`] and db into [`NodeTypesWithDB`].
#[derive(Clone, Debug, Default)]
pub struct NodeTypesWithDBAdapter<Types, DB> {
types: PhantomData<Types>,
db: PhantomData<DB>,
}
impl<Types, DB> NodeTypesWithDBAdapter<Types, DB> {
/// Create a new adapter with the configured types.
pub fn new() -> Self {
Self { types: Default::default(), db: Default::default() }
}
}
impl<Types, DB> NodeTypes for NodeTypesWithDBAdapter<Types, DB>
where
Types: NodeTypes,
DB: Clone + Debug + Send + Sync + Unpin + 'static,
{
type Primitives = Types::Primitives;
type ChainSpec = Types::ChainSpec;
type Storage = Types::Storage;
type Payload = Types::Payload;
}
impl<Types, DB> NodeTypesWithDB for NodeTypesWithDBAdapter<Types, DB>
where
Types: NodeTypes,
DB: Database + DatabaseMetrics + Clone + Unpin + 'static,
{
type DB = DB;
}
/// A [`NodeTypes`] type builder.
#[derive(Clone, Debug, Default)]
pub struct AnyNodeTypes<P = (), C = (), S = (), PL = ()>(
PhantomData<P>,
PhantomData<C>,
PhantomData<S>,
PhantomData<PL>,
);
impl<P, C, S, PL> AnyNodeTypes<P, C, S, PL> {
/// Creates a new instance of [`AnyNodeTypes`].
pub const fn new() -> Self {
Self(PhantomData, PhantomData, PhantomData, PhantomData)
}
/// Sets the `Primitives` associated type.
pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, S, PL> {
AnyNodeTypes::new()
}
/// Sets the `ChainSpec` associated type.
pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, S, PL> {
AnyNodeTypes::new()
}
/// Sets the `Storage` associated type.
pub const fn storage<T>(self) -> AnyNodeTypes<P, C, T, PL> {
AnyNodeTypes::new()
}
/// Sets the `Payload` associated type.
pub const fn payload<T>(self) -> AnyNodeTypes<P, C, S, T> {
AnyNodeTypes::new()
}
}
impl<P, C, S, PL> NodeTypes for AnyNodeTypes<P, C, S, PL>
where
P: NodePrimitives + Send + Sync + Unpin + 'static,
C: EthChainSpec<Header = P::BlockHeader> + Clone + 'static,
S: Default + Clone + Send + Sync + Unpin + Debug + 'static,
PL: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = P>> + Send + Sync + Unpin + 'static,
{
type Primitives = P;
type ChainSpec = C;
type Storage = S;
type Payload = PL;
}
/// A [`NodeTypes`] type builder.
#[derive(Clone, Debug, Default)]
pub struct AnyNodeTypesWithEngine<P = (), E = (), C = (), S = (), PL = ()> {
/// Embedding the basic node types.
_base: AnyNodeTypes<P, C, S, PL>,
/// Phantom data for the engine.
_engine: PhantomData<E>,
}
impl<P, E, C, S, PL> AnyNodeTypesWithEngine<P, E, C, S, PL> {
/// Creates a new instance of [`AnyNodeTypesWithEngine`].
pub const fn new() -> Self {
Self { _base: AnyNodeTypes::new(), _engine: PhantomData }
}
/// Sets the `Primitives` associated type.
pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C, S, PL> {
AnyNodeTypesWithEngine::new()
}
/// Sets the `Engine` associated type.
pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C, S, PL> {
AnyNodeTypesWithEngine::new()
}
/// Sets the `ChainSpec` associated type.
pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T, S, PL> {
AnyNodeTypesWithEngine::new()
}
/// Sets the `Storage` associated type.
pub const fn storage<T>(self) -> AnyNodeTypesWithEngine<P, E, C, T, PL> {
AnyNodeTypesWithEngine::new()
}
/// Sets the `Payload` associated type.
pub const fn payload<T>(self) -> AnyNodeTypesWithEngine<P, E, C, S, T> {
AnyNodeTypesWithEngine::new()
}
}
impl<P, E, C, S, PL> NodeTypes for AnyNodeTypesWithEngine<P, E, C, S, PL>
where
P: NodePrimitives + Send + Sync + Unpin + 'static,
E: EngineTypes + Send + Sync + Unpin,
C: EthChainSpec<Header = P::BlockHeader> + Clone + 'static,
S: Default + Clone + Send + Sync + Unpin + Debug + 'static,
PL: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = P>> + Send + Sync + Unpin + 'static,
{
type Primitives = P;
type ChainSpec = C;
type Storage = S;
type Payload = PL;
}
/// Helper adapter type for accessing [`NodePrimitives::Block`] on [`NodeTypes`].
pub type BlockTy<N> = <PrimitivesTy<N> as NodePrimitives>::Block;
/// Helper adapter type for accessing [`NodePrimitives::BlockHeader`] on [`NodeTypes`].
pub type HeaderTy<N> = <PrimitivesTy<N> as NodePrimitives>::BlockHeader;
/// Helper adapter type for accessing [`NodePrimitives::BlockBody`] on [`NodeTypes`].
pub type BodyTy<N> = <PrimitivesTy<N> as NodePrimitives>::BlockBody;
/// Helper adapter type for accessing [`NodePrimitives::SignedTx`] on [`NodeTypes`].
pub type TxTy<N> = <PrimitivesTy<N> as NodePrimitives>::SignedTx;
/// Helper adapter type for accessing [`NodePrimitives::Receipt`] on [`NodeTypes`].
pub type ReceiptTy<N> = <PrimitivesTy<N> as NodePrimitives>::Receipt;
/// Helper type for getting the `Primitives` associated type from a [`NodeTypes`].
pub type PrimitivesTy<N> = <N as NodeTypes>::Primitives;
/// Helper adapter type for accessing [`PayloadTypes::PayloadAttributes`] on [`NodeTypes`].
pub type PayloadAttrTy<N> = <<N as NodeTypes>::Payload as PayloadTypes>::PayloadAttributes;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/events/src/node.rs | crates/node/events/src/node.rs | //! Support for handling events emitted by node components.
use crate::cl::ConsensusLayerHealthEvent;
use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader};
use alloy_primitives::{BlockNumber, B256};
use alloy_rpc_types_engine::ForkchoiceState;
use futures::Stream;
use reth_engine_primitives::{
ConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus,
};
use reth_network_api::PeersInfo;
use reth_primitives_traits::{format_gas, format_gas_throughput, BlockBody, NodePrimitives};
use reth_prune_types::PrunerEvent;
use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId};
use reth_static_file_types::StaticFileProducerEvent;
use std::{
fmt::{Display, Formatter},
future::Future,
pin::Pin,
task::{Context, Poll},
time::{Duration, Instant, SystemTime, UNIX_EPOCH},
};
use tokio::time::Interval;
use tracing::{debug, info, warn};
/// Interval of reporting node state.
const INFO_MESSAGE_INTERVAL: Duration = Duration::from_secs(25);
/// The current high-level state of the node, including the node's database environment, network
/// connections, current processing stage, and the latest block information. It provides
/// methods to handle different types of events that affect the node's state, such as pipeline
/// events, network events, and consensus engine events.
struct NodeState {
/// Information about connected peers.
peers_info: Option<Box<dyn PeersInfo>>,
/// The stage currently being executed.
current_stage: Option<CurrentStage>,
/// The latest block reached by either pipeline or consensus engine.
latest_block: Option<BlockNumber>,
/// The time of the latest block seen by the pipeline
latest_block_time: Option<u64>,
/// Hash of the head block last set by fork choice update
head_block_hash: Option<B256>,
/// Hash of the safe block last set by fork choice update
safe_block_hash: Option<B256>,
/// Hash of finalized block last set by fork choice update
finalized_block_hash: Option<B256>,
}
impl NodeState {
const fn new(
peers_info: Option<Box<dyn PeersInfo>>,
latest_block: Option<BlockNumber>,
) -> Self {
Self {
peers_info,
current_stage: None,
latest_block,
latest_block_time: None,
head_block_hash: None,
safe_block_hash: None,
finalized_block_hash: None,
}
}
fn num_connected_peers(&self) -> usize {
self.peers_info.as_ref().map(|info| info.num_connected_peers()).unwrap_or_default()
}
fn build_current_stage(
&self,
stage_id: StageId,
checkpoint: StageCheckpoint,
target: Option<BlockNumber>,
) -> CurrentStage {
let (eta, entities_checkpoint) = self
.current_stage
.as_ref()
.filter(|current_stage| current_stage.stage_id == stage_id)
.map_or_else(
|| (Eta::default(), None),
|current_stage| (current_stage.eta, current_stage.entities_checkpoint),
);
CurrentStage { stage_id, eta, checkpoint, entities_checkpoint, target }
}
/// Processes an event emitted by the pipeline
fn handle_pipeline_event(&mut self, event: PipelineEvent) {
match event {
PipelineEvent::Prepare { pipeline_stages_progress, stage_id, checkpoint, target } => {
let checkpoint = checkpoint.unwrap_or_default();
let current_stage = self.build_current_stage(stage_id, checkpoint, target);
info!(
pipeline_stages = %pipeline_stages_progress,
stage = %stage_id,
checkpoint = %checkpoint.block_number,
target = %OptionalField(target),
"Preparing stage",
);
self.current_stage = Some(current_stage);
}
PipelineEvent::Run { pipeline_stages_progress, stage_id, checkpoint, target } => {
let checkpoint = checkpoint.unwrap_or_default();
let current_stage = self.build_current_stage(stage_id, checkpoint, target);
if let Some(stage_eta) = current_stage.eta.fmt_for_stage(stage_id) {
info!(
pipeline_stages = %pipeline_stages_progress,
stage = %stage_id,
checkpoint = %checkpoint.block_number,
target = %OptionalField(target),
%stage_eta,
"Executing stage",
);
} else {
info!(
pipeline_stages = %pipeline_stages_progress,
stage = %stage_id,
checkpoint = %checkpoint.block_number,
target = %OptionalField(target),
"Executing stage",
);
}
self.current_stage = Some(current_stage);
}
PipelineEvent::Ran {
pipeline_stages_progress,
stage_id,
result: ExecOutput { checkpoint, done },
} => {
if stage_id.is_finish() {
self.latest_block = Some(checkpoint.block_number);
}
if let Some(current_stage) = self.current_stage.as_mut() {
current_stage.checkpoint = checkpoint;
current_stage.entities_checkpoint = checkpoint.entities();
current_stage.eta.update(stage_id, checkpoint);
let target = OptionalField(current_stage.target);
let stage_progress = current_stage
.entities_checkpoint
.and_then(|entities| entities.fmt_percentage());
let stage_eta = current_stage.eta.fmt_for_stage(stage_id);
let message = if done { "Finished stage" } else { "Committed stage progress" };
match (stage_progress, stage_eta) {
(Some(stage_progress), Some(stage_eta)) => {
info!(
pipeline_stages = %pipeline_stages_progress,
stage = %stage_id,
checkpoint = %checkpoint.block_number,
%target,
%stage_progress,
%stage_eta,
"{message}",
)
}
(Some(stage_progress), None) => {
info!(
pipeline_stages = %pipeline_stages_progress,
stage = %stage_id,
checkpoint = %checkpoint.block_number,
%target,
%stage_progress,
"{message}",
)
}
(None, Some(stage_eta)) => {
info!(
pipeline_stages = %pipeline_stages_progress,
stage = %stage_id,
checkpoint = %checkpoint.block_number,
%target,
%stage_eta,
"{message}",
)
}
(None, None) => {
info!(
pipeline_stages = %pipeline_stages_progress,
stage = %stage_id,
checkpoint = %checkpoint.block_number,
%target,
"{message}",
)
}
}
}
if done {
self.current_stage = None;
}
}
PipelineEvent::Unwind { stage_id, input } => {
let current_stage = CurrentStage {
stage_id,
eta: Eta::default(),
checkpoint: input.checkpoint,
target: Some(input.unwind_to),
entities_checkpoint: input.checkpoint.entities(),
};
self.current_stage = Some(current_stage);
}
_ => (),
}
}
fn handle_consensus_engine_event<N: NodePrimitives>(&mut self, event: ConsensusEngineEvent<N>) {
match event {
ConsensusEngineEvent::ForkchoiceUpdated(state, status) => {
let ForkchoiceState { head_block_hash, safe_block_hash, finalized_block_hash } =
state;
if self.safe_block_hash != Some(safe_block_hash) &&
self.finalized_block_hash != Some(finalized_block_hash)
{
let msg = match status {
ForkchoiceStatus::Valid => "Forkchoice updated",
ForkchoiceStatus::Invalid => "Received invalid forkchoice updated message",
ForkchoiceStatus::Syncing => {
"Received forkchoice updated message when syncing"
}
};
info!(?head_block_hash, ?safe_block_hash, ?finalized_block_hash, "{}", msg);
}
self.head_block_hash = Some(head_block_hash);
self.safe_block_hash = Some(safe_block_hash);
self.finalized_block_hash = Some(finalized_block_hash);
}
ConsensusEngineEvent::LiveSyncProgress(live_sync_progress) => {
match live_sync_progress {
ConsensusEngineLiveSyncProgress::DownloadingBlocks {
remaining_blocks,
target,
} => {
info!(
remaining_blocks,
target_block_hash=?target,
"Live sync in progress, downloading blocks"
);
}
}
}
ConsensusEngineEvent::CanonicalBlockAdded(executed, elapsed) => {
let block = executed.sealed_block();
info!(
number=block.number(),
hash=?block.hash(),
peers=self.num_connected_peers(),
txs=block.body().transactions().len(),
gas_used=%format_gas(block.gas_used()),
gas_throughput=%format_gas_throughput(block.gas_used(), elapsed),
gas_limit=%format_gas(block.gas_limit()),
full=%format!("{:.1}%", block.gas_used() as f64 * 100.0 / block.gas_limit() as f64),
base_fee=%format!("{:.2}Gwei", block.base_fee_per_gas().unwrap_or(0) as f64 / GWEI_TO_WEI as f64),
blobs=block.blob_gas_used().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB,
excess_blobs=block.excess_blob_gas().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB,
?elapsed,
"Block added to canonical chain"
);
}
ConsensusEngineEvent::CanonicalChainCommitted(head, elapsed) => {
self.latest_block = Some(head.number());
self.latest_block_time = Some(head.timestamp());
info!(number=head.number(), hash=?head.hash(), ?elapsed, "Canonical chain committed");
}
ConsensusEngineEvent::ForkBlockAdded(executed, elapsed) => {
let block = executed.sealed_block();
info!(number=block.number(), hash=?block.hash(), ?elapsed, "Block added to fork chain");
}
ConsensusEngineEvent::InvalidBlock(block) => {
warn!(number=block.number(), hash=?block.hash(), "Encountered invalid block");
}
ConsensusEngineEvent::BlockReceived(num_hash) => {
info!(number=num_hash.number, hash=?num_hash.hash, "Received block from consensus engine");
}
}
}
fn handle_consensus_layer_health_event(&self, event: ConsensusLayerHealthEvent) {
// If pipeline is running, it's fine to not receive any messages from the CL.
// So we need to report about CL health only when pipeline is idle.
if self.current_stage.is_none() {
match event {
ConsensusLayerHealthEvent::NeverSeen => {
warn!(
"Post-merge network, but never seen beacon client. Please launch one to follow the chain!"
)
}
ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(period) => {
warn!(
?period,
"Post-merge network, but no beacon client seen for a while. Please launch one to follow the chain!"
)
}
ConsensusLayerHealthEvent::NeverReceivedUpdates => {
warn!(
"Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!"
)
}
ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => {
warn!(
?period,
"Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs!"
)
}
}
}
}
fn handle_pruner_event(&self, event: PrunerEvent) {
match event {
PrunerEvent::Started { tip_block_number } => {
debug!(tip_block_number, "Pruner started");
}
PrunerEvent::Finished { tip_block_number, elapsed, stats } => {
let stats = format!(
"[{}]",
stats.iter().map(|item| item.to_string()).collect::<Vec<_>>().join(", ")
);
debug!(tip_block_number, ?elapsed, pruned_segments = %stats, "Pruner finished");
}
}
}
fn handle_static_file_producer_event(&self, event: StaticFileProducerEvent) {
match event {
StaticFileProducerEvent::Started { targets } => {
debug!(?targets, "Static File Producer started");
}
StaticFileProducerEvent::Finished { targets, elapsed } => {
debug!(?targets, ?elapsed, "Static File Producer finished");
}
}
}
}
/// Helper type for formatting of optional fields:
/// - If [Some(x)], then `x` is written
/// - If [None], then `None` is written
struct OptionalField<T: Display>(Option<T>);
impl<T: Display> Display for OptionalField<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
if let Some(field) = &self.0 {
write!(f, "{field}")
} else {
write!(f, "None")
}
}
}
/// The stage currently being executed.
struct CurrentStage {
stage_id: StageId,
eta: Eta,
checkpoint: StageCheckpoint,
/// The entities checkpoint for reporting the progress. If `None`, then the progress is not
/// available, probably because the stage didn't finish running and didn't update its
/// checkpoint yet.
entities_checkpoint: Option<EntitiesCheckpoint>,
target: Option<BlockNumber>,
}
/// A node event.
#[derive(Debug, derive_more::From)]
pub enum NodeEvent<N: NodePrimitives> {
/// A sync pipeline event.
Pipeline(PipelineEvent),
/// A consensus engine event.
ConsensusEngine(ConsensusEngineEvent<N>),
/// A Consensus Layer health event.
ConsensusLayerHealth(ConsensusLayerHealthEvent),
/// A pruner event
Pruner(PrunerEvent),
/// A `static_file_producer` event
StaticFileProducer(StaticFileProducerEvent),
/// Used to encapsulate various conditions or situations that do not
/// naturally fit into the other more specific variants.
Other(String),
}
/// Displays relevant information to the user from components of the node, and periodically
/// displays the high-level status of the node.
pub async fn handle_events<E, N: NodePrimitives>(
peers_info: Option<Box<dyn PeersInfo>>,
latest_block_number: Option<BlockNumber>,
events: E,
) where
E: Stream<Item = NodeEvent<N>> + Unpin,
{
let state = NodeState::new(peers_info, latest_block_number);
let start = tokio::time::Instant::now() + Duration::from_secs(3);
let mut info_interval = tokio::time::interval_at(start, INFO_MESSAGE_INTERVAL);
info_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
let handler = EventHandler { state, events, info_interval };
handler.await
}
/// Handles events emitted by the node and logs them accordingly.
#[pin_project::pin_project]
struct EventHandler<E> {
state: NodeState,
#[pin]
events: E,
#[pin]
info_interval: Interval,
}
impl<E, N: NodePrimitives> Future for EventHandler<E>
where
E: Stream<Item = NodeEvent<N>> + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
while this.info_interval.poll_tick(cx).is_ready() {
if let Some(CurrentStage { stage_id, eta, checkpoint, entities_checkpoint, target }) =
&this.state.current_stage
{
let stage_progress =
entities_checkpoint.and_then(|entities| entities.fmt_percentage());
let stage_eta = eta.fmt_for_stage(*stage_id);
match (stage_progress, stage_eta) {
(Some(stage_progress), Some(stage_eta)) => {
info!(
target: "reth::cli",
connected_peers = this.state.num_connected_peers(),
stage = %stage_id,
checkpoint = checkpoint.block_number,
target = %OptionalField(*target),
%stage_progress,
%stage_eta,
"Status"
)
}
(Some(stage_progress), None) => {
info!(
target: "reth::cli",
connected_peers = this.state.num_connected_peers(),
stage = %stage_id,
checkpoint = checkpoint.block_number,
target = %OptionalField(*target),
%stage_progress,
"Status"
)
}
(None, Some(stage_eta)) => {
info!(
target: "reth::cli",
connected_peers = this.state.num_connected_peers(),
stage = %stage_id,
checkpoint = checkpoint.block_number,
target = %OptionalField(*target),
%stage_eta,
"Status"
)
}
(None, None) => {
info!(
target: "reth::cli",
connected_peers = this.state.num_connected_peers(),
stage = %stage_id,
checkpoint = checkpoint.block_number,
target = %OptionalField(*target),
"Status"
)
}
}
} else if let Some(latest_block) = this.state.latest_block {
let now =
SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
if now.saturating_sub(this.state.latest_block_time.unwrap_or(0)) > 60 {
// Once we start receiving consensus nodes, don't emit status unless stalled for
// 1 minute
info!(
target: "reth::cli",
connected_peers = this.state.num_connected_peers(),
%latest_block,
"Status"
);
}
} else {
info!(
target: "reth::cli",
connected_peers = this.state.num_connected_peers(),
"Status"
);
}
}
while let Poll::Ready(Some(event)) = this.events.as_mut().poll_next(cx) {
match event {
NodeEvent::Pipeline(event) => {
this.state.handle_pipeline_event(event);
}
NodeEvent::ConsensusEngine(event) => {
this.state.handle_consensus_engine_event(event);
}
NodeEvent::ConsensusLayerHealth(event) => {
this.state.handle_consensus_layer_health_event(event)
}
NodeEvent::Pruner(event) => {
this.state.handle_pruner_event(event);
}
NodeEvent::StaticFileProducer(event) => {
this.state.handle_static_file_producer_event(event);
}
NodeEvent::Other(event_description) => {
warn!("{event_description}");
}
}
}
Poll::Pending
}
}
/// A container calculating the estimated time that a stage will complete in, based on stage
/// checkpoints reported by the pipeline.
///
/// One `Eta` is only valid for a single stage.
#[derive(Default, Copy, Clone)]
struct Eta {
/// The last stage checkpoint
last_checkpoint: EntitiesCheckpoint,
/// The last time the stage reported its checkpoint
last_checkpoint_time: Option<Instant>,
/// The current ETA
eta: Option<Duration>,
}
impl Eta {
/// Update the ETA given the checkpoint, if possible.
fn update(&mut self, stage: StageId, checkpoint: StageCheckpoint) {
let Some(current) = checkpoint.entities() else { return };
if let Some(last_checkpoint_time) = &self.last_checkpoint_time {
let Some(processed_since_last) =
current.processed.checked_sub(self.last_checkpoint.processed)
else {
self.eta = None;
debug!(target: "reth::cli", %stage, ?current, ?self.last_checkpoint, "Failed to calculate the ETA: processed entities is less than the last checkpoint");
return
};
let elapsed = last_checkpoint_time.elapsed();
let per_second = processed_since_last as f64 / elapsed.as_secs_f64();
let Some(remaining) = current.total.checked_sub(current.processed) else {
self.eta = None;
debug!(target: "reth::cli", %stage, ?current, "Failed to calculate the ETA: total entities is less than processed entities");
return
};
self.eta = Duration::try_from_secs_f64(remaining as f64 / per_second).ok();
}
self.last_checkpoint = current;
self.last_checkpoint_time = Some(Instant::now());
}
/// Returns `true` if the ETA is available, i.e. at least one checkpoint has been reported.
fn is_available(&self) -> bool {
self.eta.zip(self.last_checkpoint_time).is_some()
}
/// Format ETA for a given stage.
///
/// NOTE: Currently ETA is enabled only for the stages that have predictable progress.
/// It's not the case for network-dependent ([`StageId::Headers`] and [`StageId::Bodies`]) and
/// [`StageId::Execution`] stages.
fn fmt_for_stage(&self, stage: StageId) -> Option<String> {
if !self.is_available() ||
matches!(stage, StageId::Headers | StageId::Bodies | StageId::Execution)
{
None
} else {
Some(self.to_string())
}
}
}
impl Display for Eta {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
if let Some((eta, last_checkpoint_time)) = self.eta.zip(self.last_checkpoint_time) {
let remaining = eta.checked_sub(last_checkpoint_time.elapsed());
if let Some(remaining) = remaining {
return write!(
f,
"{}",
humantime::format_duration(Duration::from_secs(remaining.as_secs()))
)
}
}
write!(f, "unknown")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn eta_display_no_milliseconds() {
let eta = Eta {
last_checkpoint_time: Some(Instant::now()),
eta: Some(Duration::from_millis(
13 * 60 * 1000 + // Minutes
37 * 1000 + // Seconds
999, // Milliseconds
)),
..Default::default()
}
.to_string();
assert_eq!(eta, "13m 37s");
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/events/src/lib.rs | crates/node/events/src/lib.rs | //! Various event handlers for the node.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod cl;
pub mod node;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/events/src/cl.rs | crates/node/events/src/cl.rs | //! Events related to Consensus Layer health.
use alloy_consensus::Header;
use futures::Stream;
use reth_storage_api::CanonChainTracker;
use std::{
fmt,
pin::Pin,
task::{ready, Context, Poll},
time::Duration,
};
use tokio::time::{Instant, Interval};
/// Interval of checking Consensus Layer client health.
const CHECK_INTERVAL: Duration = Duration::from_secs(300);
/// Period of not receiving fork choice updates from Consensus Layer client,
/// after which the warning is issued.
const NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD: Duration = Duration::from_secs(120);
/// A Stream of [`ConsensusLayerHealthEvent`].
pub struct ConsensusLayerHealthEvents<H = Header> {
interval: Interval,
canon_chain: Box<dyn CanonChainTracker<Header = H>>,
}
impl<H> fmt::Debug for ConsensusLayerHealthEvents<H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ConsensusLayerHealthEvents").field("interval", &self.interval).finish()
}
}
impl<H> ConsensusLayerHealthEvents<H> {
/// Creates a new [`ConsensusLayerHealthEvents`] with the given canonical chain tracker.
pub fn new(canon_chain: Box<dyn CanonChainTracker<Header = H>>) -> Self {
// Skip the first tick to prevent the false `ConsensusLayerHealthEvent::NeverSeen` event.
let interval = tokio::time::interval_at(Instant::now() + CHECK_INTERVAL, CHECK_INTERVAL);
Self { interval, canon_chain }
}
}
impl<H: Send + Sync> Stream for ConsensusLayerHealthEvents<H> {
type Item = ConsensusLayerHealthEvent;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
loop {
ready!(this.interval.poll_tick(cx));
if let Some(fork_choice) = this.canon_chain.last_received_update_timestamp() {
if fork_choice.elapsed() <= NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD {
// We had an FCU, and it's recent. CL is healthy.
continue
}
// We had an FCU, but it's too old.
return Poll::Ready(Some(
ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(
fork_choice.elapsed(),
),
))
}
// We never had both FCU and transition config exchange.
return Poll::Ready(Some(ConsensusLayerHealthEvent::NeverSeen))
}
}
}
/// Event that is triggered when Consensus Layer health is degraded from the
/// Execution Layer point of view.
#[derive(Clone, Copy, Debug)]
pub enum ConsensusLayerHealthEvent {
/// Consensus Layer client was never seen.
NeverSeen,
/// Consensus Layer client has not been seen for a while.
HasNotBeenSeenForAWhile(Duration),
/// Updates from the Consensus Layer client were never received.
NeverReceivedUpdates,
/// Updates from the Consensus Layer client have not been received for a while.
HaveNotReceivedUpdatesForAWhile(Duration),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/build.rs | crates/node/core/build.rs | #![allow(missing_docs)]
use std::{env, error::Error};
use vergen::{BuildBuilder, CargoBuilder, Emitter};
use vergen_git2::Git2Builder;
fn main() -> Result<(), Box<dyn Error>> {
let mut emitter = Emitter::default();
let build_builder = BuildBuilder::default().build_timestamp(true).build()?;
emitter.add_instructions(&build_builder)?;
let cargo_builder = CargoBuilder::default().features(true).target_triple(true).build()?;
emitter.add_instructions(&cargo_builder)?;
let git_builder =
Git2Builder::default().describe(false, true, None).dirty(true).sha(false).build()?;
emitter.add_instructions(&git_builder)?;
emitter.emit_and_set()?;
let sha = env::var("VERGEN_GIT_SHA")?;
let sha_short = &sha[0..7];
let is_dirty = env::var("VERGEN_GIT_DIRTY")? == "true";
// > git describe --always --tags
// if not on a tag: v0.2.0-beta.3-82-g1939939b
// if on a tag: v0.2.0-beta.3
let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha_short}"));
let version_suffix = if is_dirty || not_on_tag { "-dev" } else { "" };
println!("cargo:rustc-env=RETH_VERSION_SUFFIX={version_suffix}");
// Set short SHA
println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha[..8]);
// Set the build profile
let out_dir = env::var("OUT_DIR").unwrap();
let profile = out_dir.rsplit(std::path::MAIN_SEPARATOR).nth(3).unwrap();
println!("cargo:rustc-env=RETH_BUILD_PROFILE={profile}");
// Set formatted version strings
let pkg_version = env!("CARGO_PKG_VERSION");
// The short version information for reth.
// - The latest version from Cargo.toml
// - The short SHA of the latest commit.
// Example: 0.1.0 (defa64b2)
println!("cargo:rustc-env=RETH_SHORT_VERSION={pkg_version}{version_suffix} ({sha_short})");
// LONG_VERSION
// The long version information for reth.
//
// - The latest version from Cargo.toml + version suffix (if any)
// - The full SHA of the latest commit
// - The build datetime
// - The build features
// - The build profile
//
// Example:
//
// ```text
// Version: 0.1.0
// Commit SHA: defa64b2
// Build Timestamp: 2023-05-19T01:47:19.815651705Z
// Build Features: jemalloc
// Build Profile: maxperf
// ```
println!("cargo:rustc-env=RETH_LONG_VERSION_0=Version: {pkg_version}{version_suffix}");
println!("cargo:rustc-env=RETH_LONG_VERSION_1=Commit SHA: {sha}");
println!(
"cargo:rustc-env=RETH_LONG_VERSION_2=Build Timestamp: {}",
env::var("VERGEN_BUILD_TIMESTAMP")?
);
println!(
"cargo:rustc-env=RETH_LONG_VERSION_3=Build Features: {}",
env::var("VERGEN_CARGO_FEATURES")?
);
println!("cargo:rustc-env=RETH_LONG_VERSION_4=Build Profile: {profile}");
// The version information for reth formatted for P2P (devp2p).
// - The latest version from Cargo.toml
// - The target triple
//
// Example: reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin
println!(
"cargo:rustc-env=RETH_P2P_CLIENT_VERSION={}",
format_args!("reth/v{pkg_version}-{sha_short}/{}", env::var("VERGEN_CARGO_TARGET_TRIPLE")?)
);
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/lib.rs | crates/node/core/src/lib.rs | //! The core of the Ethereum node. Collection of utilities and libraries that are used by the node.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod args;
pub mod cli;
pub mod dirs;
pub mod exit;
pub mod node_config;
pub mod utils;
pub mod version;
/// Re-exported primitive types
pub mod primitives {
pub use reth_ethereum_forks::*;
pub use reth_primitives_traits::*;
}
/// Re-export of `reth_rpc_*` crates.
pub mod rpc {
/// Re-exported from `reth_rpc::rpc`.
pub mod result {
pub use reth_rpc_server_types::result::*;
}
/// Re-exported from `reth_rpc::eth`.
pub mod compat {
pub use reth_rpc_convert::*;
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/version.rs | crates/node/core/src/version.rs | //! Version information for reth.
use std::{borrow::Cow, sync::OnceLock};
use alloy_primitives::Bytes;
use alloy_rpc_types_engine::ClientCode;
use reth_db::ClientVersion;
/// The client code for Reth
pub const CLIENT_CODE: ClientCode = ClientCode::RH;
/// Global static version metadata
static VERSION_METADATA: OnceLock<RethCliVersionConsts> = OnceLock::new();
/// Initialize the global version metadata.
pub fn try_init_version_metadata(
metadata: RethCliVersionConsts,
) -> Result<(), RethCliVersionConsts> {
VERSION_METADATA.set(metadata)
}
/// Constants for reth-cli
///
/// Global defaults can be set via [`try_init_version_metadata`].
#[derive(Debug, Default)]
pub struct RethCliVersionConsts {
/// The human readable name of the client
pub name_client: Cow<'static, str>,
/// The latest version from Cargo.toml.
pub cargo_pkg_version: Cow<'static, str>,
/// The full SHA of the latest commit.
pub vergen_git_sha_long: Cow<'static, str>,
/// The 8 character short SHA of the latest commit.
pub vergen_git_sha: Cow<'static, str>,
/// The build timestamp.
pub vergen_build_timestamp: Cow<'static, str>,
/// The target triple.
pub vergen_cargo_target_triple: Cow<'static, str>,
/// The build features.
pub vergen_cargo_features: Cow<'static, str>,
/// The short version information for reth.
pub short_version: Cow<'static, str>,
/// The long version information for reth.
pub long_version: Cow<'static, str>,
/// The build profile name.
pub build_profile_name: Cow<'static, str>,
/// The version information for reth formatted for P2P (devp2p).
///
/// - The latest version from Cargo.toml
/// - The target triple
///
/// # Example
///
/// ```text
/// reth/v{major}.{minor}.{patch}-{sha1}/{target}
/// ```
/// e.g.: `reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin`
pub p2p_client_version: Cow<'static, str>,
/// extra data used for payload building
pub extra_data: Cow<'static, str>,
}
/// The default extra data used for payload building.
///
/// - The latest version from Cargo.toml
/// - The OS identifier
///
/// # Example
///
/// ```text
/// reth/v{major}.{minor}.{patch}/{OS}
/// ```
pub fn default_extra_data() -> String {
format!("reth/v{}/{}", env!("CARGO_PKG_VERSION"), std::env::consts::OS)
}
/// The default extra data in bytes.
/// See [`default_extra_data`].
pub fn default_extra_data_bytes() -> Bytes {
Bytes::from(default_extra_data().as_bytes().to_vec())
}
/// The default client version accessing the database.
pub fn default_client_version() -> ClientVersion {
let meta = version_metadata();
ClientVersion {
version: meta.cargo_pkg_version.to_string(),
git_sha: meta.vergen_git_sha.to_string(),
build_timestamp: meta.vergen_build_timestamp.to_string(),
}
}
/// Get a reference to the global version metadata
pub fn version_metadata() -> &'static RethCliVersionConsts {
VERSION_METADATA.get_or_init(default_reth_version_metadata)
}
/// default reth version metadata using compile-time env! macros.
pub fn default_reth_version_metadata() -> RethCliVersionConsts {
RethCliVersionConsts {
name_client: Cow::Borrowed("Reth"),
cargo_pkg_version: Cow::Owned(env!("CARGO_PKG_VERSION").to_string()),
vergen_git_sha_long: Cow::Owned(env!("VERGEN_GIT_SHA").to_string()),
vergen_git_sha: Cow::Owned(env!("VERGEN_GIT_SHA_SHORT").to_string()),
vergen_build_timestamp: Cow::Owned(env!("VERGEN_BUILD_TIMESTAMP").to_string()),
vergen_cargo_target_triple: Cow::Owned(env!("VERGEN_CARGO_TARGET_TRIPLE").to_string()),
vergen_cargo_features: Cow::Owned(env!("VERGEN_CARGO_FEATURES").to_string()),
short_version: Cow::Owned(env!("RETH_SHORT_VERSION").to_string()),
long_version: Cow::Owned(format!(
"{}\n{}\n{}\n{}\n{}",
env!("RETH_LONG_VERSION_0"),
env!("RETH_LONG_VERSION_1"),
env!("RETH_LONG_VERSION_2"),
env!("RETH_LONG_VERSION_3"),
env!("RETH_LONG_VERSION_4"),
)),
build_profile_name: Cow::Owned(env!("RETH_BUILD_PROFILE").to_string()),
p2p_client_version: Cow::Owned(env!("RETH_P2P_CLIENT_VERSION").to_string()),
extra_data: Cow::Owned(default_extra_data()),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn assert_extra_data_less_32bytes() {
let extra_data = default_extra_data();
assert!(extra_data.len() <= 32, "extra data must be less than 32 bytes: {extra_data}")
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/node_config.rs | crates/node/core/src/node_config.rs | //! Support for customizing the node
use crate::{
args::{
DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EnclaveArgs, EngineArgs, NetworkArgs,
PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs,
},
dirs::{ChainPath, DataDirPath},
utils::get_single_header,
};
use alloy_consensus::BlockHeader;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{BlockNumber, B256};
use eyre::eyre;
use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET};
use reth_config::config::PruneConfig;
use reth_engine_local::MiningMode;
use reth_ethereum_forks::{EthereumHardforks, Head};
use reth_network_p2p::headers::client::HeadersClient;
use reth_primitives_traits::SealedHeader;
use reth_stages_types::StageId;
use reth_storage_api::{
BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader,
};
use reth_storage_errors::provider::ProviderResult;
use reth_transaction_pool::TransactionPool;
use serde::{de::DeserializeOwned, Serialize};
use std::{
fs,
net::SocketAddr,
path::{Path, PathBuf},
sync::Arc,
};
use tracing::*;
use crate::args::EraArgs;
pub use reth_engine_primitives::{
DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET,
DEFAULT_RESERVED_CPU_CORES,
};
/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold.
pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 0; // todo(dalton): Maybe feature flag this? We need this so archive nodes can get an accurate
// snapshot. benchmarks dont signal this is of great cost to us
/// Default size of cross-block cache in megabytes.
pub const DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB: u64 = 4 * 1024;
/// This includes all necessary configuration to launch the node.
/// The individual configuration options can be overwritten before launching the node.
///
/// # Example
/// ```rust
/// # use reth_node_core::{
/// # node_config::NodeConfig,
/// # args::RpcServerArgs,
/// # };
/// # use reth_rpc_server_types::RpcModuleSelection;
/// # use tokio::runtime::Handle;
///
/// async fn t() {
/// // create the builder
/// let builder = NodeConfig::default();
///
/// // configure the rpc apis
/// let mut rpc = RpcServerArgs::default().with_http().with_ws();
/// rpc.http_api = Some(RpcModuleSelection::All);
/// let builder = builder.with_rpc(rpc);
/// }
/// ```
///
/// This can also be used to launch a node with a temporary test database. This can be done with
/// the [`NodeConfig::test`] method.
///
/// # Example
/// ```rust
/// # use reth_node_core::{
/// # node_config::NodeConfig,
/// # args::RpcServerArgs,
/// # };
/// # use reth_rpc_server_types::RpcModuleSelection;
/// # use tokio::runtime::Handle;
///
/// async fn t() {
/// // create the builder with a test database, using the `test` method
/// let builder = NodeConfig::test();
///
/// // configure the rpc apis
/// let mut rpc = RpcServerArgs::default().with_http().with_ws();
/// rpc.http_api = Some(RpcModuleSelection::All);
/// let builder = builder.with_rpc(rpc);
/// }
/// ```
#[derive(Debug)]
pub struct NodeConfig<ChainSpec> {
/// All data directory related arguments
pub datadir: DatadirArgs,
/// The path to the configuration file to use.
pub config: Option<PathBuf>,
/// The chain this node is running.
///
/// Possible values are either a built-in chain or the path to a chain specification file.
pub chain: Arc<ChainSpec>,
/// Enable Prometheus metrics.
///
/// The metrics will be served at the given interface and port.
pub metrics: Option<SocketAddr>,
/// Add a new instance of a node.
///
/// Configures the ports of the node to avoid conflicts with the defaults.
/// This is useful for running multiple nodes on the same machine.
///
/// Max number of instances is 200. It is chosen in a way so that it's not possible to have
/// port numbers that conflict with each other.
///
/// Changes to the following port numbers:
/// - `DISCOVERY_PORT`: default + `instance` - 1
/// - `DISCOVERY_V5_PORT`: default + `instance` - 1
/// - `AUTH_PORT`: default + `instance` * 100 - 100
/// - `HTTP_RPC_PORT`: default - `instance` + 1
/// - `WS_RPC_PORT`: default + `instance` * 2 - 2
/// - `IPC_PATH`: default + `instance`
pub instance: Option<u16>,
/// All networking related arguments
pub network: NetworkArgs,
/// All rpc related arguments
pub rpc: RpcServerArgs,
/// All txpool related arguments with --txpool prefix
pub txpool: TxPoolArgs,
/// All payload builder related arguments
pub builder: PayloadBuilderArgs,
/// All debug related arguments with --debug prefix
pub debug: DebugArgs,
/// All database related arguments
pub db: DatabaseArgs,
/// All dev related arguments with --dev prefix
pub dev: DevArgs,
/// All pruning related arguments
pub pruning: PruningArgs,
/// All engine related arguments
pub engine: EngineArgs,
/// All enclave related arguments
pub enclave: EnclaveArgs,
/// All ERA import related arguments with --era prefix
pub era: EraArgs,
}
impl NodeConfig<ChainSpec> {
/// Creates a testing [`NodeConfig`], causing the database to be launched ephemerally.
pub fn test() -> Self {
Self::default()
// set all ports to zero by default for test instances
.with_unused_ports()
}
}
impl<ChainSpec> NodeConfig<ChainSpec> {
/// Creates a new config with given chain spec, setting all fields to default values.
pub fn new(chain: Arc<ChainSpec>) -> Self {
Self {
config: None,
chain,
metrics: None,
instance: None,
network: NetworkArgs::default(),
rpc: RpcServerArgs::default(),
txpool: TxPoolArgs::default(),
builder: PayloadBuilderArgs::default(),
debug: DebugArgs::default(),
db: DatabaseArgs::default(),
dev: DevArgs::default(),
pruning: PruningArgs::default(),
datadir: DatadirArgs::default(),
engine: EngineArgs::default(),
enclave: EnclaveArgs::default(),
era: EraArgs::default(),
}
}
/// Sets --dev mode for the node.
///
/// In addition to setting the `--dev` flag, this also:
/// - disables discovery in [`NetworkArgs`].
pub const fn dev(mut self) -> Self {
self.dev.dev = true;
self.network.discovery.disable_discovery = true;
self
}
/// Sets --dev mode for the node [`NodeConfig::dev`], if `dev` is true.
pub const fn set_dev(self, dev: bool) -> Self {
if dev {
self.dev()
} else {
self
}
}
/// Set the data directory args for the node
pub fn with_datadir_args(mut self, datadir_args: DatadirArgs) -> Self {
self.datadir = datadir_args;
self
}
/// Set the config file for the node
pub fn with_config(mut self, config: impl Into<PathBuf>) -> Self {
self.config = Some(config.into());
self
}
/// Set the [`ChainSpec`] for the node
pub fn with_chain(mut self, chain: impl Into<Arc<ChainSpec>>) -> Self {
self.chain = chain.into();
self
}
/// Set the metrics address for the node
pub const fn with_metrics(mut self, metrics: SocketAddr) -> Self {
self.metrics = Some(metrics);
self
}
/// Set the instance for the node
pub const fn with_instance(mut self, instance: u16) -> Self {
self.instance = Some(instance);
self
}
/// Returns the instance value, defaulting to 1 if not set.
pub fn get_instance(&self) -> u16 {
self.instance.unwrap_or(1)
}
/// Set the network args for the node
pub fn with_network(mut self, network: NetworkArgs) -> Self {
self.network = network;
self
}
/// Set the rpc args for the node
pub fn with_rpc(mut self, rpc: RpcServerArgs) -> Self {
self.rpc = rpc;
self
}
/// Set the txpool args for the node
pub fn with_txpool(mut self, txpool: TxPoolArgs) -> Self {
self.txpool = txpool;
self
}
/// Set the builder args for the node
pub fn with_payload_builder(mut self, builder: PayloadBuilderArgs) -> Self {
self.builder = builder;
self
}
/// Set the debug args for the node
pub fn with_debug(mut self, debug: DebugArgs) -> Self {
self.debug = debug;
self
}
/// Set the database args for the node
pub const fn with_db(mut self, db: DatabaseArgs) -> Self {
self.db = db;
self
}
/// Set the dev args for the node
pub const fn with_dev(mut self, dev: DevArgs) -> Self {
self.dev = dev;
self
}
/// Set the enclave args for the node
pub const fn with_enclave(mut self, enclave: EnclaveArgs) -> Self {
self.enclave = enclave;
self
}
/// Set the pruning args for the node
pub fn with_pruning(mut self, pruning: PruningArgs) -> Self {
self.pruning = pruning;
self
}
/// Returns pruning configuration.
pub fn prune_config(&self) -> Option<PruneConfig>
where
ChainSpec: EthereumHardforks,
{
self.pruning.prune_config(&self.chain)
}
/// Returns the max block that the node should run to, looking it up from the network if
/// necessary
pub async fn max_block<Provider, Client>(
&self,
network_client: Client,
provider: Provider,
) -> eyre::Result<Option<BlockNumber>>
where
Provider: HeaderProvider,
Client: HeadersClient<Header: reth_primitives_traits::BlockHeader>,
{
let max_block = if let Some(block) = self.debug.max_block {
Some(block)
} else if let Some(tip) = self.debug.tip {
Some(self.lookup_or_fetch_tip(provider, network_client, tip).await?)
} else {
None
};
Ok(max_block)
}
/// Fetches the head block from the database.
///
/// If the database is empty, returns the genesis block.
pub fn lookup_head<Factory>(&self, factory: &Factory) -> ProviderResult<Head>
where
Factory: DatabaseProviderFactory<
Provider: HeaderProvider + StageCheckpointReader + BlockHashReader,
>,
{
let provider = factory.database_provider_ro()?;
let head = provider.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number;
let header = provider
.header_by_number(head)?
.expect("the header for the latest block is missing, database is corrupt");
let total_difficulty = provider
.header_td_by_number(head)?
// total difficulty is effectively deprecated, but still required in some places, e.g.
// p2p
.unwrap_or_default();
let hash = provider
.block_hash(head)?
.expect("the hash for the latest block is missing, database is corrupt");
Ok(Head {
number: head,
hash,
difficulty: header.difficulty(),
total_difficulty,
timestamp: header.timestamp(),
})
}
/// Attempt to look up the block number for the tip hash in the database.
/// If it doesn't exist, download the header and return the block number.
///
/// NOTE: The download is attempted with infinite retries.
pub async fn lookup_or_fetch_tip<Provider, Client>(
&self,
provider: Provider,
client: Client,
tip: B256,
) -> ProviderResult<u64>
where
Provider: HeaderProvider,
Client: HeadersClient<Header: reth_primitives_traits::BlockHeader>,
{
let header = provider.header_by_hash_or_number(tip.into())?;
// try to look up the header in the database
if let Some(header) = header {
info!(target: "reth::cli", ?tip, "Successfully looked up tip block in the database");
return Ok(header.number())
}
Ok(self.fetch_tip_from_network(client, tip.into()).await.number())
}
/// Attempt to look up the block with the given number and return the header.
///
/// NOTE: The download is attempted with infinite retries.
pub async fn fetch_tip_from_network<Client>(
&self,
client: Client,
tip: BlockHashOrNumber,
) -> SealedHeader<Client::Header>
where
Client: HeadersClient<Header: reth_primitives_traits::BlockHeader>,
{
info!(target: "reth::cli", ?tip, "Fetching tip block from the network.");
let mut fetch_failures = 0;
loop {
match get_single_header(&client, tip).await {
Ok(tip_header) => {
info!(target: "reth::cli", ?tip, "Successfully fetched tip");
return tip_header
}
Err(error) => {
fetch_failures += 1;
if fetch_failures % 20 == 0 {
error!(target: "reth::cli", ?fetch_failures, %error, "Failed to fetch the tip. Retrying...");
}
}
}
}
}
/// Change rpc port numbers based on the instance number, using the inner
/// [`RpcServerArgs::adjust_instance_ports`] method.
pub fn adjust_instance_ports(&mut self) {
self.network.adjust_instance_ports(self.instance);
self.rpc.adjust_instance_ports(self.instance);
}
/// Sets networking and RPC ports to zero, causing the OS to choose random unused ports when
/// sockets are bound.
pub fn with_unused_ports(mut self) -> Self {
self.rpc = self.rpc.with_unused_ports();
self.network = self.network.with_unused_ports();
self
}
/// Effectively disables the RPC state cache by setting the cache sizes to `0`.
///
/// By setting the cache sizes to 0, caching of newly executed or fetched blocks will be
/// effectively disabled.
pub const fn with_disabled_rpc_cache(mut self) -> Self {
self.rpc.rpc_state_cache.set_zero_lengths();
self
}
/// Resolve the final datadir path.
pub fn datadir(&self) -> ChainPath<DataDirPath>
where
ChainSpec: EthChainSpec,
{
self.datadir.clone().resolve_datadir(self.chain.chain())
}
/// Load an application configuration from a specified path.
///
/// A new configuration file is created with default values if none
/// exists.
pub fn load_path<T: Serialize + DeserializeOwned + Default>(
path: impl AsRef<Path>,
) -> eyre::Result<T> {
let path = path.as_ref();
match fs::read_to_string(path) {
Ok(cfg_string) => {
toml::from_str(&cfg_string).map_err(|e| eyre!("Failed to parse TOML: {e}"))
}
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)
.map_err(|e| eyre!("Failed to create directory: {e}"))?;
}
let cfg = T::default();
let s = toml::to_string_pretty(&cfg)
.map_err(|e| eyre!("Failed to serialize to TOML: {e}"))?;
fs::write(path, s).map_err(|e| eyre!("Failed to write configuration file: {e}"))?;
Ok(cfg)
}
Err(e) => Err(eyre!("Failed to load configuration: {e}")),
}
}
/// Modifies the [`ChainSpec`] generic of the config using the provided closure.
pub fn map_chainspec<F, C>(self, f: F) -> NodeConfig<C>
where
F: FnOnce(Arc<ChainSpec>) -> C,
{
let chain = Arc::new(f(self.chain));
NodeConfig {
chain,
datadir: self.datadir,
config: self.config,
metrics: self.metrics,
instance: self.instance,
network: self.network,
rpc: self.rpc,
txpool: self.txpool,
builder: self.builder,
debug: self.debug,
db: self.db,
dev: self.dev,
pruning: self.pruning,
engine: self.engine,
enclave: self.enclave,
era: self.era,
}
}
/// Returns the [`MiningMode`] intended for --dev mode.
pub fn dev_mining_mode<Pool>(&self, pool: Pool) -> MiningMode<Pool>
where
Pool: TransactionPool + Unpin,
{
if let Some(interval) = self.dev.block_time {
MiningMode::interval(interval)
} else {
MiningMode::instant(pool, self.dev.block_max_transactions)
}
}
}
impl Default for NodeConfig<ChainSpec> {
fn default() -> Self {
Self::new(MAINNET.clone())
}
}
impl<ChainSpec> Clone for NodeConfig<ChainSpec> {
fn clone(&self) -> Self {
Self {
chain: self.chain.clone(),
config: self.config.clone(),
metrics: self.metrics,
instance: self.instance,
network: self.network.clone(),
rpc: self.rpc.clone(),
txpool: self.txpool.clone(),
builder: self.builder.clone(),
debug: self.debug.clone(),
db: self.db,
dev: self.dev,
pruning: self.pruning.clone(),
datadir: self.datadir.clone(),
engine: self.engine.clone(),
enclave: self.enclave.clone(),
era: self.era.clone(),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/utils.rs | crates/node/core/src/utils.rs | //! Utility functions for node startup and shutdown, for example path parsing and retrieving single
//! blocks from the network.
use alloy_consensus::BlockHeader;
use alloy_eips::BlockHashOrNumber;
use alloy_rpc_types_engine::{JwtError, JwtSecret};
use eyre::Result;
use reth_consensus::{Consensus, ConsensusError};
use reth_network_p2p::{
bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority,
};
use reth_primitives_traits::{Block, SealedBlock, SealedHeader};
use std::{
env::VarError,
path::{Path, PathBuf},
};
use tracing::{debug, info};
/// Parses a user-specified path with support for environment variables and common shorthands (e.g.
/// ~ for the user's home directory).
pub fn parse_path(value: &str) -> Result<PathBuf, shellexpand::LookupError<VarError>> {
shellexpand::full(value).map(|path| PathBuf::from(path.into_owned()))
}
/// Attempts to retrieve or create a JWT secret from the specified path.
pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result<JwtSecret, JwtError> {
if path.exists() {
debug!(target: "reth::cli", ?path, "Reading JWT auth secret file");
JwtSecret::from_file(path)
} else {
info!(target: "reth::cli", ?path, "Creating JWT auth secret file");
JwtSecret::try_create_random(path)
}
}
/// Get a single header from the network
pub async fn get_single_header<Client>(
client: Client,
id: BlockHashOrNumber,
) -> Result<SealedHeader<Client::Header>>
where
Client: HeadersClient<Header: reth_primitives_traits::BlockHeader>,
{
let (peer_id, response) = client.get_header_with_priority(id, Priority::High).await?.split();
let Some(header) = response else {
client.report_bad_message(peer_id);
eyre::bail!("Invalid number of headers received. Expected: 1. Received: 0")
};
let header = SealedHeader::seal_slow(header);
let valid = match id {
BlockHashOrNumber::Hash(hash) => header.hash() == hash,
BlockHashOrNumber::Number(number) => header.number() == number,
};
if !valid {
client.report_bad_message(peer_id);
eyre::bail!(
"Received invalid header. Received: {:?}. Expected: {:?}",
header.num_hash(),
id
);
}
Ok(header)
}
/// Get a body from the network based on header
pub async fn get_single_body<B, Client>(
client: Client,
header: SealedHeader<B::Header>,
consensus: impl Consensus<B, Error = ConsensusError>,
) -> Result<SealedBlock<B>>
where
B: Block,
Client: BodiesClient<Body = B::Body>,
{
let (peer_id, response) = client.get_block_body(header.hash()).await?.split();
let Some(body) = response else {
client.report_bad_message(peer_id);
eyre::bail!("Invalid number of bodies received. Expected: 1. Received: 0")
};
let block = SealedBlock::from_sealed_parts(header, body);
consensus.validate_block_pre_execution(&block)?;
Ok(block)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/dirs.rs | crates/node/core/src/dirs.rs | //! reth data directories.
use crate::{args::DatadirArgs, utils::parse_path};
use reth_chainspec::Chain;
use std::{
env::VarError,
fmt::{Debug, Display, Formatter},
path::{Path, PathBuf},
str::FromStr,
};
/// Constructs a string to be used as a path for configuration and db paths.
pub fn config_path_prefix(chain: Chain) -> String {
chain.to_string()
}
/// Returns the path to the reth data directory.
///
/// Refer to [`dirs_next::data_dir`] for cross-platform behavior.
pub fn data_dir() -> Option<PathBuf> {
dirs_next::data_dir().map(|root| root.join("reth"))
}
/// Returns the path to the reth database.
///
/// Refer to [`dirs_next::data_dir`] for cross-platform behavior.
pub fn database_path() -> Option<PathBuf> {
data_dir().map(|root| root.join("db"))
}
/// Returns the path to the reth configuration directory.
///
/// Refer to [`dirs_next::config_dir`] for cross-platform behavior.
pub fn config_dir() -> Option<PathBuf> {
dirs_next::config_dir().map(|root| root.join("reth"))
}
/// Returns the path to the reth cache directory.
///
/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior.
pub fn cache_dir() -> Option<PathBuf> {
dirs_next::cache_dir().map(|root| root.join("reth"))
}
/// Returns the path to the reth logs directory.
///
/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior.
pub fn logs_dir() -> Option<PathBuf> {
cache_dir().map(|root| root.join("logs"))
}
/// Returns the path to the reth data dir.
///
/// The data dir should contain a subdirectory for each chain, and those chain directories will
/// include all information for that chain, such as the p2p secret.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
#[non_exhaustive]
pub struct DataDirPath;
impl XdgPath for DataDirPath {
fn resolve() -> Option<PathBuf> {
data_dir()
}
}
/// Returns the path to the reth logs directory.
///
/// Refer to [`dirs_next::cache_dir`] for cross-platform behavior.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
#[non_exhaustive]
pub struct LogsDir;
impl XdgPath for LogsDir {
fn resolve() -> Option<PathBuf> {
logs_dir()
}
}
/// A small helper trait for unit structs that represent a standard path following the XDG
/// path specification.
pub trait XdgPath {
/// Resolve the standard path.
fn resolve() -> Option<PathBuf>;
}
/// A wrapper type that either parses a user-given path or defaults to an
/// OS-specific path.
///
/// The [`FromStr`] implementation supports shell expansions and common patterns such as `~` for the
/// home directory.
///
/// # Example
///
/// ```
/// use reth_node_core::dirs::{DataDirPath, PlatformPath};
/// use std::str::FromStr;
///
/// // Resolves to the platform-specific database path
/// let default: PlatformPath<DataDirPath> = PlatformPath::default();
/// // Resolves to `$(pwd)/my/path/to/datadir`
/// let custom: PlatformPath<DataDirPath> = PlatformPath::from_str("my/path/to/datadir").unwrap();
///
/// assert_ne!(default.as_ref(), custom.as_ref());
/// ```
#[derive(Debug, PartialEq, Eq)]
pub struct PlatformPath<D>(PathBuf, std::marker::PhantomData<D>);
impl<D> Display for PlatformPath<D> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0.display())
}
}
impl<D> Clone for PlatformPath<D> {
fn clone(&self) -> Self {
Self(self.0.clone(), std::marker::PhantomData)
}
}
impl<D: XdgPath> Default for PlatformPath<D> {
fn default() -> Self {
Self(
D::resolve().expect("Could not resolve default path. Set one manually."),
std::marker::PhantomData,
)
}
}
impl<D> FromStr for PlatformPath<D> {
type Err = shellexpand::LookupError<VarError>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(parse_path(s)?, std::marker::PhantomData))
}
}
impl<D> AsRef<Path> for PlatformPath<D> {
fn as_ref(&self) -> &Path {
self.0.as_path()
}
}
impl<D> From<PlatformPath<D>> for PathBuf {
fn from(value: PlatformPath<D>) -> Self {
value.0
}
}
impl<D> PlatformPath<D> {
/// Returns the path joined with another path
pub fn join<P: AsRef<Path>>(&self, path: P) -> Self {
Self(self.0.join(path), std::marker::PhantomData)
}
}
impl<D> PlatformPath<D> {
/// Converts the path to a `ChainPath` with the given `Chain`.
pub fn with_chain(&self, chain: Chain, datadir_args: DatadirArgs) -> ChainPath<D> {
// extract chain name
let platform_path = self.platform_path_from_chain(chain);
ChainPath::new(platform_path, chain, datadir_args)
}
fn platform_path_from_chain(&self, chain: Chain) -> Self {
let chain_name = config_path_prefix(chain);
let path = self.0.join(chain_name);
Self(path, std::marker::PhantomData)
}
/// Map the inner path to a new type `T`.
pub fn map_to<T>(&self) -> PlatformPath<T> {
PlatformPath(self.0.clone(), std::marker::PhantomData)
}
}
/// An Optional wrapper type around [`PlatformPath`].
///
/// This is useful for when a path is optional, such as the `--data-dir` flag.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct MaybePlatformPath<D>(Option<PlatformPath<D>>);
// === impl MaybePlatformPath ===
impl<D: XdgPath> MaybePlatformPath<D> {
/// Returns the path if it is set, otherwise returns the default path for the given chain.
pub fn unwrap_or_chain_default(&self, chain: Chain, datadir_args: DatadirArgs) -> ChainPath<D> {
ChainPath(
self.0
.clone()
.unwrap_or_else(|| PlatformPath::default().platform_path_from_chain(chain)),
chain,
datadir_args,
)
}
/// Returns the default platform path for the specified [Chain].
pub fn chain_default(chain: Chain) -> ChainPath<D> {
PlatformPath::default().with_chain(chain, DatadirArgs::default())
}
/// Returns true if a custom path is set
pub const fn is_some(&self) -> bool {
self.0.is_some()
}
/// Returns the path if it is set, otherwise returns `None`.
pub fn as_ref(&self) -> Option<&Path> {
self.0.as_ref().map(|p| p.as_ref())
}
/// Returns the path if it is set, otherwise returns the default path, without any chain
/// directory.
pub fn unwrap_or_default(&self) -> PlatformPath<D> {
self.0.clone().unwrap_or_default()
}
}
impl<D: XdgPath> Display for MaybePlatformPath<D> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
if let Some(path) = &self.0 {
path.fmt(f)
} else {
// NOTE: this is a workaround for making it work with clap's `default_value_t` which
// computes the default value via `Default -> Display -> FromStr`
write!(f, "default")
}
}
}
impl<D> Default for MaybePlatformPath<D> {
fn default() -> Self {
Self(None)
}
}
impl<D> FromStr for MaybePlatformPath<D> {
type Err = shellexpand::LookupError<VarError>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let p = match s {
"default" => {
// NOTE: this is a workaround for making it work with clap's `default_value_t` which
// computes the default value via `Default -> Display -> FromStr`
None
}
_ => Some(PlatformPath::from_str(s)?),
};
Ok(Self(p))
}
}
impl<D> From<PathBuf> for MaybePlatformPath<D> {
fn from(path: PathBuf) -> Self {
Self(Some(PlatformPath(path, std::marker::PhantomData)))
}
}
/// Wrapper type around `PlatformPath` that includes a `Chain`, used for separating reth data for
/// different networks.
///
/// If the chain is either mainnet, sepolia, or holesky, then the path will be:
/// * mainnet: `<DIR>/mainnet`
/// * sepolia: `<DIR>/sepolia`
/// * holesky: `<DIR>/holesky`
///
/// Otherwise, the path will be dependent on the chain ID:
/// * `<DIR>/<CHAIN_ID>`
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChainPath<D>(PlatformPath<D>, Chain, DatadirArgs);
impl<D> ChainPath<D> {
/// Returns a new `ChainPath` given a `PlatformPath` and a `Chain`.
pub const fn new(path: PlatformPath<D>, chain: Chain, datadir_args: DatadirArgs) -> Self {
Self(path, chain, datadir_args)
}
/// Returns the path to the reth data directory for this chain.
///
/// `<DIR>/<CHAIN_ID>`
pub fn data_dir(&self) -> &Path {
self.0.as_ref()
}
/// Returns the path to the db directory for this chain.
///
/// `<DIR>/<CHAIN_ID>/db`
pub fn db(&self) -> PathBuf {
self.data_dir().join("db")
}
/// Returns the path to the static files directory for this chain.
///
/// `<DIR>/<CHAIN_ID>/static_files`
pub fn static_files(&self) -> PathBuf {
let datadir_args = &self.2;
if let Some(static_files_path) = &datadir_args.static_files_path {
static_files_path.clone()
} else {
self.data_dir().join("static_files")
}
}
/// Returns the path to the reth p2p secret key for this chain.
///
/// `<DIR>/<CHAIN_ID>/discovery-secret`
pub fn p2p_secret(&self) -> PathBuf {
self.data_dir().join("discovery-secret")
}
/// Returns the path to the known peers file for this chain.
///
/// `<DIR>/<CHAIN_ID>/known-peers.json`
pub fn known_peers(&self) -> PathBuf {
self.data_dir().join("known-peers.json")
}
/// Returns the path to the blobstore directory for this chain where blobs of unfinalized
/// transactions are stored.
///
/// `<DIR>/<CHAIN_ID>/blobstore`
pub fn blobstore(&self) -> PathBuf {
self.data_dir().join("blobstore")
}
/// Returns the path to the local transactions backup file
///
/// `<DIR>/<CHAIN_ID>/txpool-transactions-backup.rlp`
pub fn txpool_transactions(&self) -> PathBuf {
self.data_dir().join("txpool-transactions-backup.rlp")
}
/// Returns the path to the config file for this chain.
///
/// `<DIR>/<CHAIN_ID>/reth.toml`
pub fn config(&self) -> PathBuf {
self.data_dir().join("reth.toml")
}
/// Returns the path to the jwtsecret file for this chain.
///
/// `<DIR>/<CHAIN_ID>/jwt.hex`
pub fn jwt(&self) -> PathBuf {
self.data_dir().join("jwt.hex")
}
/// Returns the path to the invalid block hooks directory for this chain.
///
/// `<DIR>/<CHAIN_ID>/invalid_block_hooks`
pub fn invalid_block_hooks(&self) -> PathBuf {
self.data_dir().join("invalid_block_hooks")
}
/// Returns the path to the ExEx WAL directory for this chain.
pub fn exex_wal(&self) -> PathBuf {
self.data_dir().join("exex/wal")
}
}
impl<D> AsRef<Path> for ChainPath<D> {
fn as_ref(&self) -> &Path {
self.0.as_ref()
}
}
impl<D> Display for ChainPath<D> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
impl<D> From<ChainPath<D>> for PathBuf {
fn from(value: ChainPath<D>) -> Self {
value.0.into()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_maybe_data_dir_path() {
let path = MaybePlatformPath::<DataDirPath>::default();
let path = path.unwrap_or_chain_default(Chain::mainnet(), DatadirArgs::default());
assert!(path.as_ref().ends_with("reth/mainnet"), "{path:?}");
let db_path = path.db();
assert!(db_path.ends_with("reth/mainnet/db"), "{db_path:?}");
let path = MaybePlatformPath::<DataDirPath>::from_str("my/path/to/datadir").unwrap();
let path = path.unwrap_or_chain_default(Chain::mainnet(), DatadirArgs::default());
assert!(path.as_ref().ends_with("my/path/to/datadir"), "{path:?}");
}
#[test]
fn test_maybe_testnet_datadir_path() {
let path = MaybePlatformPath::<DataDirPath>::default();
let path = path.unwrap_or_chain_default(Chain::holesky(), DatadirArgs::default());
assert!(path.as_ref().ends_with("reth/holesky"), "{path:?}");
let path = MaybePlatformPath::<DataDirPath>::default();
let path = path.unwrap_or_chain_default(Chain::sepolia(), DatadirArgs::default());
assert!(path.as_ref().ends_with("reth/sepolia"), "{path:?}");
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/exit.rs | crates/node/core/src/exit.rs | //! Helper types for waiting for the node to exit.
use futures::{future::BoxFuture, FutureExt};
use std::{
fmt,
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
/// A Future which resolves when the node exits
pub struct NodeExitFuture {
/// The consensus engine future.
/// This can be polled to wait for the consensus engine to exit.
consensus_engine_fut: Option<BoxFuture<'static, eyre::Result<()>>>,
/// Flag indicating whether the node should be terminated after the pipeline sync.
terminate: bool,
}
impl fmt::Debug for NodeExitFuture {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("NodeExitFuture")
.field("consensus_engine_fut", &"...")
.field("terminate", &self.terminate)
.finish()
}
}
impl NodeExitFuture {
/// Create a new `NodeExitFuture`.
pub fn new<F>(consensus_engine_fut: F, terminate: bool) -> Self
where
F: Future<Output = eyre::Result<()>> + 'static + Send,
{
Self { consensus_engine_fut: Some(Box::pin(consensus_engine_fut)), terminate }
}
}
impl Future for NodeExitFuture {
type Output = eyre::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
if let Some(rx) = this.consensus_engine_fut.as_mut() {
match ready!(rx.poll_unpin(cx)) {
Ok(_) => {
this.consensus_engine_fut.take();
if this.terminate {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
Err(err) => Poll::Ready(Err(err)),
}
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::future::poll_fn;
#[tokio::test]
async fn test_node_exit_future_terminate_true() {
let fut = async { Ok(()) };
let node_exit_future = NodeExitFuture::new(fut, true);
let res = node_exit_future.await;
assert!(res.is_ok());
}
#[tokio::test]
async fn test_node_exit_future_terminate_false() {
let fut = async { Ok(()) };
let mut node_exit_future = NodeExitFuture::new(fut, false);
poll_fn(|cx| {
assert!(node_exit_future.poll_unpin(cx).is_pending());
Poll::Ready(())
})
.await;
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/cli/config.rs | crates/node/core/src/cli/config.rs | //! Config traits for various node components.
use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_36M;
use alloy_primitives::Bytes;
use reth_chainspec::{Chain, ChainKind, NamedChain};
use reth_network::{protocol::IntoRlpxSubProtocol, NetworkPrimitives};
use reth_transaction_pool::PoolConfig;
use std::{borrow::Cow, time::Duration};
/// 45M gas limit
const ETHEREUM_BLOCK_GAS_LIMIT_45M: u64 = 45_000_000;
/// 60M gas limit
const ETHEREUM_BLOCK_GAS_LIMIT_60M: u64 = 60_000_000;
/// A trait that provides payload builder settings.
///
/// This provides all basic payload builder settings and is implemented by the
/// [`PayloadBuilderArgs`](crate::args::PayloadBuilderArgs) type.
pub trait PayloadBuilderConfig {
/// Block extra data set by the payload builder.
fn extra_data(&self) -> Cow<'_, str>;
/// Returns the extra data as bytes.
fn extra_data_bytes(&self) -> Bytes {
self.extra_data().as_bytes().to_vec().into()
}
/// The interval at which the job should build a new payload after the last.
fn interval(&self) -> Duration;
/// The deadline for when the payload builder job should resolve.
fn deadline(&self) -> Duration;
/// Target gas limit for built blocks.
fn gas_limit(&self) -> Option<u64>;
/// Maximum number of tasks to spawn for building a payload.
fn max_payload_tasks(&self) -> usize;
/// Returns the configured gas limit if set, or a chain-specific default.
fn gas_limit_for(&self, chain: Chain) -> u64 {
if let Some(limit) = self.gas_limit() {
return limit;
}
match chain.kind() {
ChainKind::Named(NamedChain::Sepolia | NamedChain::Holesky | NamedChain::Hoodi) => {
ETHEREUM_BLOCK_GAS_LIMIT_60M
}
ChainKind::Named(NamedChain::Mainnet) => ETHEREUM_BLOCK_GAS_LIMIT_45M,
_ => ETHEREUM_BLOCK_GAS_LIMIT_36M,
}
}
}
/// A trait that represents the configured network and can be used to apply additional configuration
/// to the network.
pub trait RethNetworkConfig {
/// Adds a new additional protocol to the `RLPx` sub-protocol list.
///
/// These additional protocols are negotiated during the `RLPx` handshake.
/// If both peers share the same protocol, the corresponding handler will be included alongside
/// the `eth` protocol.
///
/// See also [`ProtocolHandler`](reth_network::protocol::ProtocolHandler)
fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol);
/// Returns the secret key used for authenticating sessions.
fn secret_key(&self) -> secp256k1::SecretKey;
// TODO add more network config methods here
}
impl<N: NetworkPrimitives> RethNetworkConfig for reth_network::NetworkManager<N> {
fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
Self::add_rlpx_sub_protocol(self, protocol);
}
fn secret_key(&self) -> secp256k1::SecretKey {
Self::secret_key(self)
}
}
/// A trait that provides all basic config values for the transaction pool and is implemented by the
/// [`TxPoolArgs`](crate::args::TxPoolArgs) type.
pub trait RethTransactionPoolConfig {
/// Returns transaction pool configuration.
fn pool_config(&self) -> PoolConfig;
/// Returns max batch size for transaction batch insertion.
fn max_batch_size(&self) -> usize;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/cli/mod.rs | crates/node/core/src/cli/mod.rs | //! Additional CLI configuration support.
pub mod config;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/pruning.rs | crates/node/core/src/args/pruning.rs | //! Pruning and full node arguments
use crate::{args::error::ReceiptsLogError, primitives::EthereumHardfork};
use alloy_primitives::{Address, BlockNumber};
use clap::{builder::RangedU64ValueParser, Args};
use reth_chainspec::EthereumHardforks;
use reth_config::config::PruneConfig;
use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE};
use std::collections::BTreeMap;
/// Parameters for pruning and full node
#[derive(Debug, Clone, Args, PartialEq, Eq, Default)]
#[command(next_help_heading = "Pruning")]
pub struct PruningArgs {
/// Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored.
#[arg(long, default_value_t = false)]
pub full: bool,
/// Minimum pruning interval measured in blocks.
#[arg(long, value_parser = RangedU64ValueParser::<u64>::new().range(1..),)]
pub block_interval: Option<u64>,
// Sender Recovery
/// Prunes all sender recovery data.
#[arg(long = "prune.senderrecovery.full", conflicts_with_all = &["sender_recovery_distance", "sender_recovery_before"])]
pub sender_recovery_full: bool,
/// Prune sender recovery data before the `head-N` block number. In other words, keep last N +
/// 1 blocks.
#[arg(long = "prune.senderrecovery.distance", value_name = "BLOCKS", conflicts_with_all = &["sender_recovery_full", "sender_recovery_before"])]
pub sender_recovery_distance: Option<u64>,
/// Prune sender recovery data before the specified block number. The specified block number is
/// not pruned.
#[arg(long = "prune.senderrecovery.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["sender_recovery_full", "sender_recovery_distance"])]
pub sender_recovery_before: Option<BlockNumber>,
// Transaction Lookup
/// Prunes all transaction lookup data.
#[arg(long = "prune.transactionlookup.full", conflicts_with_all = &["transaction_lookup_distance", "transaction_lookup_before"])]
pub transaction_lookup_full: bool,
/// Prune transaction lookup data before the `head-N` block number. In other words, keep last N
/// + 1 blocks.
#[arg(long = "prune.transactionlookup.distance", value_name = "BLOCKS", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_before"])]
pub transaction_lookup_distance: Option<u64>,
/// Prune transaction lookup data before the specified block number. The specified block number
/// is not pruned.
#[arg(long = "prune.transactionlookup.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["transaction_lookup_full", "transaction_lookup_distance"])]
pub transaction_lookup_before: Option<BlockNumber>,
// Receipts
/// Prunes all receipt data.
#[arg(long = "prune.receipts.full", conflicts_with_all = &["receipts_pre_merge", "receipts_distance", "receipts_before"])]
pub receipts_full: bool,
/// Prune receipts before the merge block.
#[arg(long = "prune.receipts.pre-merge", conflicts_with_all = &["receipts_full", "receipts_distance", "receipts_before"])]
pub receipts_pre_merge: bool,
/// Prune receipts before the `head-N` block number. In other words, keep last N + 1 blocks.
#[arg(long = "prune.receipts.distance", value_name = "BLOCKS", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_before"])]
pub receipts_distance: Option<u64>,
/// Prune receipts before the specified block number. The specified block number is not pruned.
#[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance"])]
pub receipts_before: Option<BlockNumber>,
// Receipts Log Filter
/// Configure receipts log filter. Format:
/// <`address`>:<`prune_mode`>... where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or
/// 'before:<`block_number`>'
#[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance", "receipts_before"], value_parser = parse_receipts_log_filter)]
pub receipts_log_filter: Option<ReceiptsLogPruneConfig>,
// Account History
/// Prunes all account history.
#[arg(long = "prune.accounthistory.full", conflicts_with_all = &["account_history_distance", "account_history_before"])]
pub account_history_full: bool,
/// Prune account before the `head-N` block number. In other words, keep last N + 1 blocks.
#[arg(long = "prune.accounthistory.distance", value_name = "BLOCKS", conflicts_with_all = &["account_history_full", "account_history_before"])]
pub account_history_distance: Option<u64>,
/// Prune account history before the specified block number. The specified block number is not
/// pruned.
#[arg(long = "prune.accounthistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["account_history_full", "account_history_distance"])]
pub account_history_before: Option<BlockNumber>,
// Storage History
/// Prunes all storage history data.
#[arg(long = "prune.storagehistory.full", conflicts_with_all = &["storage_history_distance", "storage_history_before"])]
pub storage_history_full: bool,
/// Prune storage history before the `head-N` block number. In other words, keep last N + 1
/// blocks.
#[arg(long = "prune.storagehistory.distance", value_name = "BLOCKS", conflicts_with_all = &["storage_history_full", "storage_history_before"])]
pub storage_history_distance: Option<u64>,
/// Prune storage history before the specified block number. The specified block number is not
/// pruned.
#[arg(long = "prune.storagehistory.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["storage_history_full", "storage_history_distance"])]
pub storage_history_before: Option<BlockNumber>,
// Bodies
/// Prune bodies before the merge block.
#[arg(long = "prune.bodies.pre-merge", value_name = "BLOCKS", conflicts_with_all = &["bodies_distance", "bodies_before"])]
pub bodies_pre_merge: bool,
/// Prune bodies before the `head-N` block number. In other words, keep last N + 1
/// blocks.
#[arg(long = "prune.bodies.distance", value_name = "BLOCKS", conflicts_with_all = &["bodies_pre_merge", "bodies_before"])]
pub bodies_distance: Option<u64>,
/// Prune storage history before the specified block number. The specified block number is not
/// pruned.
#[arg(long = "prune.bodies.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["bodies_distance", "bodies_pre_merge"])]
pub bodies_before: Option<BlockNumber>,
}
impl PruningArgs {
/// Returns pruning configuration.
pub fn prune_config<ChainSpec>(&self, chain_spec: &ChainSpec) -> Option<PruneConfig>
where
ChainSpec: EthereumHardforks,
{
// Initialize with a default prune configuration.
let mut config = PruneConfig::default();
// If --full is set, use full node defaults.
if self.full {
config = PruneConfig {
block_interval: config.block_interval,
segments: PruneModes {
sender_recovery: Some(PruneMode::Full),
transaction_lookup: None,
receipts: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)),
// TODO: set default to pre-merge block if available
bodies_history: None,
receipts_log_filter: Default::default(),
},
}
}
// Override with any explicitly set prune.* flags.
if let Some(block_interval) = self.block_interval {
config.block_interval = block_interval as usize;
}
if let Some(mode) = self.sender_recovery_prune_mode() {
config.segments.sender_recovery = Some(mode);
}
if let Some(mode) = self.transaction_lookup_prune_mode() {
config.segments.transaction_lookup = Some(mode);
}
if let Some(mode) = self.receipts_prune_mode(chain_spec) {
config.segments.receipts = Some(mode);
}
if let Some(mode) = self.account_history_prune_mode() {
config.segments.account_history = Some(mode);
}
if let Some(mode) = self.bodies_prune_mode(chain_spec) {
config.segments.bodies_history = Some(mode);
}
if let Some(mode) = self.storage_history_prune_mode() {
config.segments.storage_history = Some(mode);
}
if let Some(receipt_logs) =
self.receipts_log_filter.as_ref().filter(|c| !c.is_empty()).cloned()
{
config.segments.receipts_log_filter = receipt_logs;
// need to remove the receipts segment filter entirely because that takes precedence
// over the logs filter
config.segments.receipts.take();
}
Some(config)
}
fn bodies_prune_mode<ChainSpec>(&self, chain_spec: &ChainSpec) -> Option<PruneMode>
where
ChainSpec: EthereumHardforks,
{
if self.bodies_pre_merge {
chain_spec
.ethereum_fork_activation(EthereumHardfork::Paris)
.block_number()
.map(PruneMode::Before)
} else if let Some(distance) = self.bodies_distance {
Some(PruneMode::Distance(distance))
} else {
self.bodies_before.map(PruneMode::Before)
}
}
const fn sender_recovery_prune_mode(&self) -> Option<PruneMode> {
if self.sender_recovery_full {
Some(PruneMode::Full)
} else if let Some(distance) = self.sender_recovery_distance {
Some(PruneMode::Distance(distance))
} else if let Some(block_number) = self.sender_recovery_before {
Some(PruneMode::Before(block_number))
} else {
None
}
}
const fn transaction_lookup_prune_mode(&self) -> Option<PruneMode> {
if self.transaction_lookup_full {
Some(PruneMode::Full)
} else if let Some(distance) = self.transaction_lookup_distance {
Some(PruneMode::Distance(distance))
} else if let Some(block_number) = self.transaction_lookup_before {
Some(PruneMode::Before(block_number))
} else {
None
}
}
fn receipts_prune_mode<ChainSpec>(&self, chain_spec: &ChainSpec) -> Option<PruneMode>
where
ChainSpec: EthereumHardforks,
{
if self.receipts_pre_merge {
chain_spec
.ethereum_fork_activation(EthereumHardfork::Paris)
.block_number()
.map(PruneMode::Before)
} else if self.receipts_full {
Some(PruneMode::Full)
} else if let Some(distance) = self.receipts_distance {
Some(PruneMode::Distance(distance))
} else {
self.receipts_before.map(PruneMode::Before)
}
}
const fn account_history_prune_mode(&self) -> Option<PruneMode> {
if self.account_history_full {
Some(PruneMode::Full)
} else if let Some(distance) = self.account_history_distance {
Some(PruneMode::Distance(distance))
} else if let Some(block_number) = self.account_history_before {
Some(PruneMode::Before(block_number))
} else {
None
}
}
const fn storage_history_prune_mode(&self) -> Option<PruneMode> {
if self.storage_history_full {
Some(PruneMode::Full)
} else if let Some(distance) = self.storage_history_distance {
Some(PruneMode::Distance(distance))
} else if let Some(block_number) = self.storage_history_before {
Some(PruneMode::Before(block_number))
} else {
None
}
}
}
/// Parses `,` separated pruning info into [`ReceiptsLogPruneConfig`].
pub(crate) fn parse_receipts_log_filter(
value: &str,
) -> Result<ReceiptsLogPruneConfig, ReceiptsLogError> {
let mut config = BTreeMap::new();
// Split out each of the filters.
let filters = value.split(',');
for filter in filters {
let parts: Vec<&str> = filter.split(':').collect();
if parts.len() < 2 {
return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string()));
}
// Parse the address
let address = parts[0]
.parse::<Address>()
.map_err(|_| ReceiptsLogError::InvalidAddress(parts[0].to_string()))?;
// Parse the prune mode
let prune_mode = match parts[1] {
"full" => PruneMode::Full,
s if s.starts_with("distance") => {
if parts.len() < 3 {
return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string()));
}
let distance =
parts[2].parse::<u64>().map_err(ReceiptsLogError::InvalidDistance)?;
PruneMode::Distance(distance)
}
s if s.starts_with("before") => {
if parts.len() < 3 {
return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string()));
}
let block_number =
parts[2].parse::<u64>().map_err(ReceiptsLogError::InvalidBlockNumber)?;
PruneMode::Before(block_number)
}
_ => return Err(ReceiptsLogError::InvalidPruneMode(parts[1].to_string())),
};
config.insert(address, prune_mode);
}
Ok(ReceiptsLogPruneConfig(config))
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::address;
use clap::Parser;
/// A helper type to parse Args more easily
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn pruning_args_sanity_check() {
let args = CommandParser::<PruningArgs>::parse_from([
"reth",
"--prune.receiptslogfilter",
"0x0000000000000000000000000000000000000003:before:5000000",
])
.args;
let mut config = ReceiptsLogPruneConfig::default();
config.0.insert(
address!("0x0000000000000000000000000000000000000003"),
PruneMode::Before(5000000),
);
assert_eq!(args.receipts_log_filter, Some(config));
}
#[test]
fn parse_receiptslogfilter() {
let default_args = PruningArgs::default();
let args = CommandParser::<PruningArgs>::parse_from(["reth"]).args;
assert_eq!(args, default_args);
}
#[test]
fn test_parse_receipts_log_filter() {
let filter1 = "0x0000000000000000000000000000000000000001:full";
let filter2 = "0x0000000000000000000000000000000000000002:distance:1000";
let filter3 = "0x0000000000000000000000000000000000000003:before:5000000";
let filters = [filter1, filter2, filter3].join(",");
// Args can be parsed.
let result = parse_receipts_log_filter(&filters);
assert!(result.is_ok());
let config = result.unwrap();
assert_eq!(config.0.len(), 3);
// Check that the args were parsed correctly.
let addr1: Address = "0x0000000000000000000000000000000000000001".parse().unwrap();
let addr2: Address = "0x0000000000000000000000000000000000000002".parse().unwrap();
let addr3: Address = "0x0000000000000000000000000000000000000003".parse().unwrap();
assert_eq!(config.0.get(&addr1), Some(&PruneMode::Full));
assert_eq!(config.0.get(&addr2), Some(&PruneMode::Distance(1000)));
assert_eq!(config.0.get(&addr3), Some(&PruneMode::Before(5000000)));
}
#[test]
fn test_parse_receipts_log_filter_invalid_filter_format() {
let result = parse_receipts_log_filter("invalid_format");
assert!(matches!(result, Err(ReceiptsLogError::InvalidFilterFormat(_))));
}
#[test]
fn test_parse_receipts_log_filter_invalid_address() {
let result = parse_receipts_log_filter("invalid_address:full");
assert!(matches!(result, Err(ReceiptsLogError::InvalidAddress(_))));
}
#[test]
fn test_parse_receipts_log_filter_invalid_prune_mode() {
let result =
parse_receipts_log_filter("0x0000000000000000000000000000000000000000:invalid_mode");
assert!(matches!(result, Err(ReceiptsLogError::InvalidPruneMode(_))));
}
#[test]
fn test_parse_receipts_log_filter_invalid_distance() {
let result = parse_receipts_log_filter(
"0x0000000000000000000000000000000000000000:distance:invalid_distance",
);
assert!(matches!(result, Err(ReceiptsLogError::InvalidDistance(_))));
}
#[test]
fn test_parse_receipts_log_filter_invalid_block_number() {
let result = parse_receipts_log_filter(
"0x0000000000000000000000000000000000000000:before:invalid_block",
);
assert!(matches!(result, Err(ReceiptsLogError::InvalidBlockNumber(_))));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/payload_builder.rs | crates/node/core/src/args/payload_builder.rs | use crate::{cli::config::PayloadBuilderConfig, version::default_extra_data};
use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE;
use alloy_eips::merge::SLOT_DURATION;
use clap::{
builder::{RangedU64ValueParser, TypedValueParser},
Arg, Args, Command,
};
use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms};
use std::{borrow::Cow, ffi::OsStr, time::Duration};
/// Parameters for configuring the Payload Builder
// NOTE: clap renders the `///` doc comments on the fields below as `--help` text,
// so editing them changes user-facing CLI output.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "Builder")]
pub struct PayloadBuilderArgs {
    /// Block extra data set by the payload builder.
    // Validated by `ExtraDataValueParser` below, which enforces the byte-size limit.
    #[arg(long = "builder.extradata", value_parser = ExtraDataValueParser::default(), default_value_t = default_extra_data())]
    pub extra_data: String,
    /// Target gas limit for built blocks.
    // `None` means no explicit gas-limit target was configured on the CLI.
    #[arg(long = "builder.gaslimit", value_name = "GAS_LIMIT")]
    pub gas_limit: Option<u64>,
    /// The interval at which the job should build a new payload after the last.
    ///
    /// Interval is specified in seconds or in milliseconds if the value ends with `ms`:
    /// * `50ms` -> 50 milliseconds
    /// * `1` -> 1 second
    #[arg(long = "builder.interval", value_parser = parse_duration_from_secs_or_ms, default_value = "1", value_name = "DURATION")]
    pub interval: Duration,
    /// The deadline for when the payload builder job should resolve.
    // Clap default "12" (seconds) matches `SLOT_DURATION` used in `Default::default`.
    #[arg(long = "builder.deadline", value_parser = parse_duration_from_secs, default_value = "12", value_name = "SECONDS")]
    pub deadline: Duration,
    /// Maximum number of tasks to spawn for building a payload.
    // The `1..` range parser rejects zero, so at least one task is always spawned.
    #[arg(long = "builder.max-tasks", default_value = "3", value_parser = RangedU64ValueParser::<usize>::new().range(1..))]
    pub max_payload_tasks: usize,
}
impl Default for PayloadBuilderArgs {
    /// Mirrors the clap defaults declared on [`PayloadBuilderArgs`].
    fn default() -> Self {
        // Fields are listed in struct-declaration order.
        Self {
            extra_data: default_extra_data(),
            gas_limit: None,
            interval: Duration::from_secs(1),
            deadline: SLOT_DURATION,
            max_payload_tasks: 3,
        }
    }
}
impl PayloadBuilderConfig for PayloadBuilderArgs {
    /// Returns the configured extra data without allocating.
    fn extra_data(&self) -> Cow<'_, str> {
        Cow::Borrowed(self.extra_data.as_str())
    }

    /// Optional gas-limit target for built blocks.
    fn gas_limit(&self) -> Option<u64> {
        self.gas_limit
    }

    /// Interval between successive payload builds.
    fn interval(&self) -> Duration {
        self.interval
    }

    /// Deadline by which a payload build job must resolve.
    fn deadline(&self) -> Duration {
        self.deadline
    }

    /// Maximum number of concurrent payload-building tasks.
    fn max_payload_tasks(&self) -> usize {
        self.max_payload_tasks
    }
}
/// Clap value parser for `--builder.extradata`.
///
/// Rejects values that are not valid UTF-8 or that exceed
/// `MAXIMUM_EXTRA_DATA_SIZE` bytes (see the [`TypedValueParser`] impl).
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct ExtraDataValueParser;
impl TypedValueParser for ExtraDataValueParser {
type Value = String;
fn parse_ref(
&self,
_cmd: &Command,
_arg: Option<&Arg>,
value: &OsStr,
) -> Result<Self::Value, clap::Error> {
let val =
value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
if val.len() > MAXIMUM_EXTRA_DATA_SIZE {
return Err(clap::Error::raw(
clap::error::ErrorKind::InvalidValue,
format!(
"Payload builder extradata size exceeds {MAXIMUM_EXTRA_DATA_SIZE}-byte limit"
),
))
}
Ok(val.to_string())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    /// Parses the given CLI fragment into [`PayloadBuilderArgs`], panicking on failure.
    fn parse(args: &[&str]) -> PayloadBuilderArgs {
        CommandParser::<PayloadBuilderArgs>::parse_from(args.iter().copied()).args
    }

    #[test]
    fn test_args_with_valid_max_tasks() {
        assert_eq!(parse(&["reth", "--builder.max-tasks", "1"]).max_payload_tasks, 1)
    }

    #[test]
    fn test_args_with_invalid_max_tasks() {
        // Zero is outside the accepted `1..` range and must be rejected.
        let parsed = CommandParser::<PayloadBuilderArgs>::try_parse_from([
            "reth",
            "--builder.max-tasks",
            "0",
        ]);
        assert!(parsed.is_err());
    }

    #[test]
    fn test_default_extra_data() {
        // Explicitly passing the default extradata value round-trips unchanged.
        let extra_data = default_extra_data();
        let parsed = parse(&["reth", "--builder.extradata", extra_data.as_str()]);
        assert_eq!(parsed.extra_data, extra_data);
    }

    #[test]
    fn test_invalid_extra_data() {
        // One byte over the limit must be rejected by `ExtraDataValueParser`.
        let oversized = "x".repeat(MAXIMUM_EXTRA_DATA_SIZE + 1);
        let parsed = CommandParser::<PayloadBuilderArgs>::try_parse_from([
            "reth",
            "--builder.extradata",
            oversized.as_str(),
        ]);
        assert!(parsed.is_err());
    }

    #[test]
    fn payload_builder_args_default_sanity_check() {
        // The `Default` impl must agree with the clap defaults.
        assert_eq!(parse(&["reth"]), PayloadBuilderArgs::default());
    }

    #[test]
    fn test_args_with_s_interval() {
        // A bare number is interpreted as seconds.
        assert_eq!(parse(&["reth", "--builder.interval", "50"]).interval, Duration::from_secs(50));
    }

    #[test]
    fn test_args_with_ms_interval() {
        // An `ms` suffix switches the unit to milliseconds.
        assert_eq!(
            parse(&["reth", "--builder.interval", "50ms"]).interval,
            Duration::from_millis(50)
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/dev.rs | crates/node/core/src/args/dev.rs | //! clap [Args](clap::Args) for Dev testnet configuration
use std::time::Duration;
use clap::Args;
use humantime::parse_duration;
/// Parameters for Dev testnet configuration
// NOTE: clap renders the `///` field docs below as `--help` text; edits there
// change user-facing CLI output.
#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)]
#[command(next_help_heading = "Dev testnet")]
pub struct DevArgs {
    /// Start the node in dev mode
    ///
    /// This mode uses a local proof-of-authority consensus engine with either fixed block times
    /// or automatically mined blocks.
    /// Disables network discovery and enables local http server.
    /// Prefunds 20 accounts derived by mnemonic "test test test test test test test test test test
    /// test junk" with 10 000 ETH each.
    // `--auto-mine` is accepted as an alias for `--dev`.
    #[arg(long = "dev", alias = "auto-mine", help_heading = "Dev testnet", verbatim_doc_comment)]
    pub dev: bool,
    /// How many transactions to mine per block.
    // Mutually exclusive with `block_time` (see `conflicts_with`).
    #[arg(
        long = "dev.block-max-transactions",
        help_heading = "Dev testnet",
        conflicts_with = "block_time"
    )]
    pub block_max_transactions: Option<usize>,
    /// Interval between blocks.
    ///
    /// Parses strings using [`humantime::parse_duration`]
    /// --dev.block-time 12s
    // Mutually exclusive with `block_max_transactions`.
    #[arg(
        long = "dev.block-time",
        help_heading = "Dev testnet",
        conflicts_with = "block_max_transactions",
        value_parser = parse_duration,
        verbatim_doc_comment
    )]
    pub block_time: Option<Duration>,
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    /// Parses the given CLI fragment into [`DevArgs`], panicking on failure.
    fn parse(args: &[&str]) -> DevArgs {
        CommandParser::<DevArgs>::parse_from(args.iter().copied()).args
    }

    #[test]
    fn test_parse_dev_args() {
        // No flags: dev mode stays off.
        assert_eq!(
            parse(&["reth"]),
            DevArgs { dev: false, block_max_transactions: None, block_time: None }
        );
        // `--dev` and its alias `--auto-mine` both enable dev mode.
        for flag in ["--dev", "--auto-mine"] {
            assert_eq!(
                parse(&["reth", flag]),
                DevArgs { dev: true, block_max_transactions: None, block_time: None }
            );
        }
        // Transaction-count triggered mining.
        assert_eq!(
            parse(&["reth", "--dev", "--dev.block-max-transactions", "2"]),
            DevArgs { dev: true, block_max_transactions: Some(2), block_time: None }
        );
        // Fixed block-time mining.
        assert_eq!(
            parse(&["reth", "--dev", "--dev.block-time", "1s"]),
            DevArgs {
                dev: true,
                block_max_transactions: None,
                block_time: Some(Duration::from_secs(1))
            }
        );
    }

    #[test]
    fn test_parse_dev_args_conflicts() {
        // The two mining triggers are mutually exclusive and must fail together.
        let parsed = CommandParser::<DevArgs>::try_parse_from([
            "reth",
            "--dev",
            "--dev.block-max-transactions",
            "2",
            "--dev.block-time",
            "1s",
        ]);
        assert!(parsed.is_err());
    }

    #[test]
    fn dev_args_default_sanity_check() {
        // The `Default` impl must agree with the clap defaults.
        assert_eq!(parse(&["reth"]), DevArgs::default());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/datadir_args.rs | crates/node/core/src/args/datadir_args.rs | //! clap [Args](clap::Args) for datadir config
use crate::dirs::{ChainPath, DataDirPath, MaybePlatformPath};
use clap::Args;
use reth_chainspec::Chain;
use std::path::PathBuf;
/// Parameters for datadir configuration
// NOTE: clap renders the `///` field docs below as `--help` text; edits there
// change user-facing CLI output.
#[derive(Debug, Args, PartialEq, Eq, Default, Clone)]
#[command(next_help_heading = "Datadir")]
pub struct DatadirArgs {
    /// The path to the data dir for all reth files and subdirectories.
    ///
    /// Defaults to the OS-specific data directory:
    ///
    /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
    /// - Windows: `{FOLDERID_RoamingAppData}/reth/`
    /// - macOS: `$HOME/Library/Application Support/reth/`
    // `default_value_t` relies on `MaybePlatformPath`'s `Default`/`Display` impls.
    #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)]
    pub datadir: MaybePlatformPath<DataDirPath>,
    /// The absolute path to store static files in.
    // Both `datadir.static-files` and the underscore alias are accepted.
    #[arg(
        long = "datadir.static-files",
        alias = "datadir.static_files",
        value_name = "PATH",
        verbatim_doc_comment
    )]
    pub static_files_path: Option<PathBuf>,
}
impl DatadirArgs {
    /// Resolves the final datadir path for the given chain.
    pub fn resolve_datadir(self, chain: Chain) -> ChainPath<DataDirPath> {
        // Clone the path up front: `self` is consumed by `unwrap_or_chain_default` below.
        self.datadir.clone().unwrap_or_chain_default(chain, self)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_parse_datadir_args() {
        // Parsing with no flags must match the `Default` implementation.
        let parsed = CommandParser::<DatadirArgs>::parse_from(["reth"]).args;
        assert_eq!(parsed, DatadirArgs::default());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/stage.rs | crates/node/core/src/args/stage.rs | //! Shared arguments related to stages
use derive_more::Display;
/// Represents a specific stage within the data pipeline.
///
/// Different stages within the pipeline have dedicated functionalities and operations.
// NOTE: clap renders the `///` variant docs below as CLI help text, and the derived
// `Ord`/`PartialOrd` follow declaration order, so reordering variants changes behavior.
// `Display` comes from derive_more and presumably prints the variant name — confirm.
#[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, clap::ValueEnum, Display)]
pub enum StageEnum {
    /// The headers stage within the pipeline.
    ///
    /// This stage handles operations related to block headers.
    Headers,
    /// The bodies stage within the pipeline.
    ///
    /// This stage deals with block bodies and their associated data.
    Bodies,
    /// The senders stage within the pipeline.
    ///
    /// Responsible for sender-related processes and data recovery.
    Senders,
    /// The execution stage within the pipeline.
    ///
    /// Handles the execution of transactions and contracts.
    Execution,
    /// The account hashing stage within the pipeline.
    ///
    /// Manages operations related to hashing account data.
    AccountHashing,
    /// The storage hashing stage within the pipeline.
    ///
    /// Manages operations related to hashing storage data.
    StorageHashing,
    /// The account and storage hashing stages within the pipeline.
    ///
    /// Covers general data hashing operations.
    Hashing,
    /// The merkle stage within the pipeline.
    ///
    /// Handles Merkle tree-related computations and data processing.
    Merkle,
    /// The transaction lookup stage within the pipeline.
    ///
    /// Deals with the retrieval and processing of transactions.
    TxLookup,
    /// The account history stage within the pipeline.
    ///
    /// Manages historical data related to accounts.
    AccountHistory,
    /// The storage history stage within the pipeline.
    ///
    /// Manages historical data related to storage.
    StorageHistory,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/engine.rs | crates/node/core/src/args/engine.rs | //! clap [Args](clap::Args) for engine purposes
use clap::Args;
use reth_engine_primitives::TreeConfig;
use crate::node_config::{
DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MAX_PROOF_TASK_CONCURRENCY,
DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES,
};
/// Parameters for configuring the engine driver.
// NOTE: clap renders the `///` field docs below as `--help` text; edits there
// change user-facing CLI output.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "Engine")]
pub struct EngineArgs {
    /// Configure persistence threshold for engine experimental.
    #[arg(long = "engine.persistence-threshold", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)]
    pub persistence_threshold: u64,
    /// Configure the target number of blocks to keep in memory.
    #[arg(long = "engine.memory-block-buffer-target", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)]
    pub memory_block_buffer_target: u64,
    /// Enable legacy state root
    #[arg(long = "engine.legacy-state-root", default_value = "false")]
    pub legacy_state_root_task_enabled: bool,
    /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-caching-and-prewarming
    /// if you want to disable caching and prewarming
    // Deprecated no-op: kept (hidden) so existing command lines don't break.
    #[arg(long = "engine.caching-and-prewarming", default_value = "true", hide = true)]
    #[deprecated]
    pub caching_and_prewarming_enabled: bool,
    /// Disable cross-block caching and parallel prewarming
    #[arg(long = "engine.disable-caching-and-prewarming")]
    pub caching_and_prewarming_disabled: bool,
    /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-parallel-sparse-trie
    /// if you want to disable usage of the `ParallelSparseTrie`.
    // Deprecated no-op: kept (hidden) so existing command lines don't break.
    #[deprecated]
    #[arg(long = "engine.parallel-sparse-trie", default_value = "true", hide = true)]
    pub parallel_sparse_trie_enabled: bool,
    /// Disable the parallel sparse trie in the engine.
    #[arg(long = "engine.disable-parallel-sparse-trie", default_value = "false")]
    pub parallel_sparse_trie_disabled: bool,
    /// Enable state provider latency metrics. This allows the engine to collect and report stats
    /// about how long state provider calls took during execution, but this does introduce slight
    /// overhead to state provider calls.
    #[arg(long = "engine.state-provider-metrics", default_value = "false")]
    pub state_provider_metrics: bool,
    /// Configure the size of cross-block cache in megabytes
    // Converted to bytes in `EngineArgs::tree_config`.
    #[arg(long = "engine.cross-block-cache-size", default_value_t = DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB)]
    pub cross_block_cache_size: u64,
    /// Enable comparing trie updates from the state root task to the trie updates from the regular
    /// state root calculation.
    #[arg(long = "engine.state-root-task-compare-updates")]
    pub state_root_task_compare_updates: bool,
    /// Enables accepting requests hash instead of an array of requests in `engine_newPayloadV4`.
    #[arg(long = "engine.accept-execution-requests-hash")]
    pub accept_execution_requests_hash: bool,
    /// Configure the maximum number of concurrent proof tasks
    #[arg(long = "engine.max-proof-task-concurrency", default_value_t = DEFAULT_MAX_PROOF_TASK_CONCURRENCY)]
    pub max_proof_task_concurrency: u64,
    /// Configure the number of reserved CPU cores for non-reth processes
    #[arg(long = "engine.reserved-cpu-cores", default_value_t = DEFAULT_RESERVED_CPU_CORES)]
    pub reserved_cpu_cores: usize,
    /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-precompile-cache
    /// if you want to disable precompile cache
    // Deprecated no-op: kept (hidden) so existing command lines don't break.
    #[arg(long = "engine.precompile-cache", default_value = "true", hide = true)]
    #[deprecated]
    pub precompile_cache_enabled: bool,
    /// Disable precompile cache
    #[arg(long = "engine.disable-precompile-cache", default_value = "false")]
    pub precompile_cache_disabled: bool,
    /// Enable state root fallback, useful for testing
    #[arg(long = "engine.state-root-fallback", default_value = "false")]
    pub state_root_fallback: bool,
    /// Always process payload attributes and begin a payload build process even if
    /// `forkchoiceState.headBlockHash` is already the canonical head or an ancestor. See
    /// `TreeConfig::always_process_payload_attributes_on_canonical_head` for more details.
    ///
    /// Note: This is a no-op on OP Stack.
    #[arg(
        long = "engine.always-process-payload-attributes-on-canonical-head",
        default_value = "false"
    )]
    pub always_process_payload_attributes_on_canonical_head: bool,
}
#[allow(deprecated)]
impl Default for EngineArgs {
    /// Mirrors the clap defaults declared on [`EngineArgs`].
    fn default() -> Self {
        // Fields are listed in struct-declaration order; the deprecated no-op
        // flags default to `true` to match their hidden clap defaults.
        Self {
            persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD,
            memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET,
            legacy_state_root_task_enabled: false,
            caching_and_prewarming_enabled: true,
            caching_and_prewarming_disabled: false,
            parallel_sparse_trie_enabled: true,
            parallel_sparse_trie_disabled: false,
            state_provider_metrics: false,
            cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB,
            state_root_task_compare_updates: false,
            accept_execution_requests_hash: false,
            max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY,
            reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES,
            precompile_cache_enabled: true,
            precompile_cache_disabled: false,
            state_root_fallback: false,
            always_process_payload_attributes_on_canonical_head: false,
        }
    }
}
impl EngineArgs {
    /// Creates a [`TreeConfig`] from the engine arguments.
    pub fn tree_config(&self) -> TreeConfig {
        // The CLI value is in megabytes; `TreeConfig` expects bytes.
        let cross_block_cache_bytes = self.cross_block_cache_size * 1024 * 1024;
        TreeConfig::default()
            .with_persistence_threshold(self.persistence_threshold)
            .with_memory_block_buffer_target(self.memory_block_buffer_target)
            .with_legacy_state_root(self.legacy_state_root_task_enabled)
            .without_caching_and_prewarming(self.caching_and_prewarming_disabled)
            .with_disable_parallel_sparse_trie(self.parallel_sparse_trie_disabled)
            .with_state_provider_metrics(self.state_provider_metrics)
            .with_always_compare_trie_updates(self.state_root_task_compare_updates)
            .with_cross_block_cache_size(cross_block_cache_bytes)
            .with_max_proof_task_concurrency(self.max_proof_task_concurrency)
            .with_reserved_cpu_cores(self.reserved_cpu_cores)
            .without_precompile_cache(self.precompile_cache_disabled)
            .with_state_root_fallback(self.state_root_fallback)
            .with_always_process_payload_attributes_on_canonical_head(
                self.always_process_payload_attributes_on_canonical_head,
            )
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_parse_engine_args() {
        // Parsing with no flags must match the `Default` implementation.
        let parsed = CommandParser::<EngineArgs>::parse_from(["reth"]).args;
        assert_eq!(parsed, EngineArgs::default());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/network.rs | crates/node/core/src/args/network.rs | //! clap [Args](clap::Args) for network related arguments.
use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
ops::Not,
path::PathBuf,
};
use crate::version::version_metadata;
use clap::Args;
use reth_chainspec::EthChainSpec;
use reth_config::Config;
use reth_discv4::{NodeRecord, DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT};
use reth_discv5::{
discv5::ListenConfig, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT,
DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL,
};
use reth_net_nat::{NatResolver, DEFAULT_NET_IF_NAME};
use reth_network::{
transactions::{
config::TransactionPropagationKind,
constants::{
tx_fetcher::{
DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS,
DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER,
},
tx_manager::{
DEFAULT_MAX_COUNT_PENDING_POOL_IMPORTS, DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER,
},
},
TransactionFetcherConfig, TransactionPropagationMode, TransactionsManagerConfig,
DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
},
HelloMessageWithProtocols, NetworkConfigBuilder, NetworkPrimitives, SessionsConfig,
};
use reth_network_peers::{mainnet_nodes, TrustedPeer};
use secp256k1::SecretKey;
use tracing::error;
/// Parameters for configuring the network more granularity via CLI
// NOTE: clap renders the `///` field docs below as `--help` text; edits there
// change user-facing CLI output.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "Networking")]
pub struct NetworkArgs {
    /// Arguments to setup discovery service.
    #[command(flatten)]
    pub discovery: DiscoveryArgs,
    #[expect(clippy::doc_markdown)]
    /// Comma separated enode URLs of trusted peers for P2P connections.
    ///
    /// --trusted-peers enode://abcd@192.168.0.1:30303
    #[arg(long, value_delimiter = ',')]
    pub trusted_peers: Vec<TrustedPeer>,
    /// Connect to or accept from trusted peers only
    #[arg(long)]
    pub trusted_only: bool,
    /// Comma separated enode URLs for P2P discovery bootstrap.
    ///
    /// Will fall back to a network-specific default if not specified.
    #[arg(long, value_delimiter = ',')]
    pub bootnodes: Option<Vec<TrustedPeer>>,
    /// Amount of DNS resolution requests retries to perform when peering.
    #[arg(long, default_value_t = 0)]
    pub dns_retries: usize,
    /// The path to the known peers file. Connected peers are dumped to this file on nodes
    /// shutdown, and read on startup. Cannot be used with `--no-persist-peers`.
    #[arg(long, value_name = "FILE", verbatim_doc_comment, conflicts_with = "no_persist_peers")]
    pub peers_file: Option<PathBuf>,
    /// Custom node identity
    #[arg(long, value_name = "IDENTITY", default_value = version_metadata().p2p_client_version.as_ref())]
    pub identity: String,
    /// Secret key to use for this node.
    ///
    /// This will also deterministically set the peer ID. If not specified, it will be set in the
    /// data dir for the chain being used.
    #[arg(long, value_name = "PATH")]
    pub p2p_secret_key: Option<PathBuf>,
    /// Do not persist peers.
    #[arg(long, verbatim_doc_comment)]
    pub no_persist_peers: bool,
    /// NAT resolution method (any|none|upnp|publicip|extip:\<IP\>)
    #[arg(long, default_value = "any")]
    pub nat: NatResolver,
    /// Network listening address
    // May be overridden at resolve time by `--net-if.experimental`; see `resolved_addr`.
    #[arg(long = "addr", value_name = "ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
    pub addr: IpAddr,
    /// Network listening port
    #[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
    pub port: u16,
    /// Maximum number of outbound requests. default: 100
    // NOTE(review): help text says "requests" but this caps outbound *peers* —
    // consider rewording (would change `--help` output).
    #[arg(long)]
    pub max_outbound_peers: Option<usize>,
    /// Maximum number of inbound requests. default: 30
    // NOTE(review): same "requests"/"peers" wording mismatch as above.
    #[arg(long)]
    pub max_inbound_peers: Option<usize>,
    /// Max concurrent `GetPooledTransactions` requests.
    #[arg(long = "max-tx-reqs", value_name = "COUNT", default_value_t = DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, verbatim_doc_comment)]
    pub max_concurrent_tx_requests: u32,
    /// Max concurrent `GetPooledTransactions` requests per peer.
    #[arg(long = "max-tx-reqs-peer", value_name = "COUNT", default_value_t = DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, verbatim_doc_comment)]
    pub max_concurrent_tx_requests_per_peer: u8,
    /// Max number of seen transactions to remember per peer.
    ///
    /// Default is 320 transaction hashes.
    #[arg(long = "max-seen-tx-history", value_name = "COUNT", default_value_t = DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, verbatim_doc_comment)]
    pub max_seen_tx_history: u32,
    #[arg(long = "max-pending-imports", value_name = "COUNT", default_value_t = DEFAULT_MAX_COUNT_PENDING_POOL_IMPORTS, verbatim_doc_comment)]
    /// Max number of transactions to import concurrently.
    pub max_pending_pool_imports: usize,
    /// Experimental, for usage in research. Sets the max accumulated byte size of transactions
    /// to pack in one response.
    /// Spec'd at 2MiB.
    #[arg(long = "pooled-tx-response-soft-limit", value_name = "BYTES", default_value_t = SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, verbatim_doc_comment)]
    pub soft_limit_byte_size_pooled_transactions_response: usize,
    /// Experimental, for usage in research. Sets the max accumulated byte size of transactions to
    /// request in one request.
    ///
    /// Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a
    /// transaction announcement (see `RLPx` specs). This allows a node to request a specific size
    /// response.
    ///
    /// By default, nodes request only 128 KiB worth of transactions, but should a peer request
    /// more, up to 2 MiB, a node will answer with more than 128 KiB.
    ///
    /// Default is 128 KiB.
    #[arg(long = "pooled-tx-pack-soft-limit", value_name = "BYTES", default_value_t = DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, verbatim_doc_comment)]
    pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize,
    /// Max capacity of cache of hashes for transactions pending fetch.
    #[arg(long = "max-tx-pending-fetch", value_name = "COUNT", default_value_t = DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, verbatim_doc_comment)]
    pub max_capacity_cache_txns_pending_fetch: u32,
    /// Name of network interface used to communicate with peers.
    ///
    /// If flag is set, but no value is passed, the default interface for docker `eth0` is tried.
    #[arg(long = "net-if.experimental", conflicts_with = "addr", value_name = "IF_NAME")]
    pub net_if: Option<String>,
    /// Transaction Propagation Policy
    ///
    /// The policy determines which peers transactions are gossiped to.
    #[arg(long = "tx-propagation-policy", default_value_t = TransactionPropagationKind::All)]
    pub tx_propagation_policy: TransactionPropagationKind,
    /// Disable transaction pool gossip
    ///
    /// Disables gossiping of transactions in the mempool to peers. This can be omitted for
    /// personal nodes, though providers should always opt to enable this flag.
    #[arg(long = "disable-tx-gossip")]
    pub disable_tx_gossip: bool,
    /// Sets the transaction propagation mode by determining how new pending transactions are
    /// propagated to other peers in full.
    ///
    /// Examples: sqrt, all, max:10
    #[arg(
        long = "tx-propagation-mode",
        default_value = "sqrt",
        help = "Transaction propagation mode (sqrt, all, max:<number>)"
    )]
    pub propagation_mode: TransactionPropagationMode,
}
impl NetworkArgs {
    /// Returns the resolved IP address.
    ///
    /// If `--net-if.experimental` was given, the interface's IP is looked up (an empty
    /// value falls back to the default docker interface name); on lookup failure the
    /// error is logged and `DEFAULT_DISCOVERY_ADDR` is used. Otherwise `self.addr`.
    pub fn resolved_addr(&self) -> IpAddr {
        if let Some(ref if_name) = self.net_if {
            let if_name = if if_name.is_empty() { DEFAULT_NET_IF_NAME } else { if_name };
            return match reth_net_nat::net_if::resolve_net_if_ip(if_name) {
                Ok(addr) => addr,
                Err(err) => {
                    error!(target: "reth::cli",
                        if_name,
                        %err,
                        "Failed to read network interface IP"
                    );
                    DEFAULT_DISCOVERY_ADDR
                }
            };
        }
        self.addr
    }

    /// Returns the resolved bootnodes if any are provided.
    // Resolution failures are silently dropped (`.ok()`), so the returned list may be
    // shorter than the configured one.
    pub fn resolved_bootnodes(&self) -> Option<Vec<NodeRecord>> {
        self.bootnodes.clone().map(|bootnodes| {
            bootnodes.into_iter().filter_map(|node| node.resolve_blocking().ok()).collect()
        })
    }

    /// Configures and returns a `TransactionsManagerConfig` based on the current settings.
    pub const fn transactions_manager_config(&self) -> TransactionsManagerConfig {
        TransactionsManagerConfig {
            transaction_fetcher_config: TransactionFetcherConfig::new(
                self.max_concurrent_tx_requests,
                self.max_concurrent_tx_requests_per_peer,
                self.soft_limit_byte_size_pooled_transactions_response,
                self.soft_limit_byte_size_pooled_transactions_response_on_pack_request,
                self.max_capacity_cache_txns_pending_fetch,
            ),
            max_transactions_seen_by_peer_history: self.max_seen_tx_history,
            propagation_mode: self.propagation_mode,
        }
    }

    /// Build a [`NetworkConfigBuilder`] from a [`Config`] and a [`EthChainSpec`], in addition to
    /// the values in this option struct.
    ///
    /// The `default_peers_file` will be used as the default location to store the persistent peers
    /// file if `no_persist_peers` is false, and there is no provided `peers_file`.
    ///
    /// Configured Bootnodes are prioritized, if unset, the chain spec bootnodes are used
    /// Priority order for bootnodes configuration:
    /// 1. --bootnodes flag
    /// 2. Network preset flags (e.g. --holesky)
    /// 3. default to mainnet nodes
    pub fn network_config<N: NetworkPrimitives>(
        &self,
        config: &Config,
        chain_spec: impl EthChainSpec,
        secret_key: SecretKey,
        default_peers_file: PathBuf,
    ) -> NetworkConfigBuilder<N> {
        let addr = self.resolved_addr();
        let chain_bootnodes = self
            .resolved_bootnodes()
            .unwrap_or_else(|| chain_spec.bootnodes().unwrap_or_else(mainnet_nodes));
        let peers_file = self.peers_file.clone().unwrap_or(default_peers_file);

        // Configure peer connections
        let peers_config = config
            .peers
            .clone()
            .with_max_inbound_opt(self.max_inbound_peers)
            .with_max_outbound_opt(self.max_outbound_peers);

        // Configure basic network stack
        // NOTE(review): `peer_config` is called twice in this chain (once with the
        // basic-nodes-from-file config, then with `peers_config`). If the builder
        // overwrites rather than merges, the first call's node list is discarded —
        // verify `NetworkConfigBuilder::peer_config` semantics.
        NetworkConfigBuilder::<N>::new(secret_key)
            .peer_config(config.peers_config_with_basic_nodes_from_file(
                self.persistent_peers_file(peers_file).as_deref(),
            ))
            .external_ip_resolver(self.nat)
            .sessions_config(
                SessionsConfig::default().with_upscaled_event_buffer(peers_config.max_peers()),
            )
            .peer_config(peers_config)
            .boot_nodes(chain_bootnodes.clone())
            .transactions_manager_config(self.transactions_manager_config())
            // Configure node identity
            .apply(|builder| {
                let peer_id = builder.get_peer_id();
                builder.hello_message(
                    HelloMessageWithProtocols::builder(peer_id)
                        .client_version(&self.identity)
                        .build(),
                )
            })
            // apply discovery settings
            .apply(|builder| {
                let rlpx_socket = (addr, self.port).into();
                self.discovery.apply_to_builder(builder, rlpx_socket, chain_bootnodes)
            })
            .listener_addr(SocketAddr::new(
                addr, // set discovery port based on instance number
                self.port,
            ))
            .discovery_addr(SocketAddr::new(
                self.discovery.addr,
                // set discovery port based on instance number
                self.discovery.port,
            ))
            .disable_tx_gossip(self.disable_tx_gossip)
    }

    /// If `no_persist_peers` is false then this returns the path to the persistent peers file.
    pub fn persistent_peers_file(&self, peers_file: PathBuf) -> Option<PathBuf> {
        self.no_persist_peers.not().then_some(peers_file)
    }

    /// Sets the p2p port to zero, to allow the OS to assign a random unused port when
    /// the network components bind to a socket.
    pub const fn with_unused_p2p_port(mut self) -> Self {
        self.port = 0;
        self
    }

    /// Sets the p2p and discovery ports to zero, allowing the OS to assign a random unused port
    /// when network components bind to sockets.
    pub const fn with_unused_ports(mut self) -> Self {
        self = self.with_unused_p2p_port();
        self.discovery = self.discovery.with_unused_discovery_port();
        self
    }

    /// Change networking port numbers based on the instance number, if provided.
    /// Ports are updated to `previous_value + instance - 1`
    ///
    /// # Panics
    /// Warning: if `instance` is zero in debug mode, this will panic.
    // NOTE(review): in debug builds the `+=` below also panics on u16 overflow for
    // very large instance numbers; in release it wraps.
    pub fn adjust_instance_ports(&mut self, instance: Option<u16>) {
        if let Some(instance) = instance {
            debug_assert_ne!(instance, 0, "instance must be non-zero");
            self.port += instance - 1;
            self.discovery.adjust_instance_ports(instance);
        }
    }

    /// Resolve all trusted peers at once
    // Fails fast: the first DNS resolution error aborts the whole join.
    pub async fn resolve_trusted_peers(&self) -> Result<Vec<NodeRecord>, std::io::Error> {
        futures::future::try_join_all(
            self.trusted_peers.iter().map(|peer| async move { peer.resolve().await }),
        )
        .await
    }
}
impl Default for NetworkArgs {
    /// Mirrors the clap defaults declared on [`NetworkArgs`].
    fn default() -> Self {
        // Fields are listed in struct-declaration order.
        Self {
            discovery: DiscoveryArgs::default(),
            trusted_peers: Vec::new(),
            trusted_only: false,
            bootnodes: None,
            dns_retries: 0,
            peers_file: None,
            identity: version_metadata().p2p_client_version.to_string(),
            p2p_secret_key: None,
            no_persist_peers: false,
            nat: NatResolver::Any,
            addr: DEFAULT_DISCOVERY_ADDR,
            port: DEFAULT_DISCOVERY_PORT,
            max_outbound_peers: None,
            max_inbound_peers: None,
            max_concurrent_tx_requests: DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS,
            max_concurrent_tx_requests_per_peer: DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER,
            max_seen_tx_history: DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER,
            max_pending_pool_imports: DEFAULT_MAX_COUNT_PENDING_POOL_IMPORTS,
            soft_limit_byte_size_pooled_transactions_response:
                SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
            soft_limit_byte_size_pooled_transactions_response_on_pack_request: DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
            max_capacity_cache_txns_pending_fetch: DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH,
            net_if: None,
            tx_propagation_policy: TransactionPropagationKind::default(),
            disable_tx_gossip: false,
            propagation_mode: TransactionPropagationMode::Sqrt,
        }
    }
}
/// Arguments to setup discovery
#[derive(Debug, Clone, Args, PartialEq, Eq)]
pub struct DiscoveryArgs {
/// Disable the discovery service.
#[arg(short, long, default_value_if("dev", "true", "true"))]
pub disable_discovery: bool,
/// Disable the DNS discovery.
#[arg(long, conflicts_with = "disable_discovery")]
pub disable_dns_discovery: bool,
/// Disable Discv4 discovery.
#[arg(long, conflicts_with = "disable_discovery")]
pub disable_discv4_discovery: bool,
/// Enable Discv5 discovery.
#[arg(long, conflicts_with = "disable_discovery")]
pub enable_discv5_discovery: bool,
/// Disable Nat discovery.
#[arg(long, conflicts_with = "disable_discovery")]
pub disable_nat: bool,
/// The UDP address to use for devp2p peer discovery version 4.
#[arg(id = "discovery.addr", long = "discovery.addr", value_name = "DISCOVERY_ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
pub addr: IpAddr,
/// The UDP port to use for devp2p peer discovery version 4.
#[arg(id = "discovery.port", long = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
pub port: u16,
/// The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx`
/// address, if it's also IPv4.
#[arg(id = "discovery.v5.addr", long = "discovery.v5.addr", value_name = "DISCOVERY_V5_ADDR", default_value = None)]
pub discv5_addr: Option<Ipv4Addr>,
/// The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx`
/// address, if it's also IPv6.
#[arg(id = "discovery.v5.addr.ipv6", long = "discovery.v5.addr.ipv6", value_name = "DISCOVERY_V5_ADDR_IPV6", default_value = None)]
pub discv5_addr_ipv6: Option<Ipv6Addr>,
/// The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is
/// IPv4, or `--discovery.v5.addr` is set.
#[arg(id = "discovery.v5.port", long = "discovery.v5.port", value_name = "DISCOVERY_V5_PORT",
default_value_t = DEFAULT_DISCOVERY_V5_PORT)]
pub discv5_port: u16,
/// The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is
/// IPv6, or `--discovery.addr.ipv6` is set.
#[arg(id = "discovery.v5.port.ipv6", long = "discovery.v5.port.ipv6", value_name = "DISCOVERY_V5_PORT_IPV6",
default_value = None, default_value_t = DEFAULT_DISCOVERY_V5_PORT)]
pub discv5_port_ipv6: u16,
/// The interval in seconds at which to carry out periodic lookup queries, for the whole
/// run of the program.
#[arg(id = "discovery.v5.lookup-interval", long = "discovery.v5.lookup-interval", value_name = "DISCOVERY_V5_LOOKUP_INTERVAL", default_value_t = DEFAULT_SECONDS_LOOKUP_INTERVAL)]
pub discv5_lookup_interval: u64,
/// The interval in seconds at which to carry out boost lookup queries, for a fixed number of
/// times, at bootstrap.
#[arg(id = "discovery.v5.bootstrap.lookup-interval", long = "discovery.v5.bootstrap.lookup-interval", value_name = "DISCOVERY_V5_BOOTSTRAP_LOOKUP_INTERVAL",
default_value_t = DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL)]
pub discv5_bootstrap_lookup_interval: u64,
/// The number of times to carry out boost lookup queries at bootstrap.
#[arg(id = "discovery.v5.bootstrap.lookup-countdown", long = "discovery.v5.bootstrap.lookup-countdown", value_name = "DISCOVERY_V5_BOOTSTRAP_LOOKUP_COUNTDOWN",
default_value_t = DEFAULT_COUNT_BOOTSTRAP_LOOKUPS)]
pub discv5_bootstrap_lookup_countdown: u64,
}
impl DiscoveryArgs {
/// Apply the discovery settings to the given [`NetworkConfigBuilder`]
pub fn apply_to_builder<N>(
&self,
mut network_config_builder: NetworkConfigBuilder<N>,
rlpx_tcp_socket: SocketAddr,
boot_nodes: impl IntoIterator<Item = NodeRecord>,
) -> NetworkConfigBuilder<N>
where
N: NetworkPrimitives,
{
if self.disable_discovery || self.disable_dns_discovery {
network_config_builder = network_config_builder.disable_dns_discovery();
}
if self.disable_discovery || self.disable_discv4_discovery {
network_config_builder = network_config_builder.disable_discv4_discovery();
}
if self.disable_nat {
// we only check for `disable-nat` here and not for disable discovery because nat:extip can be used without discovery: <https://github.com/paradigmxyz/reth/issues/14878>
network_config_builder = network_config_builder.disable_nat();
}
if self.should_enable_discv5() {
network_config_builder = network_config_builder
.discovery_v5(self.discovery_v5_builder(rlpx_tcp_socket, boot_nodes));
}
network_config_builder
}
/// Creates a [`reth_discv5::ConfigBuilder`] filling it with the values from this struct.
pub fn discovery_v5_builder(
&self,
rlpx_tcp_socket: SocketAddr,
boot_nodes: impl IntoIterator<Item = NodeRecord>,
) -> reth_discv5::ConfigBuilder {
let Self {
discv5_addr,
discv5_addr_ipv6,
discv5_port,
discv5_port_ipv6,
discv5_lookup_interval,
discv5_bootstrap_lookup_interval,
discv5_bootstrap_lookup_countdown,
..
} = self;
// Use rlpx address if none given
let discv5_addr_ipv4 = discv5_addr.or(match rlpx_tcp_socket {
SocketAddr::V4(addr) => Some(*addr.ip()),
SocketAddr::V6(_) => None,
});
let discv5_addr_ipv6 = discv5_addr_ipv6.or(match rlpx_tcp_socket {
SocketAddr::V4(_) => None,
SocketAddr::V6(addr) => Some(*addr.ip()),
});
reth_discv5::Config::builder(rlpx_tcp_socket)
.discv5_config(
reth_discv5::discv5::ConfigBuilder::new(ListenConfig::from_two_sockets(
discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, *discv5_port)),
discv5_addr_ipv6.map(|addr| SocketAddrV6::new(addr, *discv5_port_ipv6, 0, 0)),
))
.build(),
)
.add_unsigned_boot_nodes(boot_nodes)
.lookup_interval(*discv5_lookup_interval)
.bootstrap_lookup_interval(*discv5_bootstrap_lookup_interval)
.bootstrap_lookup_countdown(*discv5_bootstrap_lookup_countdown)
}
/// Returns true if discv5 discovery should be configured
const fn should_enable_discv5(&self) -> bool {
if self.disable_discovery {
return false;
}
self.enable_discv5_discovery ||
self.discv5_addr.is_some() ||
self.discv5_addr_ipv6.is_some()
}
/// Set the discovery port to zero, to allow the OS to assign a random unused port when
/// discovery binds to the socket.
pub const fn with_unused_discovery_port(mut self) -> Self {
self.port = 0;
self
}
/// Change networking port numbers based on the instance number.
/// Ports are updated to `previous_value + instance - 1`
///
/// # Panics
/// Warning: if `instance` is zero in debug mode, this will panic.
pub fn adjust_instance_ports(&mut self, instance: u16) {
debug_assert_ne!(instance, 0, "instance must be non-zero");
self.port += instance - 1;
self.discv5_port += instance - 1;
self.discv5_port_ipv6 += instance - 1;
}
}
impl Default for DiscoveryArgs {
fn default() -> Self {
Self {
disable_discovery: false,
disable_dns_discovery: false,
disable_discv4_discovery: false,
enable_discv5_discovery: false,
disable_nat: false,
addr: DEFAULT_DISCOVERY_ADDR,
port: DEFAULT_DISCOVERY_PORT,
discv5_addr: None,
discv5_addr_ipv6: None,
discv5_port: DEFAULT_DISCOVERY_V5_PORT,
discv5_port_ipv6: DEFAULT_DISCOVERY_V5_PORT,
discv5_lookup_interval: DEFAULT_SECONDS_LOOKUP_INTERVAL,
discv5_bootstrap_lookup_interval: DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL,
discv5_bootstrap_lookup_countdown: DEFAULT_COUNT_BOOTSTRAP_LOOKUPS,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use clap::Parser;
/// A helper type to parse Args more easily
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn parse_nat_args() {
let args = CommandParser::<NetworkArgs>::parse_from(["reth", "--nat", "none"]).args;
assert_eq!(args.nat, NatResolver::None);
let args =
CommandParser::<NetworkArgs>::parse_from(["reth", "--nat", "extip:0.0.0.0"]).args;
assert_eq!(args.nat, NatResolver::ExternalIp("0.0.0.0".parse().unwrap()));
}
#[test]
fn parse_peer_args() {
let args =
CommandParser::<NetworkArgs>::parse_from(["reth", "--max-outbound-peers", "50"]).args;
assert_eq!(args.max_outbound_peers, Some(50));
assert_eq!(args.max_inbound_peers, None);
let args = CommandParser::<NetworkArgs>::parse_from([
"reth",
"--max-outbound-peers",
"75",
"--max-inbound-peers",
"15",
])
.args;
assert_eq!(args.max_outbound_peers, Some(75));
assert_eq!(args.max_inbound_peers, Some(15));
}
#[test]
fn parse_trusted_peer_args() {
let args =
CommandParser::<NetworkArgs>::parse_from([
"reth",
"--trusted-peers",
"enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303,enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303"
])
.args;
assert_eq!(
args.trusted_peers,
vec![
"enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303".parse().unwrap(),
"enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303".parse().unwrap()
]
);
}
#[test]
fn parse_retry_strategy_args() {
let tests = vec![0, 10];
for retries in tests {
let args = CommandParser::<NetworkArgs>::parse_from([
"reth",
"--dns-retries",
retries.to_string().as_str(),
])
.args;
assert_eq!(args.dns_retries, retries);
}
}
#[test]
fn parse_disable_tx_gossip_args() {
let args = CommandParser::<NetworkArgs>::parse_from(["reth", "--disable-tx-gossip"]).args;
assert!(args.disable_tx_gossip);
}
#[test]
fn network_args_default_sanity_test() {
let default_args = NetworkArgs::default();
let args = CommandParser::<NetworkArgs>::parse_from(["reth"]).args;
assert_eq!(args, default_args);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/gas_price_oracle.rs | crates/node/core/src/args/gas_price_oracle.rs | use alloy_primitives::U256;
use clap::Args;
use reth_rpc_eth_types::GasPriceOracleConfig;
use reth_rpc_server_types::constants::gas_oracle::{
DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE,
DEFAULT_MAX_GAS_PRICE,
};
/// Parameters to configure Gas Price Oracle
#[derive(Debug, Clone, Copy, Args, PartialEq, Eq)]
#[command(next_help_heading = "Gas Price Oracle")]
pub struct GasPriceOracleArgs {
/// Number of recent blocks to check for gas price
#[arg(long = "gpo.blocks", default_value_t = DEFAULT_GAS_PRICE_BLOCKS)]
pub blocks: u32,
/// Gas Price below which gpo will ignore transactions
#[arg(long = "gpo.ignoreprice", default_value_t = DEFAULT_IGNORE_GAS_PRICE.to())]
pub ignore_price: u64,
/// Maximum transaction priority fee(or gasprice before London Fork) to be recommended by gpo
#[arg(long = "gpo.maxprice", default_value_t = DEFAULT_MAX_GAS_PRICE.to())]
pub max_price: u64,
/// The percentile of gas prices to use for the estimate
#[arg(long = "gpo.percentile", default_value_t = DEFAULT_GAS_PRICE_PERCENTILE)]
pub percentile: u32,
/// The default gas price to use if there are no blocks to use
#[arg(long = "gpo.default-suggested-fee")]
pub default_suggested_fee: Option<U256>,
}
impl GasPriceOracleArgs {
/// Returns a [`GasPriceOracleConfig`] from the arguments.
pub fn gas_price_oracle_config(&self) -> GasPriceOracleConfig {
let Self { blocks, ignore_price, max_price, percentile, default_suggested_fee } = self;
GasPriceOracleConfig {
max_price: Some(U256::from(*max_price)),
ignore_price: Some(U256::from(*ignore_price)),
percentile: *percentile,
blocks: *blocks,
default_suggested_fee: *default_suggested_fee,
..Default::default()
}
}
}
impl Default for GasPriceOracleArgs {
fn default() -> Self {
Self {
blocks: DEFAULT_GAS_PRICE_BLOCKS,
ignore_price: DEFAULT_IGNORE_GAS_PRICE.to(),
max_price: DEFAULT_MAX_GAS_PRICE.to(),
percentile: DEFAULT_GAS_PRICE_PERCENTILE,
default_suggested_fee: None,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use clap::Parser;
/// A helper type to parse Args more easily
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn test_parse_gpo_args() {
let args = CommandParser::<GasPriceOracleArgs>::parse_from(["reth"]).args;
assert_eq!(
args,
GasPriceOracleArgs {
blocks: DEFAULT_GAS_PRICE_BLOCKS,
ignore_price: DEFAULT_IGNORE_GAS_PRICE.to(),
max_price: DEFAULT_MAX_GAS_PRICE.to(),
percentile: DEFAULT_GAS_PRICE_PERCENTILE,
default_suggested_fee: None,
}
);
}
#[test]
fn gpo_args_default_sanity_test() {
let default_args = GasPriceOracleArgs::default();
let args = CommandParser::<GasPriceOracleArgs>::parse_from(["reth"]).args;
assert_eq!(args, default_args);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/benchmark_args.rs | crates/node/core/src/args/benchmark_args.rs | //! clap [Args](clap::Args) for benchmark configuration
use clap::Args;
use std::path::PathBuf;
/// Parameters for benchmark configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone)]
#[command(next_help_heading = "Benchmark")]
pub struct BenchmarkArgs {
/// Run the benchmark from a specific block.
#[arg(long, verbatim_doc_comment)]
pub from: Option<u64>,
/// Run the benchmark to a specific block.
#[arg(long, verbatim_doc_comment)]
pub to: Option<u64>,
/// Number of blocks to advance from the current head block.
/// When specified, automatically sets --from to current head + 1 and --to to current head +
/// advance. Cannot be used together with explicit --from and --to arguments.
#[arg(long, conflicts_with_all = &["from", "to"], verbatim_doc_comment)]
pub advance: Option<u64>,
/// Path to a JWT secret to use for the authenticated engine-API RPC server.
///
/// This will perform JWT authentication for all requests to the given engine RPC url.
///
/// If no path is provided, a secret will be generated and stored in the datadir under
/// `<DIR>/<CHAIN_ID>/jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default.
#[arg(
long = "jwt-secret",
alias = "jwtsecret",
value_name = "PATH",
global = true,
required = false
)]
pub auth_jwtsecret: Option<PathBuf>,
/// The RPC url to use for sending engine requests.
#[arg(
long,
value_name = "ENGINE_RPC_URL",
verbatim_doc_comment,
default_value = "http://localhost:8551"
)]
pub engine_rpc_url: String,
/// The path to the output directory for granular benchmark results.
#[arg(long, short, value_name = "BENCHMARK_OUTPUT", verbatim_doc_comment)]
pub output: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use clap::Parser;
/// A helper type to parse Args more easily
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn test_parse_benchmark_args() {
let default_args = BenchmarkArgs {
engine_rpc_url: "http://localhost:8551".to_string(),
..Default::default()
};
let args = CommandParser::<BenchmarkArgs>::parse_from(["reth-bench"]).args;
assert_eq!(args, default_args);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/log.rs | crates/node/core/src/args/log.rs | //! clap [Args](clap::Args) for logging configuration.
use crate::dirs::{LogsDir, PlatformPath};
use clap::{ArgAction, Args, ValueEnum};
use reth_tracing::{
tracing_subscriber::filter::Directive, FileInfo, FileWorkerGuard, LayerInfo, Layers, LogFormat,
RethTracer, Tracer,
};
use std::{fmt, fmt::Display};
use tracing::{level_filters::LevelFilter, Level};
/// Constant to convert megabytes to bytes
const MB_TO_BYTES: u64 = 1024 * 1024;
/// The log configuration.
#[derive(Debug, Args)]
#[command(next_help_heading = "Logging")]
pub struct LogArgs {
/// The format to use for logs written to stdout.
#[arg(long = "log.stdout.format", value_name = "FORMAT", global = true, default_value_t = LogFormat::Terminal)]
pub log_stdout_format: LogFormat,
/// The filter to use for logs written to stdout.
#[arg(long = "log.stdout.filter", value_name = "FILTER", global = true, default_value = "")]
pub log_stdout_filter: String,
/// The format to use for logs written to the log file.
#[arg(long = "log.file.format", value_name = "FORMAT", global = true, default_value_t = LogFormat::Terminal)]
pub log_file_format: LogFormat,
/// The filter to use for logs written to the log file.
#[arg(long = "log.file.filter", value_name = "FILTER", global = true, default_value = "debug")]
pub log_file_filter: String,
/// The path to put log files in.
#[arg(long = "log.file.directory", value_name = "PATH", global = true, default_value_t)]
pub log_file_directory: PlatformPath<LogsDir>,
/// The prefix name of the log files.
#[arg(long = "log.file.name", value_name = "NAME", global = true, default_value = "reth.log")]
pub log_file_name: String,
/// The maximum size (in MB) of one log file.
#[arg(long = "log.file.max-size", value_name = "SIZE", global = true, default_value_t = 200)]
pub log_file_max_size: u64,
/// The maximum amount of log files that will be stored. If set to 0, background file logging
/// is disabled.
#[arg(long = "log.file.max-files", value_name = "COUNT", global = true, default_value_t = 5)]
pub log_file_max_files: usize,
/// Write logs to journald.
#[arg(long = "log.journald", global = true)]
pub journald: bool,
/// The filter to use for logs written to journald.
#[arg(
long = "log.journald.filter",
value_name = "FILTER",
global = true,
default_value = "error"
)]
pub journald_filter: String,
/// Sets whether or not the formatter emits ANSI terminal escape codes for colors and other
/// text formatting.
#[arg(
long,
value_name = "COLOR",
global = true,
default_value_t = ColorMode::Always
)]
pub color: ColorMode,
/// The verbosity settings for the tracer.
#[command(flatten)]
pub verbosity: Verbosity,
}
impl LogArgs {
/// Creates a [`LayerInfo`] instance.
fn layer_info(&self, format: LogFormat, filter: String, use_color: bool) -> LayerInfo {
LayerInfo::new(
format,
self.verbosity.directive().to_string(),
filter,
use_color.then(|| self.color.to_string()),
)
}
/// File info from the current log options.
fn file_info(&self) -> FileInfo {
FileInfo::new(
self.log_file_directory.clone().into(),
self.log_file_name.clone(),
self.log_file_max_size * MB_TO_BYTES,
self.log_file_max_files,
)
}
/// Initializes tracing with the configured options from cli args.
///
/// Uses default layers for tracing. If you need to include custom layers,
/// use `init_tracing_with_layers` instead.
///
/// Returns the file worker guard if a file worker was configured.
pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> {
self.init_tracing_with_layers(Layers::new())
}
/// Initializes tracing with the configured options from cli args.
///
/// Returns the file worker guard, and the file name, if a file worker was configured.
pub fn init_tracing_with_layers(
&self,
layers: Layers,
) -> eyre::Result<Option<FileWorkerGuard>> {
let mut tracer = RethTracer::new();
let stdout = self.layer_info(self.log_stdout_format, self.log_stdout_filter.clone(), true);
tracer = tracer.with_stdout(stdout);
if self.journald {
tracer = tracer.with_journald(self.journald_filter.clone());
}
if self.log_file_max_files > 0 {
let info = self.file_info();
let file = self.layer_info(self.log_file_format, self.log_file_filter.clone(), false);
tracer = tracer.with_file(file, info);
}
let guard = tracer.init_with_layers(layers)?;
Ok(guard)
}
}
/// The color mode for the cli.
#[derive(Debug, Copy, Clone, ValueEnum, Eq, PartialEq)]
pub enum ColorMode {
/// Colors on
Always,
/// Colors on
Auto,
/// Colors off
Never,
}
impl Display for ColorMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Always => write!(f, "always"),
Self::Auto => write!(f, "auto"),
Self::Never => write!(f, "never"),
}
}
}
/// The verbosity settings for the cli.
#[derive(Debug, Copy, Clone, Args)]
#[command(next_help_heading = "Display")]
pub struct Verbosity {
/// Set the minimum log level.
///
/// -v Errors
/// -vv Warnings
/// -vvv Info
/// -vvvv Debug
/// -vvvvv Traces (warning: very verbose!)
#[arg(short, long, action = ArgAction::Count, global = true, default_value_t = 3, verbatim_doc_comment, help_heading = "Display")]
verbosity: u8,
/// Silence all log output.
#[arg(long, alias = "silent", short = 'q', global = true, help_heading = "Display")]
quiet: bool,
}
impl Verbosity {
/// Get the corresponding [Directive] for the given verbosity, or none if the verbosity
/// corresponds to silent.
pub fn directive(&self) -> Directive {
if self.quiet {
LevelFilter::OFF.into()
} else {
let level = match self.verbosity - 1 {
0 => Level::ERROR,
1 => Level::WARN,
2 => Level::INFO,
3 => Level::DEBUG,
_ => Level::TRACE,
};
level.into()
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/database.rs | crates/node/core/src/args/database.rs | //! clap [Args](clap::Args) for database configuration
use std::{fmt, str::FromStr, time::Duration};
use crate::version::default_client_version;
use clap::{
builder::{PossibleValue, TypedValueParser},
error::ErrorKind,
Arg, Args, Command, Error,
};
use reth_db::{mdbx::MaxReadTransactionDuration, ClientVersion};
use reth_storage_errors::db::LogLevel;
/// Parameters for database configuration
#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)]
#[command(next_help_heading = "Database")]
pub struct DatabaseArgs {
/// Database logging level. Levels higher than "notice" require a debug build.
#[arg(long = "db.log-level", value_parser = LogLevelValueParser::default())]
pub log_level: Option<LogLevel>,
/// Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an
/// NFS volume.
#[arg(long = "db.exclusive")]
pub exclusive: Option<bool>,
/// Maximum database size (e.g., 4TB, 8MB)
#[arg(long = "db.max-size", value_parser = parse_byte_size)]
pub max_size: Option<usize>,
/// Database growth step (e.g., 4GB, 4KB)
#[arg(long = "db.growth-step", value_parser = parse_byte_size)]
pub growth_step: Option<usize>,
/// Read transaction timeout in seconds, 0 means no timeout.
#[arg(long = "db.read-transaction-timeout")]
pub read_transaction_timeout: Option<u64>,
/// Maximum number of readers allowed to access the database concurrently.
#[arg(long = "db.max-readers")]
pub max_readers: Option<u64>,
}
impl DatabaseArgs {
/// Returns default database arguments with configured log level and client version.
pub fn database_args(&self) -> reth_db::mdbx::DatabaseArguments {
self.get_database_args(default_client_version())
}
/// Returns the database arguments with configured log level, client version,
/// max read transaction duration, and geometry.
pub fn get_database_args(
&self,
client_version: ClientVersion,
) -> reth_db::mdbx::DatabaseArguments {
let max_read_transaction_duration = match self.read_transaction_timeout {
None => None, // if not specified, use default value
Some(0) => Some(MaxReadTransactionDuration::Unbounded), // if 0, disable timeout
Some(secs) => Some(MaxReadTransactionDuration::Set(Duration::from_secs(secs))),
};
reth_db::mdbx::DatabaseArguments::new(client_version)
.with_log_level(self.log_level)
.with_exclusive(self.exclusive)
.with_max_read_transaction_duration(max_read_transaction_duration)
.with_geometry_max_size(self.max_size)
.with_growth_step(self.growth_step)
.with_max_readers(self.max_readers)
}
}
/// clap value parser for [`LogLevel`].
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct LogLevelValueParser;
impl TypedValueParser for LogLevelValueParser {
type Value = LogLevel;
fn parse_ref(
&self,
_cmd: &Command,
arg: Option<&Arg>,
value: &std::ffi::OsStr,
) -> Result<Self::Value, Error> {
let val =
value.to_str().ok_or_else(|| Error::raw(ErrorKind::InvalidUtf8, "Invalid UTF-8"))?;
val.parse::<LogLevel>().map_err(|err| {
let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
let possible_values = LogLevel::value_variants()
.iter()
.map(|v| format!("- {:?}: {}", v, v.help_message()))
.collect::<Vec<_>>()
.join("\n");
let msg = format!(
"Invalid value '{val}' for {arg}: {err}.\n Possible values:\n{possible_values}"
);
clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
})
}
fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
let values = LogLevel::value_variants()
.iter()
.map(|v| PossibleValue::new(v.variant_name()).help(v.help_message()));
Some(Box::new(values))
}
}
/// Size in bytes.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct ByteSize(pub usize);
impl From<ByteSize> for usize {
fn from(s: ByteSize) -> Self {
s.0
}
}
impl FromStr for ByteSize {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let s = s.trim().to_uppercase();
let parts: Vec<&str> = s.split_whitespace().collect();
let (num_str, unit) = match parts.len() {
1 => {
let (num, unit) =
s.split_at(s.find(|c: char| c.is_alphabetic()).unwrap_or(s.len()));
(num, unit)
}
2 => (parts[0], parts[1]),
_ => {
return Err("Invalid format. Use '<number><unit>' or '<number> <unit>'.".to_string())
}
};
let num: usize = num_str.parse().map_err(|_| "Invalid number".to_string())?;
let multiplier = match unit {
"B" | "" => 1, // Assume bytes if no unit is specified
"KB" => 1024,
"MB" => 1024 * 1024,
"GB" => 1024 * 1024 * 1024,
"TB" => 1024 * 1024 * 1024 * 1024,
_ => return Err(format!("Invalid unit: {unit}. Use B, KB, MB, GB, or TB.")),
};
Ok(Self(num * multiplier))
}
}
impl fmt::Display for ByteSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
const KB: usize = 1024;
const MB: usize = KB * 1024;
const GB: usize = MB * 1024;
const TB: usize = GB * 1024;
let (size, unit) = if self.0 >= TB {
(self.0 as f64 / TB as f64, "TB")
} else if self.0 >= GB {
(self.0 as f64 / GB as f64, "GB")
} else if self.0 >= MB {
(self.0 as f64 / MB as f64, "MB")
} else if self.0 >= KB {
(self.0 as f64 / KB as f64, "KB")
} else {
(self.0 as f64, "B")
};
write!(f, "{size:.2}{unit}")
}
}
/// Value parser function that supports various formats.
fn parse_byte_size(s: &str) -> Result<usize, String> {
s.parse::<ByteSize>().map(Into::into)
}
#[cfg(test)]
mod tests {
use super::*;
use clap::Parser;
use reth_db::mdbx::{GIGABYTE, KILOBYTE, MEGABYTE, TERABYTE};
/// A helper type to parse Args more easily
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn test_default_database_args() {
let default_args = DatabaseArgs::default();
let args = CommandParser::<DatabaseArgs>::parse_from(["reth"]).args;
assert_eq!(args, default_args);
}
#[test]
fn test_command_parser_with_valid_max_size() {
let cmd = CommandParser::<DatabaseArgs>::try_parse_from([
"reth",
"--db.max-size",
"4398046511104",
])
.unwrap();
assert_eq!(cmd.args.max_size, Some(TERABYTE * 4));
}
#[test]
fn test_command_parser_with_invalid_max_size() {
let result =
CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.max-size", "invalid"]);
assert!(result.is_err());
}
#[test]
fn test_command_parser_with_valid_growth_step() {
let cmd = CommandParser::<DatabaseArgs>::try_parse_from([
"reth",
"--db.growth-step",
"4294967296",
])
.unwrap();
assert_eq!(cmd.args.growth_step, Some(GIGABYTE * 4));
}
#[test]
fn test_command_parser_with_invalid_growth_step() {
let result =
CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.growth-step", "invalid"]);
assert!(result.is_err());
}
#[test]
fn test_command_parser_with_valid_max_size_and_growth_step_from_str() {
let cmd = CommandParser::<DatabaseArgs>::try_parse_from([
"reth",
"--db.max-size",
"2TB",
"--db.growth-step",
"1GB",
])
.unwrap();
assert_eq!(cmd.args.max_size, Some(TERABYTE * 2));
assert_eq!(cmd.args.growth_step, Some(GIGABYTE));
let cmd = CommandParser::<DatabaseArgs>::try_parse_from([
"reth",
"--db.max-size",
"12MB",
"--db.growth-step",
"2KB",
])
.unwrap();
assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12));
assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2));
// with spaces
let cmd = CommandParser::<DatabaseArgs>::try_parse_from([
"reth",
"--db.max-size",
"12 MB",
"--db.growth-step",
"2 KB",
])
.unwrap();
assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12));
assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2));
let cmd = CommandParser::<DatabaseArgs>::try_parse_from([
"reth",
"--db.max-size",
"1073741824",
"--db.growth-step",
"1048576",
])
.unwrap();
assert_eq!(cmd.args.max_size, Some(GIGABYTE));
assert_eq!(cmd.args.growth_step, Some(MEGABYTE));
}
#[test]
fn test_command_parser_max_size_and_growth_step_from_str_invalid_unit() {
let result =
CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.growth-step", "1 PB"]);
assert!(result.is_err());
let result =
CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.max-size", "2PB"]);
assert!(result.is_err());
}
#[test]
fn test_possible_values() {
// Initialize the LogLevelValueParser
let parser = LogLevelValueParser;
// Call the possible_values method
let possible_values: Vec<PossibleValue> = parser.possible_values().unwrap().collect();
// Expected possible values
let expected_values = vec![
PossibleValue::new("fatal")
.help("Enables logging for critical conditions, i.e. assertion failures"),
PossibleValue::new("error").help("Enables logging for error conditions"),
PossibleValue::new("warn").help("Enables logging for warning conditions"),
PossibleValue::new("notice")
.help("Enables logging for normal but significant condition"),
PossibleValue::new("verbose").help("Enables logging for verbose informational"),
PossibleValue::new("debug").help("Enables logging for debug-level messages"),
PossibleValue::new("trace").help("Enables logging for trace debug-level messages"),
PossibleValue::new("extra").help("Enables logging for extra debug-level messages"),
];
// Check that the possible values match the expected values
assert_eq!(possible_values.len(), expected_values.len());
for (actual, expected) in possible_values.iter().zip(expected_values.iter()) {
assert_eq!(actual.get_name(), expected.get_name());
assert_eq!(actual.get_help(), expected.get_help());
}
}
#[test]
fn test_command_parser_with_valid_log_level() {
let cmd =
CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.log-level", "Debug"])
.unwrap();
assert_eq!(cmd.args.log_level, Some(LogLevel::Debug));
}
#[test]
fn test_command_parser_with_invalid_log_level() {
let result =
CommandParser::<DatabaseArgs>::try_parse_from(["reth", "--db.log-level", "invalid"]);
assert!(result.is_err());
}
#[test]
fn test_command_parser_without_log_level() {
let cmd = CommandParser::<DatabaseArgs>::try_parse_from(["reth"]).unwrap();
assert_eq!(cmd.args.log_level, None);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/error.rs | crates/node/core/src/args/error.rs | use std::num::ParseIntError;
/// Error while parsing a `ReceiptsLogPruneConfig`
#[derive(thiserror::Error, Debug)]
#[expect(clippy::enum_variant_names)]
pub(crate) enum ReceiptsLogError {
/// The format of the filter is invalid.
#[error("invalid filter format: {0}")]
InvalidFilterFormat(String),
/// Address is invalid.
#[error("address is invalid: {0}")]
InvalidAddress(String),
/// The prune mode is not one of full, distance, before.
#[error("prune mode is invalid: {0}")]
InvalidPruneMode(String),
/// The distance value supplied is invalid.
#[error("distance is invalid: {0}")]
InvalidDistance(ParseIntError),
/// The block number supplied is invalid.
#[error("block number is invalid: {0}")]
InvalidBlockNumber(ParseIntError),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/debug.rs | crates/node/core/src/args/debug.rs | //! clap [Args](clap::Args) for debugging purposes
use alloy_primitives::B256;
use clap::{
builder::{PossibleValue, TypedValueParser},
Arg, Args, Command,
};
use std::{collections::HashSet, ffi::OsStr, fmt, path::PathBuf, str::FromStr};
use strum::{AsRefStr, EnumIter, IntoStaticStr, ParseError, VariantArray, VariantNames};
/// Parameters for debugging purposes
// NOTE: the `///` doc comments below double as `--help` text via clap's derive.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "Debug")]
pub struct DebugArgs {
    /// Flag indicating whether the node should be terminated after the pipeline sync.
    #[arg(long = "debug.terminate", help_heading = "Debug")]
    pub terminate: bool,
    /// Set the chain tip manually for testing purposes.
    ///
    /// NOTE: This is a temporary flag
    #[arg(long = "debug.tip", help_heading = "Debug")]
    pub tip: Option<B256>,
    /// Runs the sync only up to the specified block.
    #[arg(long = "debug.max-block", help_heading = "Debug")]
    pub max_block: Option<u64>,
    /// Runs a fake consensus client that advances the chain using recent block hashes
    /// on Etherscan. If specified, requires an `ETHERSCAN_API_KEY` environment variable.
    #[arg(
        long = "debug.etherscan",
        help_heading = "Debug",
        conflicts_with = "tip",
        conflicts_with = "rpc_consensus_ws",
        value_name = "ETHERSCAN_API_URL"
    )]
    pub etherscan: Option<Option<String>>,
    /// Runs a fake consensus client using blocks fetched from an RPC `WebSocket` endpoint.
    #[arg(
        long = "debug.rpc-consensus-ws",
        help_heading = "Debug",
        conflicts_with = "tip",
        conflicts_with = "etherscan"
    )]
    pub rpc_consensus_ws: Option<String>,
    /// If provided, the engine will skip `n` consecutive FCUs.
    #[arg(long = "debug.skip-fcu", help_heading = "Debug")]
    pub skip_fcu: Option<usize>,
    /// If provided, the engine will skip `n` consecutive new payloads.
    #[arg(long = "debug.skip-new-payload", help_heading = "Debug")]
    pub skip_new_payload: Option<usize>,
    /// If provided, the chain will be reorged at specified frequency.
    #[arg(long = "debug.reorg-frequency", help_heading = "Debug")]
    pub reorg_frequency: Option<usize>,
    /// The reorg depth for chain reorgs.
    #[arg(long = "debug.reorg-depth", requires = "reorg_frequency", help_heading = "Debug")]
    pub reorg_depth: Option<usize>,
    /// The path to store engine API messages at.
    /// If specified, all of the intercepted engine API messages
    /// will be written to specified location.
    #[arg(long = "debug.engine-api-store", help_heading = "Debug", value_name = "PATH")]
    pub engine_api_store: Option<PathBuf>,
    /// Determines which type of invalid block hook to install
    ///
    /// Example: `witness,prestate`
    #[arg(
        long = "debug.invalid-block-hook",
        help_heading = "Debug",
        value_parser = InvalidBlockSelectionValueParser::default(),
        default_value = "witness"
    )]
    pub invalid_block_hook: Option<InvalidBlockSelection>,
    /// The RPC URL of a healthy node to use for comparing invalid block hook results against.
    ///
    /// Debug setting that enables execution witness comparison for troubleshooting bad blocks.
    /// When enabled, the node will collect execution witnesses from the specified source and
    /// compare them against local execution when a bad block is encountered, helping identify
    /// discrepancies in state execution.
    // FIX: the second doc line previously read `///Debug setting…` (no space after
    // `///`); with `verbatim_doc_comment` that malformed line was emitted verbatim
    // into the long `--help` text.
    #[arg(
        long = "debug.healthy-node-rpc-url",
        help_heading = "Debug",
        value_name = "URL",
        verbatim_doc_comment
    )]
    pub healthy_node_rpc_url: Option<String>,
    /// The URL of the ethstats server to connect to.
    /// Example: `nodename:secret@host:port`
    #[arg(long = "ethstats", help_heading = "Debug")]
    pub ethstats: Option<String>,
}
impl Default for DebugArgs {
    // Manual impl (rather than `#[derive(Default)]`) because `invalid_block_hook`
    // defaults to `Some(witness)`, mirroring clap's `default_value = "witness"`
    // on the struct field. Keep the two in sync.
    fn default() -> Self {
        Self {
            terminate: false,
            tip: None,
            max_block: None,
            etherscan: None,
            rpc_consensus_ws: None,
            skip_fcu: None,
            skip_new_payload: None,
            reorg_frequency: None,
            reorg_depth: None,
            engine_api_store: None,
            invalid_block_hook: Some(InvalidBlockSelection::default()),
            healthy_node_rpc_url: None,
            ethstats: None,
        }
    }
}
/// Describes the invalid block hooks that should be installed.
///
/// Dereferences to the underlying [`HashSet`] of [`InvalidBlockHookType`] via
/// `derive_more::Deref`, so read-only set operations are available directly.
///
/// # Example
///
/// Create a [`InvalidBlockSelection`] from a selection.
///
/// ```
/// use reth_node_core::args::{InvalidBlockHookType, InvalidBlockSelection};
/// let config: InvalidBlockSelection = vec![InvalidBlockHookType::Witness].into();
/// ```
#[derive(Debug, Clone, PartialEq, Eq, derive_more::Deref)]
pub struct InvalidBlockSelection(HashSet<InvalidBlockHookType>);
impl Default for InvalidBlockSelection {
fn default() -> Self {
Self([InvalidBlockHookType::Witness].into())
}
}
impl InvalidBlockSelection {
    /// Creates a new _unique_ [`InvalidBlockSelection`] from the given items.
    ///
    /// # Note
    ///
    /// This will dedupe the selection; ordering is not preserved, as the
    /// selection is stored in a `HashSet`.
    ///
    /// # Errors
    ///
    /// Returns the conversion error of the first item whose `TryInto` fails.
    ///
    /// # Example
    ///
    /// Create a selection from the [`InvalidBlockHookType`] string identifiers
    ///
    /// ```
    /// use reth_node_core::args::{InvalidBlockHookType, InvalidBlockSelection};
    /// let selection = vec!["witness", "prestate", "opcode"];
    /// let config = InvalidBlockSelection::try_from_selection(selection).unwrap();
    /// assert_eq!(
    ///     config,
    ///     InvalidBlockSelection::from([
    ///         InvalidBlockHookType::Witness,
    ///         InvalidBlockHookType::PreState,
    ///         InvalidBlockHookType::Opcode
    ///     ])
    /// );
    /// ```
    ///
    /// Create a unique selection from the [`InvalidBlockHookType`] string identifiers
    ///
    /// ```
    /// use reth_node_core::args::{InvalidBlockHookType, InvalidBlockSelection};
    /// let selection = vec!["witness", "prestate", "opcode", "witness", "prestate"];
    /// let config = InvalidBlockSelection::try_from_selection(selection).unwrap();
    /// assert_eq!(
    ///     config,
    ///     InvalidBlockSelection::from([
    ///         InvalidBlockHookType::Witness,
    ///         InvalidBlockHookType::PreState,
    ///         InvalidBlockHookType::Opcode
    ///     ])
    /// );
    /// ```
    pub fn try_from_selection<I, T>(selection: I) -> Result<Self, T::Error>
    where
        I: IntoIterator<Item = T>,
        T: TryInto<InvalidBlockHookType>,
    {
        // Collecting into `Result<Self, _>` short-circuits on the first failed
        // conversion; successes are gathered through the `FromIterator` impl.
        selection.into_iter().map(TryInto::try_into).collect()
    }
    /// Clones the set of configured [`InvalidBlockHookType`].
    pub fn to_selection(&self) -> HashSet<InvalidBlockHookType> {
        self.0.clone()
    }
}
impl From<&[InvalidBlockHookType]> for InvalidBlockSelection {
fn from(s: &[InvalidBlockHookType]) -> Self {
Self(s.iter().copied().collect())
}
}
impl From<Vec<InvalidBlockHookType>> for InvalidBlockSelection {
fn from(s: Vec<InvalidBlockHookType>) -> Self {
Self(s.into_iter().collect())
}
}
impl<const N: usize> From<[InvalidBlockHookType; N]> for InvalidBlockSelection {
fn from(s: [InvalidBlockHookType; N]) -> Self {
Self(s.iter().copied().collect())
}
}
impl FromIterator<InvalidBlockHookType> for InvalidBlockSelection {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = InvalidBlockHookType>,
{
Self(iter.into_iter().collect())
}
}
impl FromStr for InvalidBlockSelection {
    type Err = ParseError;
    /// Parses a comma-separated list of hook names (e.g. `witness,prestate`).
    /// An empty string yields an empty selection.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.is_empty() {
            return Ok(Self(Default::default()))
        }
        // Trim whitespace around each entry, then convert entries one by one.
        // (The previous version tacked `.peekable()` onto this iterator without
        // ever peeking — dead adapter, removed.)
        Self::try_from_selection(s.split(',').map(str::trim))
    }
}
impl fmt::Display for InvalidBlockSelection {
    /// Renders the selection as `[a, b, c]`.
    ///
    /// NOTE(review): the inner `HashSet`'s iteration order is unspecified, so
    /// the rendered order may differ between runs — confirm no caller relies
    /// on a stable ordering of this output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{}]", self.0.iter().map(|s| s.to_string()).collect::<Vec<_>>().join(", "))
    }
}
/// clap value parser for [`InvalidBlockSelection`].
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct InvalidBlockSelectionValueParser;
impl TypedValueParser for InvalidBlockSelectionValueParser {
    type Value = InvalidBlockSelection;
    /// Parses the raw CLI value, rejecting non-UTF-8 input and mapping parse
    /// failures to a clap error that lists the valid hook names.
    fn parse_ref(
        &self,
        _cmd: &Command,
        arg: Option<&Arg>,
        value: &OsStr,
    ) -> Result<Self::Value, clap::Error> {
        let val =
            value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
        val.parse::<InvalidBlockSelection>().map_err(|err| {
            // Name the offending argument when clap supplies one, else a placeholder.
            let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
            // `join` works directly on the `&[&str]` slice; the previous
            // `.to_vec()` allocated an intermediate Vec for no benefit.
            let possible_values = InvalidBlockHookType::all_variant_names().join(",");
            let msg = format!(
                "Invalid value '{val}' for {arg}: {err}.\n [possible values: {possible_values}]"
            );
            clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
        })
    }
    /// Advertises the valid hook names for shell completion and help output.
    fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
        let values = InvalidBlockHookType::all_variant_names().iter().map(PossibleValue::new);
        Some(Box::new(values))
    }
}
/// The type of invalid block hook to install
///
/// NOTE(review): strum's `serialize_all = "kebab-case"` renders `PreState` as
/// `pre-state` for `Display`/`VariantNames`, while the manual `FromStr` impl
/// accepts the `prestate` spelling — keep the two in sync.
#[derive(
    Debug,
    Clone,
    Copy,
    PartialEq,
    Eq,
    Hash,
    AsRefStr,
    IntoStaticStr,
    VariantNames,
    VariantArray,
    EnumIter,
)]
#[strum(serialize_all = "kebab-case")]
pub enum InvalidBlockHookType {
    /// A witness value enum
    Witness,
    /// A prestate trace value enum
    PreState,
    /// An opcode trace value enum
    Opcode,
}
impl FromStr for InvalidBlockHookType {
    type Err = ParseError;
    /// Parses a hook name into its variant.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "witness" => Self::Witness,
            // Accept both the historical spelling ("prestate") and the
            // kebab-case name ("pre-state") that strum's `VariantNames` /
            // `possible_values` advertises in `--help`; previously the
            // advertised form was rejected, breaking the Display -> FromStr
            // round trip for `PreState`.
            "prestate" | "pre-state" => Self::PreState,
            "opcode" => Self::Opcode,
            _ => return Err(ParseError::VariantNotFound),
        })
    }
}
impl TryFrom<&str> for InvalidBlockHookType {
type Error = ParseError;
fn try_from(s: &str) -> Result<Self, <Self as TryFrom<&str>>::Error> {
FromStr::from_str(s)
}
}
impl fmt::Display for InvalidBlockHookType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `pad` (rather than `write!`) honors width/alignment/fill flags from
        // the format spec while emitting the strum-derived kebab-case name.
        f.pad(self.as_ref())
    }
}
impl InvalidBlockHookType {
    /// Returns all variant names of the enum
    pub const fn all_variant_names() -> &'static [&'static str] {
        // Fully qualified: both derived traits expose an associated `VARIANTS`
        // item (`VariantNames` as `&[&str]`, `VariantArray` as `&[Self]`), so a
        // bare `Self::VARIANTS` would be ambiguous.
        <Self as VariantNames>::VARIANTS
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }
    /// Parses `--debug.invalid-block-hook <selection>` and returns the args.
    /// Extracted to remove the `parse_from` boilerplate repeated per case.
    fn parse_hooks(selection: &str) -> DebugArgs {
        CommandParser::<DebugArgs>::parse_from(["reth", "--debug.invalid-block-hook", selection])
            .args
    }
    #[test]
    fn test_parse_default_debug_args() {
        // With no flags, parsing must agree with `DebugArgs::default()`.
        let default_args = DebugArgs::default();
        let args = CommandParser::<DebugArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }
    #[test]
    fn test_parse_invalid_block_args() {
        // A single hook name selects exactly that hook.
        let expected_args = DebugArgs {
            invalid_block_hook: Some(InvalidBlockSelection::from([InvalidBlockHookType::Witness])),
            ..Default::default()
        };
        assert_eq!(parse_hooks("witness"), expected_args);
        // Comma-separated lists dedupe and ignore ordering: all of these
        // spellings select the same {witness, prestate} set.
        let expected_args = DebugArgs {
            invalid_block_hook: Some(InvalidBlockSelection::from([
                InvalidBlockHookType::Witness,
                InvalidBlockHookType::PreState,
            ])),
            ..Default::default()
        };
        for selection in [
            "witness,prestate",
            "witness,prestate,prestate",
            "witness,witness,prestate",
            "prestate,witness,prestate",
        ] {
            assert_eq!(parse_hooks(selection), expected_args);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/ress_args.rs | crates/node/core/src/args/ress_args.rs | use clap::Args;
// Defaults below must stay in sync with the `Default` impl for `RessArgs`.
/// The default number of maximum active connections.
const MAX_ACTIVE_CONNECTIONS_DEFAULT: u64 = 5;
/// The default maximum witness lookback window.
const MAX_WITNESS_WINDOW_DEFAULT: u64 = 1024;
/// The default maximum number of witnesses to generate in parallel.
const WITNESS_MAX_PARALLEL_DEFAULT: usize = 5;
/// The default witness cache size.
const WITNESS_CACHE_SIZE_DEFAULT: u32 = 10;
/// Parameters for configuring the `ress` subprotocol.
// Field doc comments double as `--help` text via clap's derive.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "Ress")]
pub struct RessArgs {
    /// Enable support for `ress` subprotocol.
    #[arg(long = "ress.enable", default_value_t = false)]
    pub enabled: bool,
    /// The maximum number of active connections for `ress` subprotocol.
    #[arg(long = "ress.max-active-connections", default_value_t = MAX_ACTIVE_CONNECTIONS_DEFAULT)]
    pub max_active_connections: u64,
    /// The maximum witness lookback window.
    #[arg(long = "ress.max-witness-window", default_value_t = MAX_WITNESS_WINDOW_DEFAULT)]
    pub max_witness_window: u64,
    /// The maximum number of witnesses to generate in parallel.
    #[arg(long = "ress.witness-max-parallel", default_value_t = WITNESS_MAX_PARALLEL_DEFAULT)]
    pub witness_max_parallel: usize,
    /// Witness cache size.
    #[arg(long = "ress.witness-cache-size", default_value_t = WITNESS_CACHE_SIZE_DEFAULT)]
    pub witness_cache_size: u32,
}
impl Default for RessArgs {
    // Mirrors the clap `default_value_t` attributes on `RessArgs` so that
    // `Default::default()` matches a bare CLI invocation.
    fn default() -> Self {
        Self {
            enabled: false,
            max_active_connections: MAX_ACTIVE_CONNECTIONS_DEFAULT,
            max_witness_window: MAX_WITNESS_WINDOW_DEFAULT,
            witness_max_parallel: WITNESS_MAX_PARALLEL_DEFAULT,
            witness_cache_size: WITNESS_CACHE_SIZE_DEFAULT,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/rpc_state_cache.rs | crates/node/core/src/args/rpc_state_cache.rs | use clap::Args;
use reth_rpc_server_types::constants::cache::{
DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN,
DEFAULT_RECEIPT_CACHE_MAX_LEN,
};
/// Parameters to configure RPC state cache.
// Field doc comments double as `--help` text via clap's derive; defaults come
// from `reth_rpc_server_types::constants::cache`.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "RPC State Cache")]
pub struct RpcStateCacheArgs {
    /// Max number of blocks in cache.
    #[arg(
        long = "rpc-cache.max-blocks",
        default_value_t = DEFAULT_BLOCK_CACHE_MAX_LEN,
    )]
    pub max_blocks: u32,
    /// Max number receipts in cache.
    #[arg(
        long = "rpc-cache.max-receipts",
        default_value_t = DEFAULT_RECEIPT_CACHE_MAX_LEN,
    )]
    pub max_receipts: u32,
    /// Max number of headers in cache.
    // `rpc-cache.max-envs` is kept as an alias — presumably for backwards
    // compatibility with an older flag name; confirm before removing.
    #[arg(
        long = "rpc-cache.max-headers",
        alias = "rpc-cache.max-envs",
        default_value_t = DEFAULT_HEADER_CACHE_MAX_LEN,
    )]
    pub max_headers: u32,
    /// Max number of concurrent database requests.
    #[arg(
        long = "rpc-cache.max-concurrent-db-requests",
        default_value_t = DEFAULT_CONCURRENT_DB_REQUESTS,
    )]
    pub max_concurrent_db_requests: usize,
}
impl RpcStateCacheArgs {
    /// Sets the Cache sizes to zero, effectively disabling caching.
    ///
    /// Only the three cache-length fields are zeroed; `max_concurrent_db_requests`
    /// is a concurrency limit rather than a cache size and is left untouched.
    pub const fn set_zero_lengths(&mut self) {
        self.max_blocks = 0;
        self.max_receipts = 0;
        self.max_headers = 0;
    }
}
impl Default for RpcStateCacheArgs {
    // Mirrors the clap `default_value_t` attributes so `Default` and CLI
    // parsing produce identical configurations.
    fn default() -> Self {
        Self {
            max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN,
            max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN,
            max_headers: DEFAULT_HEADER_CACHE_MAX_LEN,
            max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/types.rs | crates/node/core/src/args/types.rs | //! Additional helper types for CLI parsing.
use std::{fmt, num::ParseIntError, str::FromStr};
/// A macro that generates types that maps "0" to "None" when parsing CLI arguments.
macro_rules! zero_as_none {
    ($type_name:ident, $inner_type:ty) => {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        /// A helper type that maps `0` to `None` when parsing CLI arguments.
        pub struct $type_name(pub Option<$inner_type>);
        impl $type_name {
            /// Wraps `value` in `Some`.
            ///
            /// NOTE(review): unlike `From`/`FromStr`, this does NOT apply the
            /// zero-to-`None` mapping, so `new(0)` yields `Some(0)` — confirm
            /// that is intended at every call site. (The previous doc comment,
            /// "Returns the inner value.", described the wrong operation.)
            pub const fn new(value: $inner_type) -> Self {
                Self(Some(value))
            }
            /// Returns the inner value or `$inner_type::MAX` if `None`.
            pub fn unwrap_or_max(self) -> $inner_type {
                self.0.unwrap_or(<$inner_type>::MAX)
            }
        }
        impl std::fmt::Display for $type_name {
            // Renders `None` back as "0" — the inverse of the `From` mapping,
            // so `default_value_t` round-trips through clap.
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                match self.0 {
                    Some(value) => write!(f, "{}", value),
                    None => write!(f, "0"),
                }
            }
        }
        impl From<$inner_type> for $type_name {
            #[inline]
            fn from(value: $inner_type) -> Self {
                // Zero is the CLI sentinel for "unset"/"no limit".
                Self(if value == 0 { None } else { Some(value) })
            }
        }
        impl std::str::FromStr for $type_name {
            type Err = std::num::ParseIntError;
            fn from_str(s: &str) -> Result<Self, Self::Err> {
                // Parse the raw integer first, then apply the zero-to-`None` mapping.
                let value = s.parse::<$inner_type>()?;
                Ok(Self::from(value))
            }
        }
    };
}
zero_as_none!(ZeroAsNoneU64, u64);
zero_as_none!(ZeroAsNoneU32, u32);
/// A macro that generates types that map "max" to "MAX" when parsing CLI arguments.
macro_rules! max_values {
    ($name:ident, $ty:ident) => {
        #[derive(Debug, Clone, Copy, PartialEq, Eq)]
        /// A helper type for parsing "max" as the maximum value of the specified type.
        pub struct $name(pub $ty);
        impl $name {
            /// Returns the inner value.
            pub const fn get(&self) -> $ty {
                self.0
            }
        }
        impl fmt::Display for $name {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(f, "{}", self.0)
            }
        }
        impl From<$ty> for $name {
            #[inline]
            fn from(value: $ty) -> Self {
                Self(value)
            }
        }
        impl FromStr for $name {
            type Err = ParseIntError;
            fn from_str(s: &str) -> Result<Self, Self::Err> {
                // "max" is accepted in any capitalization; anything else must
                // parse as a plain integer of the target type.
                if s.eq_ignore_ascii_case("max") {
                    Ok($name(<$ty>::MAX))
                } else {
                    s.parse::<$ty>().map($name)
                }
            }
        }
    };
}
max_values!(MaxU32, u32);
max_values!(MaxU64, u64);
/// A helper type that supports parsing max or delegates to another parser
#[derive(Debug, Clone)]
pub struct MaxOr<T> {
    /// The inner parser
    inner: T,
}
impl<T> MaxOr<T>
where
    T: clap::builder::TypedValueParser,
    T::Value: Into<u64>,
{
    /// Creates a new instance with the given inner parser
    ///
    /// The wrapped parser's output must widen losslessly into `u64`
    /// (`T::Value: Into<u64>`).
    pub const fn new(inner: T) -> Self {
        Self { inner }
    }
}
impl<T> clap::builder::TypedValueParser for MaxOr<T>
where
    T: clap::builder::TypedValueParser,
    T::Value: Into<u64>,
{
    type Value = u64;
    /// Returns `u64::MAX` for the literal "max" (any capitalization); every
    /// other value — including non-UTF-8 input — is handed to the wrapped
    /// parser and widened to `u64`.
    fn parse_ref(
        &self,
        cmd: &clap::Command,
        arg: Option<&clap::Arg>,
        value: &std::ffi::OsStr,
    ) -> Result<Self::Value, clap::Error> {
        match value.to_str() {
            Some(s) if s.eq_ignore_ascii_case("max") => Ok(u64::MAX),
            _ => self.inner.parse_ref(cmd, arg, value).map(Into::into),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    /// A test command that uses the `MaxOr` parser
    #[derive(Parser, Debug)]
    struct NodeCommand {
        #[arg(long, value_parser = MaxOr::new(clap::value_parser!(u64)))]
        max_value: u64,
    }
    #[test]
    fn test_zero_parse() {
        // "0" maps to the `None` sentinel, which reads back as the max value.
        let parsed = "0".parse::<ZeroAsNoneU64>().unwrap();
        assert_eq!(parsed, ZeroAsNoneU64(None));
        assert_eq!(parsed.unwrap_or_max(), u64::MAX);
    }
    #[test]
    fn test_from_u64() {
        // Non-zero values are preserved; zero collapses to `None`.
        assert_eq!(ZeroAsNoneU64::from(1u64), ZeroAsNoneU64(Some(1u64)));
        assert_eq!(ZeroAsNoneU64::from(0u64), ZeroAsNoneU64(None));
    }
    #[test]
    fn parse_max_value() {
        // The literal "max" parses to u64::MAX, numbers pass through, and
        // anything else is rejected.
        let cmd: NodeCommand = NodeCommand::try_parse_from(["reth", "--max-value", "max"]).unwrap();
        assert_eq!(cmd.max_value, u64::MAX);
        let cmd: NodeCommand = NodeCommand::try_parse_from(["reth", "--max-value", "42"]).unwrap();
        assert_eq!(cmd.max_value, 42);
        assert!(NodeCommand::try_parse_from(["reth", "--max-value", "invalid"]).is_err());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/rpc_server.rs | crates/node/core/src/args/rpc_server.rs | //! clap [Args](clap::Args) for RPC related arguments.
use std::{
collections::HashSet,
ffi::OsStr,
net::{IpAddr, Ipv4Addr},
path::PathBuf,
};
use alloy_primitives::Address;
use alloy_rpc_types_engine::JwtSecret;
use clap::{
builder::{PossibleValue, RangedU64ValueParser, TypedValueParser},
Arg, Args, Command,
};
use rand::Rng;
use reth_cli_util::parse_ether_value;
use reth_rpc_eth_types::builder::config::PendingBlockKind;
use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection};
use url::Url;
use crate::args::{
types::{MaxU32, ZeroAsNoneU64},
GasPriceOracleArgs, RpcStateCacheArgs,
};
use super::types::MaxOr;
// Server-wide limits; these feed the `default_value_t` attributes on
// `RpcServerArgs` below and its manual `Default` impl.
/// Default max number of subscriptions per connection.
pub(crate) const RPC_DEFAULT_MAX_SUBS_PER_CONN: u32 = 1024;
/// Default max request size in MB.
pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15;
/// Default max response size in MB.
///
/// This is only relevant for very large trace responses.
pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 160;
/// Default number of incoming connections.
pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 500;
/// Parameters for configuring the rpc more granularity via CLI
// Field doc comments double as `--help` text via clap's derive; keep the
// manual `Default` impl below in sync with every `default_value*` attribute.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "RPC")]
pub struct RpcServerArgs {
    /// Enable the HTTP-RPC server
    // Implicitly enabled when the node runs with `--dev`.
    #[arg(long, default_value_if("dev", "true", "true"))]
    pub http: bool,
    /// Http server address to listen on
    #[arg(long = "http.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub http_addr: IpAddr,
    /// Http server port to listen on
    #[arg(long = "http.port", default_value_t = constants::DEFAULT_HTTP_RPC_PORT)]
    pub http_port: u16,
    /// Disable compression for HTTP responses
    #[arg(long = "http.disable-compression", default_value_t = false)]
    pub http_disable_compression: bool,
    /// Rpc Modules to be configured for the HTTP server
    #[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default())]
    pub http_api: Option<RpcModuleSelection>,
    /// Http Corsdomain to allow request from
    #[arg(long = "http.corsdomain")]
    pub http_corsdomain: Option<String>,
    /// Enable the WS-RPC server
    #[arg(long)]
    pub ws: bool,
    /// Ws server address to listen on
    #[arg(long = "ws.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub ws_addr: IpAddr,
    /// Ws server port to listen on
    #[arg(long = "ws.port", default_value_t = constants::DEFAULT_WS_RPC_PORT)]
    pub ws_port: u16,
    /// Origins from which to accept `WebSocket` requests
    #[arg(id = "ws.origins", long = "ws.origins", alias = "ws.corsdomain")]
    pub ws_allowed_origins: Option<String>,
    /// Rpc Modules to be configured for the WS server
    #[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default())]
    pub ws_api: Option<RpcModuleSelection>,
    /// Disable the IPC-RPC server
    #[arg(long)]
    pub ipcdisable: bool,
    /// Filename for IPC socket/pipe within the datadir
    #[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())]
    pub ipcpath: String,
    /// Set the permissions for the IPC socket file, in octal format.
    ///
    /// If not specified, the permissions will be set by the system's umask.
    #[arg(long = "ipc.permissions")]
    pub ipc_socket_permissions: Option<String>,
    /// Auth server address to listen on
    #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub auth_addr: IpAddr,
    /// Auth server port to listen on
    #[arg(long = "authrpc.port", default_value_t = constants::DEFAULT_AUTH_PORT)]
    pub auth_port: u16,
    /// Path to a JWT secret to use for the authenticated engine-API RPC server.
    ///
    /// This will enforce JWT authentication for all requests coming from the consensus layer.
    ///
    /// If no path is provided, a secret will be generated and stored in the datadir under
    /// `<DIR>/<CHAIN_ID>/jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default.
    #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)]
    pub auth_jwtsecret: Option<PathBuf>,
    /// Enable auth engine API over IPC
    #[arg(long)]
    pub auth_ipc: bool,
    /// Filename for auth IPC socket/pipe within the datadir
    #[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())]
    pub auth_ipc_path: String,
    /// Disable the auth/engine API server.
    ///
    /// This will prevent the authenticated engine-API server from starting. Use this if you're
    /// running a node that doesn't need to serve engine API requests.
    #[arg(long = "disable-auth-server", alias = "disable-engine-api")]
    pub disable_auth_server: bool,
    /// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and
    /// `--ws.api`.
    ///
    /// This is __not__ used for the authenticated engine-API RPC server, see
    /// `--authrpc.jwtsecret`.
    #[arg(long = "rpc.jwtsecret", value_name = "HEX", global = true, required = false)]
    pub rpc_jwtsecret: Option<JwtSecret>,
    /// Set the maximum RPC request payload size for both HTTP and WS in megabytes.
    #[arg(long = "rpc.max-request-size", alias = "rpc-max-request-size", default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into())]
    pub rpc_max_request_size: MaxU32,
    /// Set the maximum RPC response payload size for both HTTP and WS in megabytes.
    #[arg(long = "rpc.max-response-size", alias = "rpc-max-response-size", visible_alias = "rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into())]
    pub rpc_max_response_size: MaxU32,
    /// Set the maximum concurrent subscriptions per connection.
    #[arg(long = "rpc.max-subscriptions-per-connection", alias = "rpc-max-subscriptions-per-connection", default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN.into())]
    pub rpc_max_subscriptions_per_connection: MaxU32,
    /// Maximum number of RPC server connections.
    #[arg(long = "rpc.max-connections", alias = "rpc-max-connections", value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS.into())]
    pub rpc_max_connections: MaxU32,
    /// Maximum number of concurrent tracing requests.
    ///
    /// By default this chooses a sensible value based on the number of available cores.
    /// Tracing requests are generally CPU bound.
    /// Choosing a value that is higher than the available CPU cores can have a negative impact on
    /// the performance of the node and affect the node's ability to maintain sync.
    #[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())]
    pub rpc_max_tracing_requests: usize,
    /// Maximum number of blocks for `trace_filter` requests.
    #[arg(long = "rpc.max-trace-filter-blocks", alias = "rpc-max-trace-filter-blocks", value_name = "COUNT", default_value_t = constants::DEFAULT_MAX_TRACE_FILTER_BLOCKS)]
    pub rpc_max_trace_filter_blocks: u64,
    /// Maximum number of blocks that could be scanned per filter request. (0 = entire chain)
    #[arg(long = "rpc.max-blocks-per-filter", alias = "rpc-max-blocks-per-filter", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_BLOCKS_PER_FILTER))]
    pub rpc_max_blocks_per_filter: ZeroAsNoneU64,
    /// Maximum number of logs that can be returned in a single response. (0 = no limit)
    #[arg(long = "rpc.max-logs-per-response", alias = "rpc-max-logs-per-response", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64))]
    pub rpc_max_logs_per_response: ZeroAsNoneU64,
    /// Maximum gas limit for `eth_call` and call tracing RPC methods.
    // `MaxOr` lets users pass the literal "max"; the inner ranged parser
    // enforces a minimum of 1 for numeric input.
    #[arg(
        long = "rpc.gascap",
        alias = "rpc-gascap",
        value_name = "GAS_CAP",
        value_parser = MaxOr::new(RangedU64ValueParser::<u64>::new().range(1..)),
        default_value_t = constants::gas_oracle::RPC_DEFAULT_GAS_CAP
    )]
    pub rpc_gas_cap: u64,
    /// Maximum eth transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)
    #[arg(
        long = "rpc.txfeecap",
        alias = "rpc-txfeecap",
        value_name = "TX_FEE_CAP",
        value_parser = parse_ether_value,
        default_value = "1.0"
    )]
    pub rpc_tx_fee_cap: u128,
    /// Maximum number of blocks for `eth_simulateV1` call.
    #[arg(
        long = "rpc.max-simulate-blocks",
        value_name = "BLOCKS_COUNT",
        default_value_t = constants::DEFAULT_MAX_SIMULATE_BLOCKS
    )]
    pub rpc_max_simulate_blocks: u64,
    /// The maximum proof window for historical proof generation.
    /// This value allows for generating historical proofs up to
    /// configured number of blocks from current tip (up to `tip - window`).
    #[arg(
        long = "rpc.eth-proof-window",
        default_value_t = constants::DEFAULT_ETH_PROOF_WINDOW,
        value_parser = RangedU64ValueParser::<u64>::new().range(..=constants::MAX_ETH_PROOF_WINDOW)
    )]
    pub rpc_eth_proof_window: u64,
    /// Maximum number of concurrent getproof requests.
    #[arg(long = "rpc.proof-permits", alias = "rpc-proof-permits", value_name = "COUNT", default_value_t = constants::DEFAULT_PROOF_PERMITS)]
    pub rpc_proof_permits: usize,
    /// Configures the pending block behavior for RPC responses.
    ///
    /// Options: full (include all transactions), empty (header only), none (disable pending
    /// blocks).
    #[arg(long = "rpc.pending-block", default_value = "full", value_name = "KIND")]
    pub rpc_pending_block: PendingBlockKind,
    /// Endpoint to forward transactions to.
    #[arg(long = "rpc.forwarder", alias = "rpc-forwarder", value_name = "FORWARDER")]
    pub rpc_forwarder: Option<Url>,
    /// Path to file containing disallowed addresses, json-encoded list of strings. Block
    /// validation API will reject blocks containing transactions from these addresses.
    #[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::<HashSet<Address>>)]
    pub builder_disallow: Option<HashSet<Address>>,
    /// State cache configuration.
    #[command(flatten)]
    pub rpc_state_cache: RpcStateCacheArgs,
    /// Gas price oracle configuration.
    #[command(flatten)]
    pub gas_price_oracle: GasPriceOracleArgs,
}
impl RpcServerArgs {
/// Enables the HTTP-RPC server.
pub const fn with_http(mut self) -> Self {
self.http = true;
self
}
/// Configures modules for the HTTP-RPC server.
pub fn with_http_api(mut self, http_api: RpcModuleSelection) -> Self {
self.http_api = Some(http_api);
self
}
/// Enables the WS-RPC server.
pub const fn with_ws(mut self) -> Self {
self.ws = true;
self
}
/// Configures modules for WS-RPC server.
pub fn with_ws_api(mut self, ws_api: RpcModuleSelection) -> Self {
self.ws_api = Some(ws_api);
self
}
/// Enables the Auth IPC
pub const fn with_auth_ipc(mut self) -> Self {
self.auth_ipc = true;
self
}
/// Configures modules for both the HTTP-RPC server and WS-RPC server.
///
/// This is the same as calling both [`Self::with_http_api`] and [`Self::with_ws_api`].
pub fn with_api(self, api: RpcModuleSelection) -> Self {
self.with_http_api(api.clone()).with_ws_api(api)
}
/// Change rpc port numbers based on the instance number, if provided.
/// * The `auth_port` is scaled by a factor of `instance * 100`
/// * The `http_port` is scaled by a factor of `-instance`
/// * The `ws_port` is scaled by a factor of `instance * 2`
/// * The `ipcpath` is appended with the instance number: `/tmp/reth.ipc-<instance>`
///
/// # Panics
/// Warning: if `instance` is zero in debug mode, this will panic.
///
/// This will also panic in debug mode if either:
/// * `instance` is greater than `655` (scaling would overflow `u16`)
/// * `self.auth_port / 100 + (instance - 1)` would overflow `u16`
///
/// In release mode, this will silently wrap around.
pub fn adjust_instance_ports(&mut self, instance: Option<u16>) {
if let Some(instance) = instance {
debug_assert_ne!(instance, 0, "instance must be non-zero");
// auth port is scaled by a factor of instance * 100
self.auth_port += instance * 100 - 100;
// http port is scaled by a factor of -instance
self.http_port -= instance - 1;
// ws port is scaled by a factor of instance * 2
self.ws_port += instance * 2 - 2;
// append instance file to ipc path
self.ipcpath = format!("{}-{}", self.ipcpath, instance);
}
}
/// Set the http port to zero, to allow the OS to assign a random unused port when the rpc
/// server binds to a socket.
pub const fn with_http_unused_port(mut self) -> Self {
self.http_port = 0;
self
}
/// Set the ws port to zero, to allow the OS to assign a random unused port when the rpc
/// server binds to a socket.
pub const fn with_ws_unused_port(mut self) -> Self {
self.ws_port = 0;
self
}
/// Set the auth port to zero, to allow the OS to assign a random unused port when the rpc
/// server binds to a socket.
pub const fn with_auth_unused_port(mut self) -> Self {
self.auth_port = 0;
self
}
/// Append a random string to the ipc path, to prevent possible collisions when multiple nodes
/// are being run on the same machine.
pub fn with_ipc_random_path(mut self) -> Self {
let random_string: String =
rand::rng().sample_iter(rand::distr::Alphanumeric).take(8).map(char::from).collect();
self.ipcpath = format!("{}-{}", self.ipcpath, random_string);
self
}
/// Configure all ports to be set to a random unused port when bound, and set the IPC path to a
/// random path.
pub fn with_unused_ports(mut self) -> Self {
self = self.with_http_unused_port();
self = self.with_ws_unused_port();
self = self.with_auth_unused_port();
self = self.with_ipc_random_path();
self
}
    /// Apply a function to the args.
    ///
    /// Enables fluent, conditional customization of the args in builder-style call chains.
    pub fn apply<F>(self, f: F) -> Self
    where
        F: FnOnce(Self) -> Self,
    {
        f(self)
    }
}
impl Default for RpcServerArgs {
    /// Defaults mirror the clap `default_value_t` values, so parsing an empty command line
    /// yields the same configuration (enforced by `rpc_server_args_default_sanity_test`).
    fn default() -> Self {
        Self {
            // HTTP server (disabled by default, bound to localhost).
            http: false,
            http_addr: Ipv4Addr::LOCALHOST.into(),
            http_port: constants::DEFAULT_HTTP_RPC_PORT,
            http_disable_compression: false,
            http_api: None,
            http_corsdomain: None,
            // WebSocket server (disabled by default, bound to localhost).
            ws: false,
            ws_addr: Ipv4Addr::LOCALHOST.into(),
            ws_port: constants::DEFAULT_WS_RPC_PORT,
            ws_allowed_origins: None,
            ws_api: None,
            // IPC transport (enabled by default at the default endpoint).
            ipcdisable: false,
            ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(),
            ipc_socket_permissions: None,
            // Engine API / auth server.
            auth_addr: Ipv4Addr::LOCALHOST.into(),
            auth_port: constants::DEFAULT_AUTH_PORT,
            auth_jwtsecret: None,
            auth_ipc: false,
            auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(),
            disable_auth_server: false,
            rpc_jwtsecret: None,
            // Server and request limits.
            rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(),
            rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(),
            rpc_max_subscriptions_per_connection: RPC_DEFAULT_MAX_SUBS_PER_CONN.into(),
            rpc_max_connections: RPC_DEFAULT_MAX_CONNECTIONS.into(),
            rpc_max_tracing_requests: constants::default_max_tracing_requests(),
            rpc_max_trace_filter_blocks: constants::DEFAULT_MAX_TRACE_FILTER_BLOCKS,
            rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(),
            rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(),
            rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP,
            rpc_tx_fee_cap: constants::DEFAULT_TX_FEE_CAP_WEI,
            rpc_max_simulate_blocks: constants::DEFAULT_MAX_SIMULATE_BLOCKS,
            rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW,
            rpc_pending_block: PendingBlockKind::Full,
            // Nested arg groups and misc.
            gas_price_oracle: GasPriceOracleArgs::default(),
            rpc_state_cache: RpcStateCacheArgs::default(),
            rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS,
            rpc_forwarder: None,
            builder_disallow: Default::default(),
        }
    }
}
/// clap value parser for [`RpcModuleSelection`].
///
/// Stateless marker type; the parsing logic lives in its `TypedValueParser` impl.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
struct RpcModuleSelectionValueParser;
impl TypedValueParser for RpcModuleSelectionValueParser {
type Value = RpcModuleSelection;
fn parse_ref(
&self,
_cmd: &Command,
arg: Option<&Arg>,
value: &OsStr,
) -> Result<Self::Value, clap::Error> {
let val =
value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
val.parse::<RpcModuleSelection>().map_err(|err| {
let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
let possible_values = RethRpcModule::all_variant_names().to_vec().join(",");
let msg = format!(
"Invalid value '{val}' for {arg}: {err}.\n [possible values: {possible_values}]"
);
clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
})
}
fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
let values = RethRpcModule::all_variant_names().iter().map(PossibleValue::new);
Some(Box::new(values))
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::{Args, Parser};

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_rpc_server_args_parser() {
        // `--http.api` accepts a comma-separated list of module names.
        let args =
            CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "eth,admin,debug"])
                .args;
        let apis = args.http_api.unwrap();
        let expected = RpcModuleSelection::try_from_selection(["eth", "admin", "debug"]).unwrap();
        assert_eq!(apis, expected);
    }

    #[test]
    fn test_rpc_server_eth_call_bundle_args() {
        // NOTE(review): this body is byte-identical to `test_rpc_server_args_parser` and does not
        // exercise any eth-call-bundle specific argument — looks like a copy-paste leftover.
        // Confirm the intended flag and update this test accordingly.
        let args =
            CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "eth,admin,debug"])
                .args;
        let apis = args.http_api.unwrap();
        let expected = RpcModuleSelection::try_from_selection(["eth", "admin", "debug"]).unwrap();
        assert_eq!(apis, expected);
    }

    #[test]
    fn test_rpc_server_args_parser_none() {
        // "none" parses to an empty selection (still `Some`), not to `None`.
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth", "--http.api", "none"]).args;
        let apis = args.http_api.unwrap();
        let expected = RpcModuleSelection::Selection(Default::default());
        assert_eq!(apis, expected);
    }

    #[test]
    fn rpc_server_args_default_sanity_test() {
        // Parsing an empty command line must match `RpcServerArgs::default()`, i.e. the clap
        // `default_value_t`s and the manual `Default` impl must stay in sync.
        let default_args = RpcServerArgs::default();
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }

    #[test]
    fn test_rpc_tx_fee_cap_parse_integer() {
        // Whole-ETH input is converted to wei.
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth", "--rpc.txfeecap", "2"]).args;
        let expected = 2_000_000_000_000_000_000u128; // 2 ETH in wei
        assert_eq!(args.rpc_tx_fee_cap, expected);
    }

    #[test]
    fn test_rpc_tx_fee_cap_parse_decimal() {
        // Fractional ETH values are also supported.
        let args =
            CommandParser::<RpcServerArgs>::parse_from(["reth", "--rpc.txfeecap", "1.5"]).args;
        let expected = 1_500_000_000_000_000_000u128; // 1.5 ETH in wei
        assert_eq!(args.rpc_tx_fee_cap, expected);
    }

    #[test]
    fn test_rpc_tx_fee_cap_parse_zero() {
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth", "--rpc.txfeecap", "0"]).args;
        assert_eq!(args.rpc_tx_fee_cap, 0); // 0 = no cap
    }

    #[test]
    fn test_rpc_tx_fee_cap_parse_none() {
        let args = CommandParser::<RpcServerArgs>::parse_from(["reth"]).args;
        let expected = 1_000_000_000_000_000_000u128;
        assert_eq!(args.rpc_tx_fee_cap, expected); // 1 ETH default cap
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/mod.rs | crates/node/core/src/args/mod.rs | //! Parameters for configuring the rpc more granularity via CLI
/// EnclaveArgs struct for configuring the enclave
mod enclave;
pub use enclave::EnclaveArgs;
/// NetworkArg struct for configuring the network
mod network;
pub use network::{DiscoveryArgs, NetworkArgs};
/// RpcServerArg struct for configuring the RPC
mod rpc_server;
pub use rpc_server::RpcServerArgs;
/// `RpcStateCacheArgs` struct for configuring RPC state cache
mod rpc_state_cache;
pub use rpc_state_cache::RpcStateCacheArgs;
/// DebugArgs struct for debugging purposes
mod debug;
pub use debug::{DebugArgs, InvalidBlockHookType, InvalidBlockSelection};
/// DatabaseArgs struct for configuring the database
mod database;
pub use database::DatabaseArgs;
/// LogArgs struct for configuring the logger
mod log;
pub use log::{ColorMode, LogArgs, Verbosity};
/// `PayloadBuilderArgs` struct for configuring the payload builder
mod payload_builder;
pub use payload_builder::PayloadBuilderArgs;
/// Stage related arguments
mod stage;
pub use stage::StageEnum;
/// Gas price oracle related arguments
mod gas_price_oracle;
pub use gas_price_oracle::GasPriceOracleArgs;
/// TxPoolArgs for configuring the transaction pool
mod txpool;
pub use txpool::TxPoolArgs;
/// DevArgs for configuring the dev testnet
mod dev;
pub use dev::DevArgs;
/// PruneArgs for configuring the pruning and full node
mod pruning;
pub use pruning::PruningArgs;
/// DatadirArgs for configuring data storage paths
mod datadir_args;
pub use datadir_args::DatadirArgs;
/// BenchmarkArgs struct for configuring the benchmark to run
mod benchmark_args;
pub use benchmark_args::BenchmarkArgs;
/// EngineArgs for configuring the engine
mod engine;
pub use engine::EngineArgs;
/// `RessArgs` for configuring ress subprotocol.
mod ress_args;
pub use ress_args::RessArgs;
/// `EraArgs` for configuring ERA files import.
mod era;
pub use era::{DefaultEraHost, EraArgs, EraSourceArgs};
mod error;
pub mod types;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/txpool.rs | crates/node/core/src/args/txpool.rs | //! Transaction pool arguments
use crate::cli::config::RethTransactionPoolConfig;
use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, MIN_PROTOCOL_BASE_FEE};
use alloy_primitives::Address;
use clap::Args;
use reth_cli_util::parse_duration_from_secs_or_ms;
use reth_transaction_pool::{
blobstore::disk::DEFAULT_MAX_CACHED_BLOBS,
maintain::MAX_QUEUED_TRANSACTION_LIFETIME,
pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE},
validate::DEFAULT_MAX_TX_INPUT_BYTES,
LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP,
DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, MAX_NEW_PENDING_TXS_NOTIFICATIONS,
REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
};
use std::time::Duration;
/// Parameters for configuring the transaction pool
// NOTE: the struct doc previously read "Parameters for debugging purposes" — a copy-paste
// leftover from `DebugArgs`. This text feeds the generated `--help` output, so keep it accurate.
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "TxPool")]
pub struct TxPoolArgs {
    /// Max number of transactions in the pending sub-pool.
    #[arg(long = "txpool.pending-max-count", alias = "txpool.pending_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub pending_max_count: usize,
    /// Max size of the pending sub-pool in megabytes.
    #[arg(long = "txpool.pending-max-size", alias = "txpool.pending_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub pending_max_size: usize,
    /// Max number of transactions in the basefee sub-pool
    #[arg(long = "txpool.basefee-max-count", alias = "txpool.basefee_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub basefee_max_count: usize,
    /// Max size of the basefee sub-pool in megabytes.
    #[arg(long = "txpool.basefee-max-size", alias = "txpool.basefee_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub basefee_max_size: usize,
    /// Max number of transactions in the queued sub-pool
    #[arg(long = "txpool.queued-max-count", alias = "txpool.queued_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub queued_max_count: usize,
    /// Max size of the queued sub-pool in megabytes.
    #[arg(long = "txpool.queued-max-size", alias = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub queued_max_size: usize,
    /// Max number of transactions in the blobpool
    #[arg(long = "txpool.blobpool-max-count", alias = "txpool.blobpool_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)]
    pub blobpool_max_count: usize,
    /// Max size of the blobpool in megabytes.
    #[arg(long = "txpool.blobpool-max-size", alias = "txpool.blobpool_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)]
    pub blobpool_max_size: usize,
    /// Max number of entries for the in memory cache of the blob store.
    #[arg(long = "txpool.blob-cache-size", alias = "txpool.blob_cache_size")]
    pub blob_cache_size: Option<u32>,
    /// Max number of executable transaction slots guaranteed per account
    #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)]
    pub max_account_slots: usize,
    /// Price bump (in %) for the transaction pool underpriced check.
    #[arg(long = "txpool.pricebump", default_value_t = DEFAULT_PRICE_BUMP)]
    pub price_bump: u128,
    /// Minimum base fee required by the protocol.
    #[arg(long = "txpool.minimal-protocol-fee", default_value_t = MIN_PROTOCOL_BASE_FEE)]
    pub minimal_protocol_basefee: u64,
    /// Minimum priority fee required for transaction acceptance into the pool.
    /// Transactions with priority fee below this value will be rejected.
    #[arg(long = "txpool.minimum-priority-fee")]
    pub minimum_priority_fee: Option<u128>,
    /// The default enforced gas limit for transactions entering the pool
    #[arg(long = "txpool.gas-limit", default_value_t = ETHEREUM_BLOCK_GAS_LIMIT_30M)]
    pub enforced_gas_limit: u64,
    /// Maximum gas limit for individual transactions. Transactions exceeding this limit will be
    /// rejected by the transaction pool
    #[arg(long = "txpool.max-tx-gas")]
    pub max_tx_gas_limit: Option<u64>,
    /// Price bump percentage to replace an already existing blob transaction
    #[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)]
    pub blob_transaction_price_bump: u128,
    /// Max size in bytes of a single transaction allowed to enter the pool
    #[arg(long = "txpool.max-tx-input-bytes", alias = "txpool.max_tx_input_bytes", default_value_t = DEFAULT_MAX_TX_INPUT_BYTES)]
    pub max_tx_input_bytes: usize,
    /// The maximum number of blobs to keep in the in memory blob cache.
    #[arg(long = "txpool.max-cached-entries", alias = "txpool.max_cached_entries", default_value_t = DEFAULT_MAX_CACHED_BLOBS)]
    pub max_cached_entries: u32,
    /// Flag to disable local transaction exemptions.
    #[arg(long = "txpool.nolocals")]
    pub no_locals: bool,
    /// Flag to allow certain addresses as local.
    #[arg(long = "txpool.locals")]
    pub locals: Vec<Address>,
    /// Flag to toggle local transaction propagation.
    #[arg(long = "txpool.no-local-transactions-propagation")]
    pub no_local_transactions_propagation: bool,
    /// Number of additional transaction validation tasks to spawn.
    #[arg(long = "txpool.additional-validation-tasks", alias = "txpool.additional_validation_tasks", default_value_t = DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS)]
    pub additional_validation_tasks: usize,
    /// Maximum number of pending transactions from the network to buffer
    #[arg(long = "txpool.max-pending-txns", alias = "txpool.max_pending_txns", default_value_t = PENDING_TX_LISTENER_BUFFER_SIZE)]
    pub pending_tx_listener_buffer_size: usize,
    /// Maximum number of new transactions to buffer
    #[arg(long = "txpool.max-new-txns", alias = "txpool.max_new_txns", default_value_t = NEW_TX_LISTENER_BUFFER_SIZE)]
    pub new_tx_listener_buffer_size: usize,
    /// How many new pending transactions to buffer and send to in progress pending transaction
    /// iterators.
    // Fix: the alias previously repeated the kebab-case `long` name verbatim; every other field
    // uses the snake_case spelling as its alias, so follow the same convention here.
    #[arg(long = "txpool.max-new-pending-txs-notifications", alias = "txpool.max_new_pending_txs_notifications", default_value_t = MAX_NEW_PENDING_TXS_NOTIFICATIONS)]
    pub max_new_pending_txs_notifications: usize,
    /// Maximum amount of time non-executable transaction are queued.
    #[arg(long = "txpool.lifetime", value_parser = parse_duration_from_secs_or_ms, default_value = "10800", value_name = "DURATION")]
    pub max_queued_lifetime: Duration,
    /// Path to store the local transaction backup at, to survive node restarts.
    #[arg(long = "txpool.transactions-backup", alias = "txpool.journal", value_name = "PATH")]
    pub transactions_backup_path: Option<std::path::PathBuf>,
    /// Disables transaction backup to disk on node shutdown.
    #[arg(
        long = "txpool.disable-transactions-backup",
        alias = "txpool.disable-journal",
        conflicts_with = "transactions_backup_path"
    )]
    pub disable_transactions_backup: bool,
    /// Max batch size for transaction pool insertions
    #[arg(long = "txpool.max-batch-size", default_value_t = 1)]
    pub max_batch_size: usize,
}
impl TxPoolArgs {
    /// Configures the minimal protocol base fee that should be enforced.
    ///
    /// Ethereum's EIP-1559 base fee can't drop below [`MIN_PROTOCOL_BASE_FEE`] hence this is
    /// enforced by default in the pool.
    pub const fn with_protocol_base_fee(mut self, protocol_base_fee: u64) -> Self {
        self.minimal_protocol_basefee = protocol_base_fee;
        self
    }

    /// Sets the minimal protocol base fee to 0, effectively disabling checks that enforce that a
    /// transaction's fee must be higher than the [`MIN_PROTOCOL_BASE_FEE`] which is the lowest
    /// value the ethereum EIP-1559 base fee can reach.
    pub const fn with_disabled_protocol_base_fee(self) -> Self {
        // A zero floor means no transaction is rejected for being below the protocol base fee.
        self.with_protocol_base_fee(0)
    }
}
impl Default for TxPoolArgs {
    /// Defaults mirror the clap `default_value_t` values, so parsing an empty command line
    /// yields the same configuration (enforced by `txpool_args_default_sanity_test`).
    fn default() -> Self {
        Self {
            // Sub-pool limits (counts and megabyte sizes).
            pending_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            pending_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            basefee_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            basefee_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            queued_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            queued_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            blobpool_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            blobpool_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT,
            blob_cache_size: None,
            max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
            // Fee / price-bump policy.
            price_bump: DEFAULT_PRICE_BUMP,
            minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE,
            minimum_priority_fee: None,
            enforced_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
            max_tx_gas_limit: None,
            blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP,
            max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES,
            max_cached_entries: DEFAULT_MAX_CACHED_BLOBS,
            // Local transaction handling.
            no_locals: false,
            locals: Default::default(),
            no_local_transactions_propagation: false,
            additional_validation_tasks: DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS,
            // Listener buffer sizes.
            pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE,
            new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE,
            max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS,
            max_queued_lifetime: MAX_QUEUED_TRANSACTION_LIFETIME,
            // Backup / insertion behavior.
            transactions_backup_path: None,
            disable_transactions_backup: false,
            max_batch_size: 1,
        }
    }
}
impl RethTransactionPoolConfig for TxPoolArgs {
    /// Returns transaction pool configuration.
    fn pool_config(&self) -> PoolConfig {
        // Sub-pool size limits are given on the CLI in megabytes; convert them to bytes once,
        // saturating instead of overflowing on absurdly large values.
        let mib = |megabytes: usize| megabytes.saturating_mul(1024 * 1024);

        PoolConfig {
            local_transactions_config: LocalTransactionConfig {
                no_exemptions: self.no_locals,
                local_addresses: self.locals.iter().cloned().collect(),
                propagate_local_transactions: !self.no_local_transactions_propagation,
            },
            pending_limit: SubPoolLimit {
                max_txs: self.pending_max_count,
                max_size: mib(self.pending_max_size),
            },
            basefee_limit: SubPoolLimit {
                max_txs: self.basefee_max_count,
                max_size: mib(self.basefee_max_size),
            },
            queued_limit: SubPoolLimit {
                max_txs: self.queued_max_count,
                max_size: mib(self.queued_max_size),
            },
            blob_limit: SubPoolLimit {
                max_txs: self.blobpool_max_count,
                max_size: mib(self.blobpool_max_size),
            },
            blob_cache_size: self.blob_cache_size,
            max_account_slots: self.max_account_slots,
            price_bumps: PriceBumpConfig {
                default_price_bump: self.price_bump,
                replace_blob_tx_price_bump: self.blob_transaction_price_bump,
            },
            minimal_protocol_basefee: self.minimal_protocol_basefee,
            minimum_priority_fee: self.minimum_priority_fee,
            gas_limit: self.enforced_gas_limit,
            pending_tx_listener_buffer_size: self.pending_tx_listener_buffer_size,
            new_tx_listener_buffer_size: self.new_tx_listener_buffer_size,
            max_new_pending_txs_notifications: self.max_new_pending_txs_notifications,
            max_queued_lifetime: self.max_queued_lifetime,
            // All remaining `PoolConfig` fields keep their defaults.
            ..Default::default()
        }
    }

    /// Returns max batch size for transaction batch insertion.
    fn max_batch_size(&self) -> usize {
        self.max_batch_size
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn txpool_args_default_sanity_test() {
        // Parsing an empty command line must match `TxPoolArgs::default()`, i.e. the clap
        // `default_value_t`s and the manual `Default` impl must stay in sync.
        let default_args = TxPoolArgs::default();
        let args = CommandParser::<TxPoolArgs>::parse_from(["reth"]).args;
        assert_eq!(args, default_args);
    }

    #[test]
    fn txpool_parse_locals() {
        // `--txpool.locals` parses a hex address into the `locals` allowlist.
        let args = CommandParser::<TxPoolArgs>::parse_from([
            "reth",
            "--txpool.locals",
            "0x0000000000000000000000000000000000000000",
        ])
        .args;
        assert_eq!(args.locals, vec![Address::ZERO]);
    }

    #[test]
    fn txpool_parse_max_tx_lifetime() {
        // Test with a custom duration (bare integers are interpreted as seconds).
        let args =
            CommandParser::<TxPoolArgs>::parse_from(["reth", "--txpool.lifetime", "300"]).args;
        assert_eq!(args.max_queued_lifetime, Duration::from_secs(300));
        // Test with the default value
        let args = CommandParser::<TxPoolArgs>::parse_from(["reth"]).args;
        assert_eq!(args.max_queued_lifetime, Duration::from_secs(3 * 60 * 60)); // Default is 3h
    }

    #[test]
    fn txpool_parse_max_tx_lifetime_invalid() {
        // Non-numeric durations must be rejected by the value parser.
        let result =
            CommandParser::<TxPoolArgs>::try_parse_from(["reth", "--txpool.lifetime", "invalid"]);
        assert!(result.is_err(), "Expected an error for invalid duration");
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/enclave.rs | crates/node/core/src/args/enclave.rs | //! clap [Args](clap::Args) for RPC related arguments.
use std::net::{IpAddr, Ipv4Addr};
use clap::Args;
const ENCLAVE_DEFAULT_ENDPOINT_PORT: u16 = 7878;
const ENCLAVE_DEFAULT_ENDPOINT_IP: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED);
/// Parameters for configuring the enclave more granularly via CLI
#[derive(Debug, Clone, Args, PartialEq, Eq, Copy)]
#[command(next_help_heading = "Enclave")]
pub struct EnclaveArgs {
    /// Enclave server address to listen on
    // `ENCLAVE_DEFAULT_ENDPOINT_IP` is already an `IpAddr`, so the previous
    // `.try_into().unwrap()` identity conversion (and its unwrap) was unnecessary.
    #[arg(long = "enclave.endpoint-addr", default_value_t = ENCLAVE_DEFAULT_ENDPOINT_IP)]
    pub enclave_server_addr: IpAddr,
    /// Enclave server port to listen on
    #[arg(long = "enclave.endpoint-port", default_value_t = ENCLAVE_DEFAULT_ENDPOINT_PORT)]
    pub enclave_server_port: u16,
    /// How many failures to tolerate before we panic
    #[arg(long = "enclave.retries", default_value_t = 0)]
    pub retries: u32,
    /// How many seconds to pause between retries
    #[arg(long = "enclave.retry-seconds", default_value_t = 30)]
    pub retry_seconds: u16,
    /// Spin up mock server for testing purpose
    #[arg(long = "enclave.mock-server", action = clap::ArgAction::SetTrue)]
    pub mock_server: bool,
    /// Enclave client timeout
    #[arg(long = "enclave.timeout", default_value_t = 5)]
    pub enclave_timeout: u64,
}
impl Default for EnclaveArgs {
    /// Mirrors the clap `default_value_t` values declared on [`EnclaveArgs`].
    fn default() -> Self {
        // Fields listed in declaration order for easy cross-checking against the struct.
        Self {
            enclave_server_addr: ENCLAVE_DEFAULT_ENDPOINT_IP,
            enclave_server_port: ENCLAVE_DEFAULT_ENDPOINT_PORT,
            retries: 0,
            retry_seconds: 30,
            mock_server: false,
            enclave_timeout: 5,
        }
    }
}
#[cfg(test)]
mod tests {
    // `super::*` already brings `EnclaveArgs` and the default constants into scope; the
    // previous explicit `use crate::args::enclave::EnclaveArgs;` was redundant.
    use super::*;
    use clap::{Args, Parser};

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    #[test]
    fn test_enclave_args_parser() {
        // Parsing an empty command line must yield the documented defaults.
        let args = CommandParser::<EnclaveArgs>::parse_from(["reth node"]).args;
        let addr = args.enclave_server_addr;
        let port = args.enclave_server_port;
        let mock = args.mock_server;
        assert_eq!(port, ENCLAVE_DEFAULT_ENDPOINT_PORT);
        assert_eq!(addr, ENCLAVE_DEFAULT_ENDPOINT_IP);
        // Assert the bool directly instead of `assert_eq!(mock, false)`
        // (clippy: bool_assert_comparison).
        assert!(!mock);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/node/core/src/args/era.rs | crates/node/core/src/args/era.rs | use clap::Args;
use reth_chainspec::{ChainKind, NamedChain};
use std::path::Path;
use url::Url;
/// Syncs ERA1 encoded blocks from a local or remote source.
///
/// Disabled by default; when enabled, the source is described by the flattened
/// [`EraSourceArgs`].
#[derive(Clone, Debug, Default, Args)]
pub struct EraArgs {
    /// Enable import from ERA1 files.
    #[arg(
        id = "era.enable",
        long = "era.enable",
        value_name = "ERA_ENABLE",
        default_value_t = false
    )]
    pub enabled: bool,
    /// Describes where to get the ERA files to import from.
    #[clap(flatten)]
    pub source: EraSourceArgs,
}
/// Arguments for the block history import based on ERA1 encoded files.
///
/// The two sources are mutually exclusive (`multiple = false`) and neither is required;
/// when both are absent a default host may be derived elsewhere.
#[derive(Clone, Debug, Default, Args)]
#[group(required = false, multiple = false)]
pub struct EraSourceArgs {
    /// The path to a directory for import.
    ///
    /// The ERA1 files are read from the local directory parsing headers and bodies.
    #[arg(long = "era.path", value_name = "ERA_PATH", verbatim_doc_comment)]
    pub path: Option<Box<Path>>,
    /// The URL to a remote host where the ERA1 files are hosted.
    ///
    /// The ERA1 files are read from the remote host using HTTP GET requests parsing headers
    /// and bodies.
    #[arg(long = "era.url", value_name = "ERA_URL", verbatim_doc_comment)]
    pub url: Option<Url>,
}
/// The `DefaultEraHost` trait allows to derive a default URL host for ERA files.
pub trait DefaultEraHost {
    /// Converts `self` into the [`Url`] index page of the ERA host.
    ///
    /// Returns `None` if no default host is known for `self`.
    fn default_era_host(&self) -> Option<Url>;
}
impl DefaultEraHost for ChainKind {
    /// Returns the well-known ERA1 index page for chains that have one (mainnet, sepolia).
    fn default_era_host(&self) -> Option<Url> {
        // Map known chains to their hosted index page; all other chains have no default host.
        let index_page = match self {
            Self::Named(NamedChain::Mainnet) => "https://era.ithaca.xyz/era1/index.html",
            Self::Named(NamedChain::Sepolia) => "https://era.ithaca.xyz/sepolia-era1/index.html",
            _ => return None,
        };
        Some(Url::parse(index_page).expect("URL should be valid"))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tracing-otlp/src/lib.rs | crates/tracing-otlp/src/lib.rs | //! Provides a tracing layer for `OpenTelemetry` that exports spans to an OTLP endpoint.
//!
//! This module simplifies the integration of `OpenTelemetry` tracing with OTLP export in Rust
//! applications. It allows for easily capturing and exporting distributed traces to compatible
//! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system.
use opentelemetry::{trace::TracerProvider, KeyValue, Value};
use opentelemetry_otlp::SpanExporter;
use opentelemetry_sdk::{
trace::{SdkTracer, SdkTracerProvider},
Resource,
};
use opentelemetry_semantic_conventions::{attribute::SERVICE_VERSION, SCHEMA_URL};
use tracing::Subscriber;
use tracing_opentelemetry::OpenTelemetryLayer;
use tracing_subscriber::registry::LookupSpan;
/// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint.
///
/// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing
/// with OTLP export.
///
/// # Panics
///
/// Panics if the OTLP HTTP span exporter cannot be constructed.
pub fn layer<S>(service_name: impl Into<Value>) -> OpenTelemetryLayer<S, SdkTracer>
where
    for<'span> S: Subscriber + LookupSpan<'span>,
{
    // `expect` instead of a bare `unwrap` so a mis-configured exporter produces a
    // descriptive panic message.
    let exporter =
        SpanExporter::builder().with_http().build().expect("failed to build OTLP span exporter");

    // Tag exported spans with the service name and this crate's version.
    let resource = Resource::builder()
        .with_service_name(service_name)
        .with_schema_url([KeyValue::new(SERVICE_VERSION, env!("CARGO_PKG_VERSION"))], SCHEMA_URL)
        .build();

    // Batch exporter: spans are buffered and flushed in the background.
    let provider =
        SdkTracerProvider::builder().with_resource(resource).with_batch_exporter(exporter).build();

    let tracer = provider.tracer("reth-otlp");

    tracing_opentelemetry::layer().with_tracer(tracer)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/errors/src/lib.rs | crates/errors/src/lib.rs | //! High level error types for the reth in general.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![no_std]
extern crate alloc;
mod error;
pub use error::{RethError, RethResult};
pub use reth_consensus::ConsensusError;
pub use reth_execution_errors::{BlockExecutionError, BlockValidationError};
pub use reth_storage_errors::{
db::DatabaseError,
provider::{ProviderError, ProviderResult},
};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/errors/src/error.rs | crates/errors/src/error.rs | use alloc::{boxed::Box, string::ToString};
use core::fmt::Display;
use reth_consensus::ConsensusError;
use reth_execution_errors::BlockExecutionError;
use reth_storage_errors::{db::DatabaseError, provider::ProviderError};
/// Result alias for [`RethError`].
pub type RethResult<T> = Result<T, RethError>;
/// Core error variants possible when interacting with the blockchain.
///
/// This enum encapsulates various error types that can occur during blockchain interactions.
///
/// It allows for structured error handling based on the nature of the encountered issue.
#[derive(Debug, thiserror::Error)]
pub enum RethError {
    /// Error encountered during block execution.
    #[error(transparent)]
    Execution(#[from] BlockExecutionError),
    /// Consensus-related errors.
    #[error(transparent)]
    Consensus(#[from] ConsensusError),
    /// Database-related errors.
    #[error(transparent)]
    Database(#[from] DatabaseError),
    /// Errors originating from providers.
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// Any other error.
    ///
    /// No `#[from]` here: construct via [`RethError::other`] or [`RethError::msg`] so that
    /// boxing arbitrary error types stays explicit.
    #[error(transparent)]
    Other(Box<dyn core::error::Error + Send + Sync>),
}
impl RethError {
    /// Wraps an arbitrary error type in [`RethError::Other`].
    pub fn other<E>(error: E) -> Self
    where
        E: core::error::Error + Send + Sync + 'static,
    {
        let boxed: Box<dyn core::error::Error + Send + Sync> = Box::new(error);
        Self::Other(boxed)
    }

    /// Builds a [`RethError::Other`] from any displayable message.
    pub fn msg(msg: impl Display) -> Self {
        // Render the message to a `String`, then box it as an error trait object.
        let text = msg.to_string();
        Self::Other(text.into())
    }
}
// Some types are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
    use super::*;
    // Compile-time size check: the array lengths on both sides of the constant must match,
    // so compilation fails if `size_of::<$t>() != $sz` on 64-bit x86_64.
    macro_rules! static_assert_size {
        ($t:ty, $sz:expr) => {
            const _: [(); $sz] = [(); core::mem::size_of::<$t>()];
        };
    }
    static_assert_size!(RethError, 64);
    static_assert_size!(BlockExecutionError, 64);
    static_assert_size!(ConsensusError, 48);
    static_assert_size!(DatabaseError, 32);
    static_assert_size!(ProviderError, 48);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/revm/src/cached.rs | crates/revm/src/cached.rs | //! Database adapters for payload building.
use alloy_primitives::{
map::{Entry, HashMap},
Address, B256, U256,
};
use core::cell::RefCell;
use revm::{
bytecode::Bytecode,
state::{AccountInfo, FlaggedStorage},
Database, DatabaseRef,
};
/// A container type that caches reads from an underlying [`DatabaseRef`].
///
/// This is intended to be used in conjunction with `revm::db::State`
/// during payload building which repeatedly accesses the same data.
///
/// [`CachedReads::as_db_mut`] transforms this type into a [`Database`] implementation that uses
/// [`CachedReads`] as a caching layer for operations, and records any cache misses.
///
/// # Example
///
/// ```
/// use reth_revm::{cached::CachedReads, DatabaseRef, db::State};
///
/// fn build_payload<DB: DatabaseRef>(db: DB) {
/// let mut cached_reads = CachedReads::default();
/// let db = cached_reads.as_db_mut(db);
/// // this is `Database` and can be used to build a payload, it never commits to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`.
/// // Subsequent payload build attempts can use cached reads and avoid hitting the underlying database.
/// let state = State::builder().with_database(db).build();
/// }
/// ```
#[derive(Debug, Clone, Default)]
pub struct CachedReads {
    /// Cached account info and storage slots, keyed by address.
    pub accounts: HashMap<Address, CachedAccount>,
    /// Cached contract bytecode, keyed by code hash.
    pub contracts: HashMap<B256, Bytecode>,
    /// Cached block hashes, keyed by block number.
    pub block_hashes: HashMap<u64, B256>,
}
// === impl CachedReads ===

impl CachedReads {
    /// Gets a [`DatabaseRef`] that will cache reads from the given database.
    pub const fn as_db<DB>(&mut self, db: DB) -> CachedReadsDBRef<'_, DB> {
        self.as_db_mut(db).into_db()
    }

    /// Gets a mutable [`Database`] that will cache reads from the underlying database.
    pub const fn as_db_mut<DB>(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> {
        CachedReadsDbMut { cached: self, db }
    }

    /// Inserts an account info into the cache.
    ///
    /// The info is stored as `Some(info)`, i.e. the account is recorded as known-existing.
    pub fn insert_account(
        &mut self,
        address: Address,
        info: AccountInfo,
        storage: HashMap<U256, FlaggedStorage>,
    ) {
        self.accounts.insert(address, CachedAccount { info: Some(info), storage });
    }

    /// Extends current cache with entries from another [`CachedReads`] instance.
    ///
    /// Note: It is expected that both instances are based on the exact same state.
    pub fn extend(&mut self, other: Self) {
        // On key collisions, entries from `other` overwrite existing ones.
        self.accounts.extend(other.accounts);
        self.contracts.extend(other.contracts);
        self.block_hashes.extend(other.block_hashes);
    }
}
/// A [Database] that caches reads inside [`CachedReads`].
///
/// Created via [`CachedReads::as_db_mut`]; cache misses are recorded in the borrowed
/// [`CachedReads`] so subsequent users can reuse them.
#[derive(Debug)]
pub struct CachedReadsDbMut<'a, DB> {
    /// The cache of reads.
    pub cached: &'a mut CachedReads,
    /// The underlying database.
    pub db: DB,
}
impl<'a, DB> CachedReadsDbMut<'a, DB> {
    /// Converts this [`Database`] implementation into a [`DatabaseRef`] that will still cache
    /// reads.
    ///
    /// Uses a `RefCell` so the read-only `DatabaseRef` interface can still mutate the cache.
    pub const fn into_db(self) -> CachedReadsDBRef<'a, DB> {
        CachedReadsDBRef { inner: RefCell::new(self) }
    }

    /// Returns access to wrapped [`DatabaseRef`].
    pub const fn inner(&self) -> &DB {
        &self.db
    }
}
impl<DB, T> AsRef<T> for CachedReadsDbMut<'_, DB>
where
DB: AsRef<T>,
{
fn as_ref(&self) -> &T {
self.inner().as_ref()
}
}
impl<DB: DatabaseRef> Database for CachedReadsDbMut<'_, DB> {
type Error = <DB as DatabaseRef>::Error;
fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
let basic = match self.cached.accounts.entry(address) {
Entry::Occupied(entry) => entry.get().info.clone(),
Entry::Vacant(entry) => {
entry.insert(CachedAccount::new(self.db.basic_ref(address)?)).info.clone()
}
};
Ok(basic)
}
fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
let code = match self.cached.contracts.entry(code_hash) {
Entry::Occupied(entry) => entry.get().clone(),
Entry::Vacant(entry) => entry.insert(self.db.code_by_hash_ref(code_hash)?).clone(),
};
Ok(code)
}
fn storage(&mut self, address: Address, index: U256) -> Result<FlaggedStorage, Self::Error> {
match self.cached.accounts.entry(address) {
Entry::Occupied(mut acc_entry) => match acc_entry.get_mut().storage.entry(index) {
Entry::Occupied(entry) => Ok(*entry.get()),
Entry::Vacant(entry) => Ok(*entry.insert(self.db.storage_ref(address, index)?)),
},
Entry::Vacant(acc_entry) => {
// acc needs to be loaded for us to access slots.
let info = self.db.basic_ref(address)?;
let (account, value) = if info.is_some() {
let value = self.db.storage_ref(address, index)?;
let mut account = CachedAccount::new(info);
account.storage.insert(index, value);
(account, value)
} else {
(CachedAccount::new(info), FlaggedStorage::ZERO)
};
acc_entry.insert(account);
Ok(value)
}
}
}
fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
let code = match self.cached.block_hashes.entry(number) {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => *entry.insert(self.db.block_hash_ref(number)?),
};
Ok(code)
}
}
/// A [`DatabaseRef`] that caches reads inside [`CachedReads`].
///
/// This is intended to be used as the [`DatabaseRef`] for
/// `revm::db::State` for repeated payload build jobs.
#[derive(Debug)]
pub struct CachedReadsDBRef<'a, DB> {
/// The inner cache reads db mut.
pub inner: RefCell<CachedReadsDbMut<'a, DB>>,
}
impl<DB: DatabaseRef> DatabaseRef for CachedReadsDBRef<'_, DB> {
type Error = <DB as DatabaseRef>::Error;
fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
self.inner.borrow_mut().basic(address)
}
fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
self.inner.borrow_mut().code_by_hash(code_hash)
}
fn storage_ref(&self, address: Address, index: U256) -> Result<FlaggedStorage, Self::Error> {
self.inner.borrow_mut().storage(address, index)
}
fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
self.inner.borrow_mut().block_hash(number)
}
}
/// Cached account contains the account state with storage
/// but lacks the account status.
#[derive(Debug, Clone)]
pub struct CachedAccount {
/// Account state.
pub info: Option<AccountInfo>,
/// Account's storage.
pub storage: HashMap<U256, FlaggedStorage>,
}
impl CachedAccount {
fn new(info: Option<AccountInfo>) -> Self {
Self { info, storage: HashMap::default() }
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_extend_with_two_cached_reads() {
// Setup test data
let hash1 = B256::from_slice(&[1u8; 32]);
let hash2 = B256::from_slice(&[2u8; 32]);
let address1 = Address::from_slice(&[1u8; 20]);
let address2 = Address::from_slice(&[2u8; 20]);
// Create primary cache
let mut primary = {
let mut cache = CachedReads::default();
cache.accounts.insert(address1, CachedAccount::new(Some(AccountInfo::default())));
cache.contracts.insert(hash1, Bytecode::default());
cache.block_hashes.insert(1, hash1);
cache
};
// Create additional cache
let additional = {
let mut cache = CachedReads::default();
cache.accounts.insert(address2, CachedAccount::new(Some(AccountInfo::default())));
cache.contracts.insert(hash2, Bytecode::default());
cache.block_hashes.insert(2, hash2);
cache
};
// Extending primary with additional cache
primary.extend(additional);
// Verify the combined state
assert!(
primary.accounts.len() == 2 &&
primary.contracts.len() == 2 &&
primary.block_hashes.len() == 2,
"All maps should contain 2 entries"
);
// Verify specific entries
assert!(
primary.accounts.contains_key(&address1) &&
primary.accounts.contains_key(&address2) &&
primary.contracts.contains_key(&hash1) &&
primary.contracts.contains_key(&hash2) &&
primary.block_hashes.get(&1) == Some(&hash1) &&
primary.block_hashes.get(&2) == Some(&hash2),
"All expected entries should be present"
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/revm/src/lib.rs | crates/revm/src/lib.rs | //! Revm utils and implementations specific to reth.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
/// Cache database that reads from an underlying [`DatabaseRef`].
/// Database adapters for payload building.
pub mod cached;
/// A marker that can be used to cancel execution.
pub mod cancelled;
/// Contains glue code for integrating reth database into revm's [Database].
pub mod database;
pub use revm::{database as db, inspector};
/// Common test helpers
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
// Convenience re-exports.
pub use revm::{self, database::State, *};
/// Helper types for execution witness generation.
#[cfg(feature = "witness")]
pub mod witness;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/revm/src/witness.rs | crates/revm/src/witness.rs | use alloc::vec::Vec;
use alloy_primitives::{keccak256, Bytes, B256};
use reth_trie::{HashedPostState, HashedStorage};
use revm::database::State;
/// Tracks state changes during execution.
#[derive(Debug, Clone, Default)]
pub struct ExecutionWitnessRecord {
/// Records all state changes
pub hashed_state: HashedPostState,
/// Map of all contract codes (created / accessed) to their preimages that were required during
/// the execution of the block, including during state root recomputation.
///
/// `keccak(bytecodes) => bytecodes`
pub codes: Vec<Bytes>,
/// Map of all hashed account and storage keys (addresses and slots) to their preimages
/// (unhashed account addresses and storage slots, respectively) that were required during
/// the execution of the block.
///
/// `keccak(address|slot) => address|slot`
pub keys: Vec<Bytes>,
/// The lowest block number referenced by any BLOCKHASH opcode call during transaction
/// execution.
///
/// This helps determine which ancestor block headers must be included in the
/// `ExecutionWitness`.
///
/// `None` - when the BLOCKHASH opcode was not called during execution
pub lowest_block_number: Option<u64>,
}
impl ExecutionWitnessRecord {
/// Records the state after execution.
pub fn record_executed_state<DB>(&mut self, statedb: &State<DB>) {
self.codes = statedb
.cache
.contracts
.values()
.map(|code| code.original_bytes())
.chain(
// cache state does not have all the contracts, especially when
// a contract is created within the block
// the contract only exists in bundle state, therefore we need
// to include them as well
statedb.bundle_state.contracts.values().map(|code| code.original_bytes()),
)
.collect();
for (address, account) in &statedb.cache.accounts {
let hashed_address = keccak256(address);
self.hashed_state
.accounts
.insert(hashed_address, account.account.as_ref().map(|a| (&a.info).into()));
let storage = self
.hashed_state
.storages
.entry(hashed_address)
.or_insert_with(|| HashedStorage::new(account.status.was_destroyed()));
if let Some(account) = &account.account {
self.keys.push(address.to_vec().into());
for (slot, value) in &account.storage {
let slot = B256::from(*slot);
let hashed_slot = keccak256(slot);
storage.storage.insert(hashed_slot, *value);
self.keys.push(slot.into());
}
}
}
// BTreeMap keys are ordered, so the first key is the smallest
self.lowest_block_number = statedb.block_hashes.keys().next().copied()
}
/// Creates the record from the state after execution.
pub fn from_executed_state<DB>(state: &State<DB>) -> Self {
let mut record = Self::default();
record.record_executed_state(state);
record
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/revm/src/test_utils.rs | crates/revm/src/test_utils.rs | use alloc::vec::Vec;
use alloy_primitives::{keccak256, map::HashMap, Address, BlockNumber, Bytes, StorageKey, B256};
use reth_primitives_traits::{Account, Bytecode};
use reth_storage_api::{
AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider,
StateProvider, StateRootProvider, StorageRootProvider,
};
use reth_storage_errors::provider::ProviderResult;
use reth_trie::{
updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher,
MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput,
};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use revm::state::FlaggedStorage;
/// Mock state for testing
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub struct StateProviderTest {
accounts: HashMap<Address, (HashMap<StorageKey, FlaggedStorage>, Account)>,
contracts: HashMap<B256, Bytecode>,
block_hash: HashMap<u64, B256>,
}
impl StateProviderTest {
/// Insert account.
pub fn insert_account(
&mut self,
address: Address,
mut account: Account,
bytecode: Option<Bytes>,
storage: HashMap<StorageKey, FlaggedStorage>,
) {
if let Some(bytecode) = bytecode {
let hash = keccak256(&bytecode);
account.bytecode_hash = Some(hash);
self.contracts.insert(hash, Bytecode::new_raw(bytecode));
}
self.accounts.insert(address, (storage, account));
}
/// Insert a block hash.
pub fn insert_block_hash(&mut self, block_number: u64, block_hash: B256) {
self.block_hash.insert(block_number, block_hash);
}
}
impl AccountReader for StateProviderTest {
fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> {
Ok(self.accounts.get(address).map(|(_, acc)| *acc))
}
}
impl BlockHashReader for StateProviderTest {
fn block_hash(&self, number: u64) -> ProviderResult<Option<B256>> {
Ok(self.block_hash.get(&number).copied())
}
fn canonical_hashes_range(
&self,
start: BlockNumber,
end: BlockNumber,
) -> ProviderResult<Vec<B256>> {
let range = start..end;
Ok(self
.block_hash
.iter()
.filter_map(|(block, hash)| range.contains(block).then_some(*hash))
.collect())
}
}
impl StateRootProvider for StateProviderTest {
fn state_root(&self, _hashed_state: HashedPostState) -> ProviderResult<B256> {
unimplemented!("state root computation is not supported")
}
fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult<B256> {
unimplemented!("state root computation is not supported")
}
fn state_root_with_updates(
&self,
_hashed_state: HashedPostState,
) -> ProviderResult<(B256, TrieUpdates)> {
unimplemented!("state root computation is not supported")
}
fn state_root_from_nodes_with_updates(
&self,
_input: TrieInput,
) -> ProviderResult<(B256, TrieUpdates)> {
unimplemented!("state root computation is not supported")
}
}
impl StorageRootProvider for StateProviderTest {
fn storage_root(
&self,
_address: Address,
_hashed_storage: HashedStorage,
) -> ProviderResult<B256> {
unimplemented!("storage root is not supported")
}
fn storage_proof(
&self,
_address: Address,
_slot: B256,
_hashed_storage: HashedStorage,
) -> ProviderResult<StorageProof> {
unimplemented!("proof generation is not supported")
}
fn storage_multiproof(
&self,
_address: Address,
_slots: &[B256],
_hashed_storage: HashedStorage,
) -> ProviderResult<StorageMultiProof> {
unimplemented!("proof generation is not supported")
}
}
impl StateProofProvider for StateProviderTest {
fn proof(
&self,
_input: TrieInput,
_address: Address,
_slots: &[B256],
) -> ProviderResult<AccountProof> {
unimplemented!("proof generation is not supported")
}
fn multiproof(
&self,
_input: TrieInput,
_targets: MultiProofTargets,
) -> ProviderResult<MultiProof> {
unimplemented!("proof generation is not supported")
}
fn witness(&self, _input: TrieInput, _target: HashedPostState) -> ProviderResult<Vec<Bytes>> {
unimplemented!("witness generation is not supported")
}
}
impl HashedPostStateProvider for StateProviderTest {
fn hashed_post_state(&self, bundle_state: &revm::database::BundleState) -> HashedPostState {
HashedPostState::from_bundle_state::<KeccakKeyHasher>(bundle_state.state())
}
}
impl StateProvider for StateProviderTest {
fn storage(
&self,
account: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<FlaggedStorage>> {
Ok(self.accounts.get(&account).and_then(|(storage, _)| storage.get(&storage_key).copied()))
}
}
impl BytecodeReader for StateProviderTest {
fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
Ok(self.contracts.get(code_hash).cloned())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/revm/src/database.rs | crates/revm/src/database.rs | use crate::primitives::alloy_primitives::{BlockNumber, StorageKey};
use alloy_primitives::{Address, B256, U256};
use core::ops::{Deref, DerefMut};
use reth_primitives_traits::Account;
use reth_storage_api::{AccountReader, BlockHashReader, BytecodeReader, StateProvider};
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use revm::{
bytecode::Bytecode,
state::{AccountInfo, FlaggedStorage},
Database, DatabaseRef,
};
/// A helper trait responsible for providing state necessary for EVM execution.
///
/// This serves as the data layer for [`Database`].
pub trait EvmStateProvider: Send + Sync {
/// Get basic account information.
///
/// Returns [`None`] if the account doesn't exist.
fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>>;
/// Get the hash of the block with the given number. Returns [`None`] if no block with this
/// number exists.
fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>>;
/// Get account code by hash.
fn bytecode_by_hash(
&self,
code_hash: &B256,
) -> ProviderResult<Option<reth_primitives_traits::Bytecode>>;
/// Get storage of the given account.
fn storage(
&self,
account: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<FlaggedStorage>>;
}
// Blanket implementation of EvmStateProvider for any type that implements StateProvider.
impl<T: StateProvider> EvmStateProvider for T {
fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> {
<T as AccountReader>::basic_account(self, address)
}
fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> {
<T as BlockHashReader>::block_hash(self, number)
}
fn bytecode_by_hash(
&self,
code_hash: &B256,
) -> ProviderResult<Option<reth_primitives_traits::Bytecode>> {
<T as BytecodeReader>::bytecode_by_hash(self, code_hash)
}
fn storage(
&self,
account: Address,
storage_key: StorageKey,
) -> ProviderResult<Option<FlaggedStorage>> {
<T as reth_storage_api::StateProvider>::storage(self, account, storage_key)
}
}
/// A [Database] and [`DatabaseRef`] implementation that uses [`EvmStateProvider`] as the underlying
/// data source.
#[derive(Clone)]
pub struct StateProviderDatabase<DB>(pub DB);
impl<DB> StateProviderDatabase<DB> {
/// Create new State with generic `StateProvider`.
pub const fn new(db: DB) -> Self {
Self(db)
}
/// Consume State and return inner `StateProvider`.
pub fn into_inner(self) -> DB {
self.0
}
}
impl<DB> core::fmt::Debug for StateProviderDatabase<DB> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("StateProviderDatabase").finish_non_exhaustive()
}
}
impl<DB> AsRef<DB> for StateProviderDatabase<DB> {
fn as_ref(&self) -> &DB {
self
}
}
impl<DB> Deref for StateProviderDatabase<DB> {
type Target = DB;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<DB> DerefMut for StateProviderDatabase<DB> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<DB: EvmStateProvider> Database for StateProviderDatabase<DB> {
type Error = ProviderError;
/// Retrieves basic account information for a given address.
///
/// Returns `Ok` with `Some(AccountInfo)` if the account exists,
/// `None` if it doesn't, or an error if encountered.
fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
self.basic_ref(address)
}
/// Retrieves the bytecode associated with a given code hash.
///
/// Returns `Ok` with the bytecode if found, or the default bytecode otherwise.
fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
self.code_by_hash_ref(code_hash)
}
/// Retrieves the storage value at a specific index for a given address.
///
/// Returns `Ok` with the storage value, or the default value if not found.
fn storage(&mut self, address: Address, index: U256) -> Result<FlaggedStorage, Self::Error> {
self.storage_ref(address, index)
}
/// Retrieves the block hash for a given block number.
///
/// Returns `Ok` with the block hash if found, or the default hash otherwise.
/// Note: It safely casts the `number` to `u64`.
fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
self.block_hash_ref(number)
}
}
impl<DB: EvmStateProvider> DatabaseRef for StateProviderDatabase<DB> {
type Error = <Self as Database>::Error;
/// Retrieves basic account information for a given address.
///
/// Returns `Ok` with `Some(AccountInfo)` if the account exists,
/// `None` if it doesn't, or an error if encountered.
fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
Ok(self.basic_account(&address)?.map(Into::into))
}
/// Retrieves the bytecode associated with a given code hash.
///
/// Returns `Ok` with the bytecode if found, or the default bytecode otherwise.
fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
Ok(self.bytecode_by_hash(&code_hash)?.unwrap_or_default().0)
}
/// Retrieves the storage value at a specific index for a given address.
///
/// Returns `Ok` with the storage value, or the default value if not found.
fn storage_ref(&self, address: Address, index: U256) -> Result<FlaggedStorage, Self::Error> {
Ok(self.0.storage(address, B256::new(index.to_be_bytes()))?.unwrap_or_default())
}
/// Retrieves the block hash for a given block number.
///
/// Returns `Ok` with the block hash if found, or the default hash otherwise.
fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
// Get the block hash or default hash with an attempt to convert U256 block number to u64
Ok(self.0.block_hash(number)?.unwrap_or_default())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/revm/src/cancelled.rs | crates/revm/src/cancelled.rs | use alloc::sync::Arc;
use core::sync::atomic::AtomicBool;
/// A marker that can be used to cancel execution.
///
/// If dropped, it will set the `cancelled` flag to true.
///
/// This is most useful when a payload job needs to be cancelled.
#[derive(Default, Clone, Debug)]
pub struct CancelOnDrop(Arc<AtomicBool>);
// === impl CancelOnDrop ===
impl CancelOnDrop {
/// Returns true if the job was cancelled.
pub fn is_cancelled(&self) -> bool {
self.0.load(core::sync::atomic::Ordering::Relaxed)
}
}
impl Drop for CancelOnDrop {
fn drop(&mut self) {
self.0.store(true, core::sync::atomic::Ordering::Relaxed);
}
}
/// A marker that can be used to cancel execution.
///
/// If dropped, it will NOT set the `cancelled` flag to true.
/// If `cancel` is called, the `cancelled` flag will be set to true.
///
/// This is useful in prewarming, when an external signal is received to cancel many prewarming
/// tasks.
#[derive(Default, Clone, Debug)]
pub struct ManualCancel(Arc<AtomicBool>);
// === impl ManualCancel ===
impl ManualCancel {
/// Returns true if the job was cancelled.
pub fn is_cancelled(&self) -> bool {
self.0.load(core::sync::atomic::Ordering::Relaxed)
}
/// Drops the [`ManualCancel`], setting the cancelled flag to true.
pub fn cancel(self) {
self.0.store(true, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_default_cancelled() {
let c = CancelOnDrop::default();
assert!(!c.is_cancelled());
}
#[test]
fn test_default_cancel_task() {
let c = ManualCancel::default();
assert!(!c.is_cancelled());
}
#[test]
fn test_set_cancel_task() {
let c = ManualCancel::default();
assert!(!c.is_cancelled());
let c2 = c.clone();
let c3 = c.clone();
c.cancel();
assert!(c3.is_cancelled());
assert!(c2.is_cancelled());
}
#[test]
fn test_cancel_task_multiple_threads() {
let c = ManualCancel::default();
let cloned_cancel = c.clone();
// we want to make sure that:
// * we can spawn tasks that do things
// * those tasks can run to completion and the flag remains unset unless we call cancel
let mut handles = vec![];
for _ in 0..10 {
let c = c.clone();
let handle = std::thread::spawn(move || {
for _ in 0..1000 {
if c.is_cancelled() {
return;
}
}
});
handles.push(handle);
}
// wait for all the threads to finish
for handle in handles {
handle.join().unwrap();
}
// check that the flag is still unset
assert!(!c.is_cancelled());
// cancel and check that the flag is set
c.cancel();
assert!(cloned_cancel.is_cancelled());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tracing/src/layers.rs | crates/tracing/src/layers.rs | use std::{
fmt,
path::{Path, PathBuf},
};
use rolling_file::{RollingConditionBasic, RollingFileAppender};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry};
use crate::formatter::LogFormat;
/// A worker guard returned by the file layer.
///
/// When a guard is dropped, all events currently in-memory are flushed to the log file this guard
/// belongs to.
pub type FileWorkerGuard = tracing_appender::non_blocking::WorkerGuard;
/// A boxed tracing [Layer].
pub(crate) type BoxedLayer<S> = Box<dyn Layer<S> + Send + Sync>;
/// Default [directives](Directive) for [`EnvFilter`] which disables high-frequency debug logs from
/// `hyper`, `hickory-resolver`, `jsonrpsee-server`, and `discv5`.
const DEFAULT_ENV_FILTER_DIRECTIVES: [&str; 5] = [
"hyper::proto::h1=off",
"hickory_resolver=off",
"hickory_proto=off",
"discv5=off",
"jsonrpsee-server=off",
];
/// Manages the collection of layers for a tracing subscriber.
///
/// `Layers` acts as a container for different logging layers such as stdout, file, or journald.
/// Each layer can be configured separately and then combined into a tracing subscriber.
#[derive(Default)]
pub struct Layers {
inner: Vec<BoxedLayer<Registry>>,
}
impl fmt::Debug for Layers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Layers").field("layers_count", &self.inner.len()).finish()
}
}
impl Layers {
/// Creates a new `Layers` instance.
pub fn new() -> Self {
Self::default()
}
/// Adds a layer to the collection of layers.
pub fn add_layer<L>(&mut self, layer: L)
where
L: Layer<Registry> + Send + Sync,
{
self.inner.push(layer.boxed());
}
/// Consumes the `Layers` instance, returning the inner vector of layers.
pub(crate) fn into_inner(self) -> Vec<BoxedLayer<Registry>> {
self.inner
}
/// Adds a journald layer to the layers collection.
///
/// # Arguments
/// * `filter` - A string containing additional filter directives for this layer.
///
/// # Returns
/// An `eyre::Result<()>` indicating the success or failure of the operation.
pub(crate) fn journald(&mut self, filter: &str) -> eyre::Result<()> {
let journald_filter = build_env_filter(None, filter)?;
let layer = tracing_journald::layer()?.with_filter(journald_filter);
self.add_layer(layer);
Ok(())
}
/// Adds a stdout layer with specified formatting and filtering.
///
/// # Type Parameters
/// * `S` - The type of subscriber that will use these layers.
///
/// # Arguments
/// * `format` - The log message format.
/// * `directive` - Directive for the default logging level.
/// * `filter` - Additional filter directives as a string.
/// * `color` - Optional color configuration for the log messages.
///
/// # Returns
/// An `eyre::Result<()>` indicating the success or failure of the operation.
pub(crate) fn stdout(
&mut self,
format: LogFormat,
default_directive: Directive,
filters: &str,
color: Option<String>,
) -> eyre::Result<()> {
let filter = build_env_filter(Some(default_directive), filters)?;
let layer = format.apply(filter, color, None);
self.add_layer(layer);
Ok(())
}
/// Adds a file logging layer to the layers collection.
///
/// # Arguments
/// * `format` - The format for log messages.
/// * `filter` - Additional filter directives as a string.
/// * `file_info` - Information about the log file including path and rotation strategy.
///
/// # Returns
/// An `eyre::Result<FileWorkerGuard>` representing the file logging worker.
pub(crate) fn file(
&mut self,
format: LogFormat,
filter: &str,
file_info: FileInfo,
) -> eyre::Result<FileWorkerGuard> {
let (writer, guard) = file_info.create_log_writer();
let file_filter = build_env_filter(None, filter)?;
let layer = format.apply(file_filter, None, Some(writer));
self.add_layer(layer);
Ok(guard)
}
}
/// Holds configuration information for file logging.
///
/// Contains details about the log file's path, name, size, and rotation strategy.
#[derive(Debug, Clone)]
pub struct FileInfo {
dir: PathBuf,
file_name: String,
max_size_bytes: u64,
max_files: usize,
}
impl FileInfo {
/// Creates a new `FileInfo` instance.
pub const fn new(
dir: PathBuf,
file_name: String,
max_size_bytes: u64,
max_files: usize,
) -> Self {
Self { dir, file_name, max_size_bytes, max_files }
}
/// Creates the log directory if it doesn't exist.
///
/// # Returns
/// A reference to the path of the log directory.
fn create_log_dir(&self) -> &Path {
let log_dir: &Path = self.dir.as_ref();
if !log_dir.exists() {
std::fs::create_dir_all(log_dir).expect("Could not create log directory");
}
log_dir
}
/// Creates a non-blocking writer for the log file.
///
/// # Returns
/// A tuple containing the non-blocking writer and its associated worker guard.
fn create_log_writer(&self) -> (tracing_appender::non_blocking::NonBlocking, WorkerGuard) {
let log_dir = self.create_log_dir();
let (writer, guard) = tracing_appender::non_blocking(
RollingFileAppender::new(
log_dir.join(&self.file_name),
RollingConditionBasic::new().max_size(self.max_size_bytes),
self.max_files,
)
.expect("Could not initialize file logging"),
);
(writer, guard)
}
}
/// Builds an environment filter for logging.
///
/// The events are filtered by `default_directive`, unless overridden by `RUST_LOG`.
///
/// # Arguments
/// * `default_directive` - An optional `Directive` that sets the default directive.
/// * `directives` - Additional directives as a comma-separated string.
///
/// # Returns
/// An `eyre::Result<EnvFilter>` that can be used to configure a tracing subscriber.
fn build_env_filter(
default_directive: Option<Directive>,
directives: &str,
) -> eyre::Result<EnvFilter> {
let env_filter = if let Some(default_directive) = default_directive {
EnvFilter::builder().with_default_directive(default_directive).from_env_lossy()
} else {
EnvFilter::builder().from_env_lossy()
};
DEFAULT_ENV_FILTER_DIRECTIVES
.into_iter()
.chain(directives.split(',').filter(|d| !d.is_empty()))
.try_fold(env_filter, |env_filter, directive| {
Ok(env_filter.add_directive(directive.parse()?))
})
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tracing/src/lib.rs | crates/tracing/src/lib.rs | //! The `tracing` module provides functionalities for setting up and configuring logging.
//!
//! It includes structures and functions to create and manage various logging layers: stdout,
//! file, or journald. The module's primary entry point is the `Tracer` struct, which can be
//! configured to use different logging formats and destinations. If no layer is specified, it will
//! default to stdout.
//!
//! # Examples
//!
//! Basic usage:
//!
//! ```
//! use reth_tracing::{
//! LayerInfo, RethTracer, Tracer,
//! tracing::level_filters::LevelFilter,
//! LogFormat,
//! };
//!
//! fn main() -> eyre::Result<()> {
//! let tracer = RethTracer::new().with_stdout(LayerInfo::new(
//! LogFormat::Json,
//! LevelFilter::INFO.to_string(),
//! "debug".to_string(),
//! None,
//! ));
//!
//! tracer.init()?;
//!
//! // Your application logic here
//!
//! Ok(())
//! }
//! ```
//!
//! This example sets up a tracer with JSON format logging for journald and terminal-friendly
//! format for file logging.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
// Re-export tracing crates
pub use tracing;
pub use tracing_appender;
pub use tracing_subscriber;
// Re-export our types
pub use formatter::LogFormat;
pub use layers::{FileInfo, FileWorkerGuard, Layers};
pub use test_tracer::TestTracer;
mod formatter;
mod layers;
mod test_tracer;
use tracing::level_filters::LevelFilter;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
/// Tracer for application logging.
///
/// Manages the configuration and initialization of logging layers,
/// including standard output, optional journald, and optional file logging.
#[derive(Debug, Clone)]
pub struct RethTracer {
stdout: LayerInfo,
journald: Option<String>,
file: Option<(LayerInfo, FileInfo)>,
}
impl RethTracer {
/// Constructs a new `Tracer` with default settings.
///
/// Initializes with default stdout layer configuration.
/// Journald and file layers are not set by default.
pub fn new() -> Self {
Self { stdout: LayerInfo::default(), journald: None, file: None }
}
/// Sets a custom configuration for the stdout layer.
///
/// # Arguments
/// * `config` - The `LayerInfo` to use for the stdout layer.
pub fn with_stdout(mut self, config: LayerInfo) -> Self {
self.stdout = config;
self
}
/// Sets the journald layer filter.
///
/// # Arguments
/// * `filter` - The `filter` to use for the journald layer.
pub fn with_journald(mut self, filter: String) -> Self {
self.journald = Some(filter);
self
}
/// Sets the file layer configuration and associated file info.
///
/// # Arguments
/// * `config` - The `LayerInfo` to use for the file layer.
/// * `file_info` - The `FileInfo` containing details about the log file.
pub fn with_file(mut self, config: LayerInfo, file_info: FileInfo) -> Self {
self.file = Some((config, file_info));
self
}
}
impl Default for RethTracer {
fn default() -> Self {
Self::new()
}
}
/// Configuration for a logging layer.
///
/// This struct holds configuration parameters for a tracing layer, including
/// the format, filtering directives, optional coloring, and directive.
#[derive(Debug, Clone)]
pub struct LayerInfo {
format: LogFormat,
default_directive: String,
filters: String,
color: Option<String>,
}
impl LayerInfo {
/// Constructs a new `LayerInfo`.
///
/// # Arguments
/// * `format` - Specifies the format for log messages. Possible values are:
/// - `LogFormat::Json` for JSON formatting.
/// - `LogFormat::LogFmt` for logfmt (key=value) formatting.
/// - `LogFormat::Terminal` for human-readable, terminal-friendly formatting.
/// * `default_directive` - Directive for filtering log messages.
/// * `filters` - Additional filtering parameters as a string.
/// * `color` - Optional color configuration for the log messages.
pub const fn new(
format: LogFormat,
default_directive: String,
filters: String,
color: Option<String>,
) -> Self {
Self { format, default_directive, filters, color }
}
}
impl Default for LayerInfo {
/// Provides default values for `LayerInfo`.
///
/// By default, it uses terminal format, INFO level filter,
/// no additional filters, and no color configuration.
fn default() -> Self {
Self {
format: LogFormat::Terminal,
default_directive: LevelFilter::INFO.to_string(),
filters: String::new(),
color: Some("always".to_string()),
}
}
}
/// Trait defining a general interface for logging configuration.
///
/// The `Tracer` trait provides a standardized way to initialize logging configurations
/// in an application. Implementations of this trait can specify different logging setups,
/// such as standard output logging, file logging, journald logging, or custom logging
/// configurations tailored for specific environments (like testing).
pub trait Tracer: Sized {
/// Initialize the logging configuration.
///
/// By default, this method creates a new `Layers` instance and delegates to `init_with_layers`.
///
/// # Returns
/// An `eyre::Result` which is `Ok` with an optional `WorkerGuard` if a file layer is used,
/// or an `Err` in case of an error during initialization.
fn init(self) -> eyre::Result<Option<WorkerGuard>> {
self.init_with_layers(Layers::new())
}
/// Initialize the logging configuration with additional custom layers.
///
/// This method allows for more customized setup by accepting pre-configured
/// `Layers` which can be further customized before initialization.
///
/// # Arguments
/// * `layers` - Pre-configured `Layers` instance to use for initialization
///
/// # Returns
/// An `eyre::Result` which is `Ok` with an optional `WorkerGuard` if a file layer is used,
/// or an `Err` in case of an error during initialization.
fn init_with_layers(self, layers: Layers) -> eyre::Result<Option<WorkerGuard>>;
}
impl Tracer for RethTracer {
/// Initializes the logging system based on the configured layers.
///
/// This method sets up the global tracing subscriber with the specified
/// stdout, journald, and file layers.
///
/// The default layer is stdout.
///
/// # Returns
/// An `eyre::Result` which is `Ok` with an optional `WorkerGuard` if a file layer is used,
/// or an `Err` in case of an error during initialization.
fn init_with_layers(self, mut layers: Layers) -> eyre::Result<Option<WorkerGuard>> {
layers.stdout(
self.stdout.format,
self.stdout.default_directive.parse()?,
&self.stdout.filters,
self.stdout.color,
)?;
if let Some(config) = self.journald {
layers.journald(&config)?;
}
let file_guard = if let Some((config, file_info)) = self.file {
Some(layers.file(config.format, &config.filters, file_info)?)
} else {
None
};
// The error is returned if the global default subscriber is already set,
// so it's safe to ignore it
let _ = tracing_subscriber::registry().with(layers.into_inner()).try_init();
Ok(file_guard)
}
}
/// Initializes a tracing subscriber for tests.
///
/// The filter is configurable via `RUST_LOG`.
///
/// # Note
///
/// The subscriber will silently fail if it could not be installed.
pub fn init_test_tracing() {
let _ = TestTracer::default().init();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tracing/src/formatter.rs | crates/tracing/src/formatter.rs | use crate::layers::BoxedLayer;
use clap::ValueEnum;
use std::{fmt, fmt::Display};
use tracing_appender::non_blocking::NonBlocking;
use tracing_subscriber::{EnvFilter, Layer, Registry};
/// Represents the logging format.
///
/// This enum defines the supported formats for logging output.
/// It is used to configure the format layer of a tracing subscriber.
#[derive(Debug, Copy, Clone, ValueEnum, Eq, PartialEq)]
pub enum LogFormat {
/// Represents JSON formatting for logs.
/// This format outputs log records as JSON objects,
/// making it suitable for structured logging.
Json,
/// Represents logfmt (key=value) formatting for logs.
/// This format is concise and human-readable,
/// typically used in command-line applications.
LogFmt,
/// Represents terminal-friendly formatting for logs.
Terminal,
}
impl LogFormat {
/// Applies the specified logging format to create a new layer.
///
/// This method constructs a tracing layer with the selected format,
/// along with additional configurations for filtering and output.
///
/// # Arguments
/// * `filter` - An `EnvFilter` used to determine which log records to output.
/// * `color` - An optional string that enables or disables ANSI color codes in the logs.
/// * `file_writer` - An optional `NonBlocking` writer for directing logs to a file.
///
/// # Returns
/// A `BoxedLayer<Registry>` that can be added to a tracing subscriber.
pub fn apply(
&self,
filter: EnvFilter,
color: Option<String>,
file_writer: Option<NonBlocking>,
) -> BoxedLayer<Registry> {
let ansi = if let Some(color) = color {
std::env::var("RUST_LOG_STYLE").map(|val| val != "never").unwrap_or(color != "never")
} else {
false
};
let target = std::env::var("RUST_LOG_TARGET")
// `RUST_LOG_TARGET` always overrides default behaviour
.map(|val| val != "0")
.unwrap_or_else(|_|
// If `RUST_LOG_TARGET` is not set, show target in logs only if the max enabled
// level is higher than INFO (DEBUG, TRACE)
filter.max_level_hint().is_none_or(|max_level| max_level > tracing::Level::INFO));
match self {
Self::Json => {
let layer =
tracing_subscriber::fmt::layer().json().with_ansi(ansi).with_target(target);
if let Some(writer) = file_writer {
layer.with_writer(writer).with_filter(filter).boxed()
} else {
layer.with_filter(filter).boxed()
}
}
Self::LogFmt => tracing_logfmt::layer().with_filter(filter).boxed(),
Self::Terminal => {
let layer = tracing_subscriber::fmt::layer().with_ansi(ansi).with_target(target);
if let Some(writer) = file_writer {
layer.with_writer(writer).with_filter(filter).boxed()
} else {
layer.with_filter(filter).boxed()
}
}
}
}
}
impl Display for LogFormat {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Json => write!(f, "json"),
Self::LogFmt => write!(f, "logfmt"),
Self::Terminal => write!(f, "terminal"),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tracing/src/test_tracer.rs | crates/tracing/src/test_tracer.rs | use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::EnvFilter;
use crate::{Layers, Tracer};
/// Initializes a tracing subscriber for tests.
///
/// The filter is configurable via `RUST_LOG`.
///
/// # Note
///
/// The subscriber will silently fail if it could not be installed.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct TestTracer;
impl Tracer for TestTracer {
fn init_with_layers(self, _layers: Layers) -> eyre::Result<Option<WorkerGuard>> {
let _ = tracing_subscriber::fmt()
.with_env_filter(EnvFilter::from_default_env())
.with_writer(std::io::stderr)
.try_init();
Ok(None)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/event.rs | crates/exex/exex/src/event.rs | use alloy_eips::BlockNumHash;
/// Events emitted by an `ExEx`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExExEvent {
/// Highest block processed by the `ExEx`.
///
/// The `ExEx` must guarantee that it will not require all earlier blocks in the future,
/// meaning that Reth is allowed to prune them.
///
/// On reorgs, it's possible for the height to go down.
FinishedHeight(BlockNumHash),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/lib.rs | crates/exex/exex/src/lib.rs | //! Execution extensions (`ExEx`).
//!
//! An execution extension is a task that listens to state changes of the node.
//!
//! Some examples of such state derives are rollups, bridges, and indexers.
//!
//! An `ExEx` is a [`Future`] resolving to a `Result<()>` that is run indefinitely alongside the
//! node.
//!
//! `ExEx`'s are initialized using an async closure that resolves to the `ExEx`; this closure gets
//! passed an [`ExExContext`] where it is possible to spawn additional tasks and modify Reth.
//!
//! Most `ExEx`'s will want to derive their state from the [`CanonStateNotification`] channel given
//! in [`ExExContext`]. A new notification is emitted whenever blocks are executed in live and
//! historical sync.
//!
//! # Pruning
//!
//! `ExEx`'s **SHOULD** emit an `ExExEvent::FinishedHeight` event to signify what blocks have been
//! processed. This event is used by Reth to determine what state can be pruned.
//!
//! An `ExEx` will only receive notifications for blocks greater than the block emitted in the
//! event. To clarify: if the `ExEx` emits `ExExEvent::FinishedHeight(0)` it will receive
//! notifications for any `block_number > 0`.
//!
//! # Examples, Assumptions, and Invariants
//!
//! ## Examples
//!
//! ### Simple Indexer ExEx
//! ```no_run
//! use alloy_consensus::BlockHeader;
//! use futures::StreamExt;
//! use reth_exex::ExExContext;
//! use reth_node_api::FullNodeComponents;
//! use reth_provider::CanonStateNotification;
//!
//! async fn my_indexer<N: FullNodeComponents>(
//! mut ctx: ExExContext<N>,
//! ) -> Result<(), Box<dyn std::error::Error>> {
//! // Subscribe to canonical state notifications
//!
//! while let Some(Ok(notification)) = ctx.notifications.next().await {
//! if let Some(committed) = notification.committed_chain() {
//! for block in committed.blocks_iter() {
//! // Index or process block data
//! println!("Processed block: {}", block.number());
//! }
//!
//! // Signal completion for pruning
//! ctx.send_finished_height(committed.tip().num_hash());
//! }
//! }
//!
//! Ok(())
//! }
//! ```
//!
//! ## Assumptions
//!
//! - `ExExs` run indefinitely alongside Reth
//! - `ExExs` receive canonical state notifications for block execution
//! - `ExExs` should handle potential network or database errors gracefully
//! - `ExExs` must emit `FinishedHeight` events for proper state pruning
//!
//! ## Invariants
//!
//! - An ExEx must not block the main Reth execution
//! - Notifications are processed in canonical order
//! - `ExExs` should be able to recover from temporary failures
//! - Memory and resource usage must be controlled
//!
//! ## Performance Considerations
//!
//! - Minimize blocking operations
//! - Use efficient data structures for state tracking
//! - Implement proper error handling and logging
//! - Consider batching operations for better performance
//!
//! [`Future`]: std::future::Future
//! [`ExExContext`]: crate::ExExContext
//! [`CanonStateNotification`]: reth_provider::CanonStateNotification
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
mod backfill;
pub use backfill::*;
mod context;
pub use context::*;
mod dyn_context;
pub use dyn_context::*;
mod event;
pub use event::*;
mod manager;
pub use manager::*;
mod notifications;
pub use notifications::*;
mod wal;
pub use wal::*;
// Re-export exex types
#[doc(inline)]
pub use reth_exex_types::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/manager.rs | crates/exex/exex/src/manager.rs | use crate::{
wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle,
};
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumHash;
use futures::StreamExt;
use itertools::Itertools;
use metrics::Gauge;
use reth_chain_state::ForkChoiceStream;
use reth_ethereum_primitives::EthPrimitives;
use reth_evm::ConfigureEvm;
use reth_metrics::{metrics::Counter, Metrics};
use reth_node_api::NodePrimitives;
use reth_primitives_traits::SealedHeader;
use reth_provider::HeaderProvider;
use reth_tracing::tracing::{debug, warn};
use std::{
collections::VecDeque,
fmt::Debug,
future::{poll_fn, Future},
ops::Not,
pin::Pin,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{ready, Context, Poll},
};
use tokio::sync::{
mpsc::{self, error::SendError, UnboundedReceiver, UnboundedSender},
watch,
};
use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture};
/// Default max size of the internal state notifications buffer.
///
/// 1024 notifications in the buffer is 3.5 hours of mainnet blocks,
/// or 17 minutes of 1-second blocks.
pub const DEFAULT_EXEX_MANAGER_CAPACITY: usize = 1024;
/// The maximum number of blocks allowed in the WAL before emitting a warning.
///
/// This constant defines the threshold for the Write-Ahead Log (WAL) size. If the number of blocks
/// in the WAL exceeds this limit, a warning is logged to indicate potential issues.
pub const WAL_BLOCKS_WARNING: usize = 128;
/// The source of the notification.
///
/// This distinguishment is needed to not commit any pipeline notificatations to [WAL](`Wal`),
/// because they are already finalized.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExExNotificationSource {
/// The notification was sent from the pipeline.
Pipeline,
/// The notification was sent from the blockchain tree.
BlockchainTree,
}
/// Metrics for an `ExEx`.
#[derive(Metrics)]
#[metrics(scope = "exex")]
struct ExExMetrics {
/// The total number of notifications sent to an `ExEx`.
notifications_sent_total: Counter,
/// The total number of events an `ExEx` has sent to the manager.
events_sent_total: Counter,
}
/// A handle to an `ExEx` used by the [`ExExManager`] to communicate with `ExEx`'s.
///
/// A handle should be created for each `ExEx` with a unique ID. The channels returned by
/// [`ExExHandle::new`] should be given to the `ExEx`, while the handle itself should be given to
/// the manager in [`ExExManager::new`].
#[derive(Debug)]
pub struct ExExHandle<N: NodePrimitives = EthPrimitives> {
/// The execution extension's ID.
id: String,
/// Metrics for an `ExEx`.
metrics: ExExMetrics,
/// Channel to send [`ExExNotification`]s to the `ExEx`.
sender: PollSender<ExExNotification<N>>,
/// Channel to receive [`ExExEvent`]s from the `ExEx`.
receiver: UnboundedReceiver<ExExEvent>,
/// The ID of the next notification to send to this `ExEx`.
next_notification_id: usize,
/// The finished block of the `ExEx`.
///
/// If this is `None`, the `ExEx` has not emitted a `FinishedHeight` event.
finished_height: Option<BlockNumHash>,
}
impl<N: NodePrimitives> ExExHandle<N> {
/// Create a new handle for the given `ExEx`.
///
/// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a
/// [`mpsc::Receiver`] for [`ExExNotification`]s that should be given to the `ExEx`.
pub fn new<P, E: ConfigureEvm<Primitives = N>>(
id: String,
node_head: BlockNumHash,
provider: P,
evm_config: E,
wal_handle: WalHandle<N>,
) -> (Self, UnboundedSender<ExExEvent>, ExExNotifications<P, E>) {
let (notification_tx, notification_rx) = mpsc::channel(1);
let (event_tx, event_rx) = mpsc::unbounded_channel();
let notifications =
ExExNotifications::new(node_head, provider, evm_config, notification_rx, wal_handle);
(
Self {
id: id.clone(),
metrics: ExExMetrics::new_with_labels(&[("exex", id)]),
sender: PollSender::new(notification_tx),
receiver: event_rx,
next_notification_id: 0,
finished_height: None,
},
event_tx,
notifications,
)
}
/// Reserves a slot in the `PollSender` channel and sends the notification if the slot was
/// successfully reserved.
///
/// When the notification is sent, it is considered delivered.
fn send(
&mut self,
cx: &mut Context<'_>,
(notification_id, notification): &(usize, ExExNotification<N>),
) -> Poll<Result<(), PollSendError<ExExNotification<N>>>> {
if let Some(finished_height) = self.finished_height {
match notification {
ExExNotification::ChainCommitted { new } => {
// Skip the chain commit notification if the finished height of the ExEx is
// higher than or equal to the tip of the new notification.
// I.e., the ExEx has already processed the notification.
if finished_height.number >= new.tip().number() {
debug!(
target: "exex::manager",
exex_id = %self.id,
%notification_id,
?finished_height,
new_tip = %new.tip().number(),
"Skipping notification"
);
self.next_notification_id = notification_id + 1;
return Poll::Ready(Ok(()))
}
}
// Do not handle [ExExNotification::ChainReorged] and
// [ExExNotification::ChainReverted] cases and always send the
// notification, because the ExEx should be aware of the reorgs and reverts lower
// than its finished height
ExExNotification::ChainReorged { .. } | ExExNotification::ChainReverted { .. } => {}
}
}
debug!(
target: "exex::manager",
exex_id = %self.id,
%notification_id,
"Reserving slot for notification"
);
match self.sender.poll_reserve(cx) {
Poll::Ready(Ok(())) => (),
other => return other,
}
debug!(
target: "exex::manager",
exex_id = %self.id,
%notification_id,
"Sending notification"
);
match self.sender.send_item(notification.clone()) {
Ok(()) => {
self.next_notification_id = notification_id + 1;
self.metrics.notifications_sent_total.increment(1);
Poll::Ready(Ok(()))
}
Err(err) => Poll::Ready(Err(err)),
}
}
}
/// Metrics for the `ExEx` manager.
#[derive(Metrics)]
#[metrics(scope = "exex.manager")]
pub struct ExExManagerMetrics {
/// Max size of the internal state notifications buffer.
max_capacity: Gauge,
/// Current capacity of the internal state notifications buffer.
current_capacity: Gauge,
/// Current size of the internal state notifications buffer.
///
/// Note that this might be slightly bigger than the maximum capacity in some cases.
buffer_size: Gauge,
/// Current number of `ExEx`'s on the node.
num_exexs: Gauge,
}
/// The execution extension manager.
///
/// The manager is responsible for:
///
/// - Receiving relevant events from the rest of the node, and sending these to the execution
/// extensions
/// - Backpressure
/// - Error handling
/// - Monitoring
#[derive(Debug)]
pub struct ExExManager<P, N: NodePrimitives> {
/// Provider for querying headers.
provider: P,
/// Handles to communicate with the `ExEx`'s.
exex_handles: Vec<ExExHandle<N>>,
/// [`ExExNotification`] channel from the [`ExExManagerHandle`]s.
handle_rx: UnboundedReceiver<(ExExNotificationSource, ExExNotification<N>)>,
/// The minimum notification ID currently present in the buffer.
min_id: usize,
/// Monotonically increasing ID for [`ExExNotification`]s.
next_id: usize,
/// Internal buffer of [`ExExNotification`]s.
///
/// The first element of the tuple is a monotonically increasing ID unique to the notification
/// (the second element of the tuple).
buffer: VecDeque<(usize, ExExNotification<N>)>,
/// Max size of the internal state notifications buffer.
max_capacity: usize,
/// Current state notifications buffer capacity.
///
/// Used to inform the execution stage of possible batch sizes.
current_capacity: Arc<AtomicUsize>,
/// Whether the manager is ready to receive new notifications.
is_ready: watch::Sender<bool>,
/// The finished height of all `ExEx`'s.
finished_height: watch::Sender<FinishedExExHeight>,
/// Write-Ahead Log for the [`ExExNotification`]s.
wal: Wal<N>,
/// A stream of finalized headers.
finalized_header_stream: ForkChoiceStream<SealedHeader<N::BlockHeader>>,
/// A handle to the `ExEx` manager.
handle: ExExManagerHandle<N>,
/// Metrics for the `ExEx` manager.
metrics: ExExManagerMetrics,
}
impl<P, N> ExExManager<P, N>
where
N: NodePrimitives,
{
/// Create a new [`ExExManager`].
///
/// You must provide an [`ExExHandle`] for each `ExEx` and the maximum capacity of the
/// notification buffer in the manager.
///
/// When the capacity is exceeded (which can happen if an `ExEx` is slow) no one can send
/// notifications over [`ExExManagerHandle`]s until there is capacity again.
pub fn new(
provider: P,
handles: Vec<ExExHandle<N>>,
max_capacity: usize,
wal: Wal<N>,
finalized_header_stream: ForkChoiceStream<SealedHeader<N::BlockHeader>>,
) -> Self {
let num_exexs = handles.len();
let (handle_tx, handle_rx) = mpsc::unbounded_channel();
let (is_ready_tx, is_ready_rx) = watch::channel(true);
let (finished_height_tx, finished_height_rx) = watch::channel(if num_exexs == 0 {
FinishedExExHeight::NoExExs
} else {
FinishedExExHeight::NotReady
});
let current_capacity = Arc::new(AtomicUsize::new(max_capacity));
let metrics = ExExManagerMetrics::default();
metrics.max_capacity.set(max_capacity as f64);
metrics.num_exexs.set(num_exexs as f64);
Self {
provider,
exex_handles: handles,
handle_rx,
min_id: 0,
next_id: 0,
buffer: VecDeque::with_capacity(max_capacity),
max_capacity,
current_capacity: Arc::clone(¤t_capacity),
is_ready: is_ready_tx,
finished_height: finished_height_tx,
wal,
finalized_header_stream,
handle: ExExManagerHandle {
exex_tx: handle_tx,
num_exexs,
is_ready_receiver: is_ready_rx.clone(),
is_ready: ReusableBoxFuture::new(make_wait_future(is_ready_rx)),
current_capacity,
finished_height: finished_height_rx,
},
metrics,
}
}
/// Returns the handle to the manager.
pub fn handle(&self) -> ExExManagerHandle<N> {
self.handle.clone()
}
/// Updates the current buffer capacity and notifies all `is_ready` watchers of the manager's
/// readiness to receive notifications.
fn update_capacity(&self) {
let capacity = self.max_capacity.saturating_sub(self.buffer.len());
self.current_capacity.store(capacity, Ordering::Relaxed);
self.metrics.current_capacity.set(capacity as f64);
self.metrics.buffer_size.set(self.buffer.len() as f64);
// we can safely ignore if the channel is closed, since the manager always holds it open
// internally
let _ = self.is_ready.send(capacity > 0);
}
/// Pushes a new notification into the managers internal buffer, assigning the notification a
/// unique ID.
fn push_notification(&mut self, notification: ExExNotification<N>) {
let next_id = self.next_id;
self.buffer.push_back((next_id, notification));
self.next_id += 1;
}
}
impl<P, N> ExExManager<P, N>
where
P: HeaderProvider,
N: NodePrimitives,
{
/// Finalizes the WAL according to the passed finalized header.
///
/// This function checks if all ExExes are on the canonical chain and finalizes the WAL if
/// necessary.
fn finalize_wal(&self, finalized_header: SealedHeader<N::BlockHeader>) -> eyre::Result<()> {
debug!(target: "exex::manager", header = ?finalized_header.num_hash(), "Received finalized header");
// Check if all ExExes are on the canonical chain
let exex_finished_heights = self
.exex_handles
.iter()
// Get ID and finished height for each ExEx
.map(|exex_handle| (&exex_handle.id, exex_handle.finished_height))
// Deduplicate all hashes
.unique_by(|(_, num_hash)| num_hash.map(|num_hash| num_hash.hash))
// Check if hashes are canonical
.map(|(exex_id, num_hash)| {
num_hash.map_or(Ok((exex_id, num_hash, false)), |num_hash| {
self.provider
.is_known(&num_hash.hash)
// Save the ExEx ID, finished height, and whether the hash is canonical
.map(|is_canonical| (exex_id, Some(num_hash), is_canonical))
})
})
// We collect here to be able to log the unfinalized ExExes below
.collect::<Result<Vec<_>, _>>()?;
if exex_finished_heights.iter().all(|(_, _, is_canonical)| *is_canonical) {
// If there is a finalized header and all ExExs are on the canonical chain, finalize
// the WAL with either the lowest finished height among all ExExes, or finalized header
// – whichever is lower.
let lowest_finished_height = exex_finished_heights
.iter()
.copied()
.filter_map(|(_, num_hash, _)| num_hash)
.chain([(finalized_header.num_hash())])
.min_by_key(|num_hash| num_hash.number)
.unwrap();
self.wal.finalize(lowest_finished_height)?;
if self.wal.num_blocks() > WAL_BLOCKS_WARNING {
warn!(
target: "exex::manager",
blocks = ?self.wal.num_blocks(),
"WAL contains too many blocks and is not getting cleared. That will lead to increased disk space usage. Check that you emit the FinishedHeight event from your ExExes."
);
}
} else {
let unfinalized_exexes = exex_finished_heights
.into_iter()
.filter_map(|(exex_id, num_hash, is_canonical)| {
is_canonical.not().then_some((exex_id, num_hash))
})
.format_with(", ", |(exex_id, num_hash), f| {
f(&format_args!("{exex_id} = {num_hash:?}"))
})
// We need this because `debug!` uses the argument twice when formatting the final
// log message, but the result of `format_with` can only be used once
.to_string();
debug!(
target: "exex::manager",
%unfinalized_exexes,
"Not all ExExes are on the canonical chain, can't finalize the WAL"
);
}
Ok(())
}
}
impl<P, N> Future for ExExManager<P, N>
where
P: HeaderProvider + Unpin + 'static,
N: NodePrimitives,
{
type Output = eyre::Result<()>;
/// Main loop of the [`ExExManager`]. The order of operations is as follows:
/// 1. Handle incoming ExEx events. We do it before finalizing the WAL, because it depends on
/// the latest state of [`ExExEvent::FinishedHeight`] events.
/// 2. Finalize the WAL with the finalized header, if necessary.
/// 3. Drain [`ExExManagerHandle`] notifications, push them to the internal buffer and update
/// the internal buffer capacity.
/// 5. Send notifications from the internal buffer to those ExExes that are ready to receive new
/// notifications.
/// 5. Remove notifications from the internal buffer that have been sent to **all** ExExes and
/// update the internal buffer capacity.
/// 6. Update the channel with the lowest [`FinishedExExHeight`] among all ExExes.
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
// Handle incoming ExEx events
for exex in &mut this.exex_handles {
while let Poll::Ready(Some(event)) = exex.receiver.poll_recv(cx) {
debug!(target: "exex::manager", exex_id = %exex.id, ?event, "Received event from ExEx");
exex.metrics.events_sent_total.increment(1);
match event {
ExExEvent::FinishedHeight(height) => exex.finished_height = Some(height),
}
}
}
// Drain the finalized header stream and finalize the WAL with the last header
let mut last_finalized_header = None;
while let Poll::Ready(finalized_header) = this.finalized_header_stream.poll_next_unpin(cx) {
last_finalized_header = finalized_header;
}
if let Some(header) = last_finalized_header {
this.finalize_wal(header)?;
}
// Drain handle notifications
while this.buffer.len() < this.max_capacity {
if let Poll::Ready(Some((source, notification))) = this.handle_rx.poll_recv(cx) {
let committed_tip =
notification.committed_chain().map(|chain| chain.tip().number());
let reverted_tip = notification.reverted_chain().map(|chain| chain.tip().number());
debug!(target: "exex::manager", ?committed_tip, ?reverted_tip, "Received new notification");
// Commit to WAL only notifications from blockchain tree. Pipeline notifications
// always contain only finalized blocks.
match source {
ExExNotificationSource::BlockchainTree => {
debug!(target: "exex::manager", ?committed_tip, ?reverted_tip, "Committing notification to WAL");
this.wal.commit(¬ification)?;
}
ExExNotificationSource::Pipeline => {
debug!(target: "exex::manager", ?committed_tip, ?reverted_tip, "Notification was sent from pipeline, skipping WAL commit");
}
}
this.push_notification(notification);
continue
}
break
}
// Update capacity
this.update_capacity();
// Advance all poll senders
let mut min_id = usize::MAX;
for idx in (0..this.exex_handles.len()).rev() {
let mut exex = this.exex_handles.swap_remove(idx);
// It is a logic error for this to ever underflow since the manager manages the
// notification IDs
let notification_index = exex
.next_notification_id
.checked_sub(this.min_id)
.expect("exex expected notification ID outside the manager's range");
if let Some(notification) = this.buffer.get(notification_index) {
if let Poll::Ready(Err(err)) = exex.send(cx, notification) {
// The channel was closed, which is irrecoverable for the manager
return Poll::Ready(Err(err.into()))
}
}
min_id = min_id.min(exex.next_notification_id);
this.exex_handles.push(exex);
}
// Remove processed buffered notifications
debug!(target: "exex::manager", %min_id, "Updating lowest notification id in buffer");
this.buffer.retain(|&(id, _)| id >= min_id);
this.min_id = min_id;
// Update capacity
this.update_capacity();
// Update watch channel block number
let finished_height = this.exex_handles.iter_mut().try_fold(u64::MAX, |curr, exex| {
exex.finished_height.map_or(Err(()), |height| Ok(height.number.min(curr)))
});
if let Ok(finished_height) = finished_height {
let _ = this.finished_height.send(FinishedExExHeight::Height(finished_height));
}
Poll::Pending
}
}
/// A handle to communicate with the [`ExExManager`].
#[derive(Debug)]
pub struct ExExManagerHandle<N: NodePrimitives = EthPrimitives> {
/// Channel to send notifications to the `ExEx` manager.
exex_tx: UnboundedSender<(ExExNotificationSource, ExExNotification<N>)>,
/// The number of `ExEx`'s running on the node.
num_exexs: usize,
/// A watch channel denoting whether the manager is ready for new notifications or not.
///
/// This is stored internally alongside a `ReusableBoxFuture` representation of the same value.
/// This field is only used to create a new `ReusableBoxFuture` when the handle is cloned,
/// but is otherwise unused.
is_ready_receiver: watch::Receiver<bool>,
/// A reusable future that resolves when the manager is ready for new
/// notifications.
is_ready: ReusableBoxFuture<'static, watch::Receiver<bool>>,
/// The current capacity of the manager's internal notification buffer.
current_capacity: Arc<AtomicUsize>,
/// The finished height of all `ExEx`'s.
finished_height: watch::Receiver<FinishedExExHeight>,
}
impl<N: NodePrimitives> ExExManagerHandle<N> {
    /// Creates an empty manager handle.
    ///
    /// Use this if there is no manager present.
    ///
    /// The handle will always be ready, and have a capacity of 0.
    pub fn empty() -> Self {
        let (exex_tx, _) = mpsc::unbounded_channel();
        let (_, ready_rx) = watch::channel(true);
        let (_, height_rx) = watch::channel(FinishedExExHeight::NoExExs);
        // Seed the readiness future from a clone of the receiver so the receiver
        // itself stays available for future handle clones.
        let is_ready = ReusableBoxFuture::new(make_wait_future(ready_rx.clone()));
        Self {
            exex_tx,
            num_exexs: 0,
            is_ready_receiver: ready_rx,
            is_ready,
            current_capacity: Arc::new(AtomicUsize::new(0)),
            finished_height: height_rx,
        }
    }

    /// Synchronously send a notification over the channel to all execution extensions.
    ///
    /// Senders should call [`Self::has_capacity`] first.
    pub fn send(
        &self,
        source: ExExNotificationSource,
        notification: ExExNotification<N>,
    ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification<N>)>> {
        self.exex_tx.send((source, notification))
    }

    /// Asynchronously send a notification over the channel to all execution extensions.
    ///
    /// The returned future resolves when the notification has been delivered. If there is no
    /// capacity in the channel, the future will wait.
    pub async fn send_async(
        &mut self,
        source: ExExNotificationSource,
        notification: ExExNotification<N>,
    ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification<N>)>> {
        // Wait for the manager to signal readiness before pushing the event.
        self.ready().await;
        self.exex_tx.send((source, notification))
    }

    /// Get the current capacity of the `ExEx` manager's internal notification buffer.
    pub fn capacity(&self) -> usize {
        self.current_capacity.load(Ordering::Relaxed)
    }

    /// Whether there is capacity in the `ExEx` manager's internal notification buffer.
    ///
    /// If this returns `false`, the owner of the handle should **NOT** send new notifications over
    /// the channel until the manager is ready again, as this can lead to unbounded memory growth.
    pub fn has_capacity(&self) -> bool {
        self.capacity() != 0
    }

    /// Returns `true` if there are `ExEx`'s installed in the node.
    pub const fn has_exexs(&self) -> bool {
        self.num_exexs != 0
    }

    /// The finished height of all `ExEx`'s.
    pub fn finished_height(&self) -> watch::Receiver<FinishedExExHeight> {
        self.finished_height.clone()
    }

    /// Wait until the manager is ready for new notifications.
    pub async fn ready(&mut self) {
        poll_fn(|cx| self.poll_ready(cx)).await
    }

    /// Wait until the manager is ready for new notifications.
    pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        // Resolving the stored future yields the watch receiver back; re-arm it
        // immediately so the next call waits for the readiness flag again.
        let receiver = ready!(self.is_ready.poll(cx));
        self.is_ready.set(make_wait_future(receiver));
        Poll::Ready(())
    }
}
/// Creates a future that resolves once the given watch channel receiver is true.
async fn make_wait_future(mut rx: watch::Receiver<bool>) -> watch::Receiver<bool> {
    // NOTE(onbjerg): A closed channel means the node is shutting down, so the
    // error is intentionally discarded.
    drop(rx.wait_for(|ready| *ready).await);
    rx
}
impl<N: NodePrimitives> Clone for ExExManagerHandle<N> {
fn clone(&self) -> Self {
Self {
exex_tx: self.exex_tx.clone(),
num_exexs: self.num_exexs,
is_ready_receiver: self.is_ready_receiver.clone(),
is_ready: ReusableBoxFuture::new(make_wait_future(self.is_ready_receiver.clone())),
current_capacity: self.current_capacity.clone(),
finished_height: self.finished_height.clone(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::wal::WalResult;
use alloy_primitives::B256;
use futures::{StreamExt, TryStreamExt};
use rand::Rng;
use reth_db_common::init::init_genesis;
use reth_evm_ethereum::EthEvmConfig;
use reth_primitives_traits::RecoveredBlock;
use reth_provider::{
providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockReader,
BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, TransactionVariant,
};
use reth_testing_utils::generators::{self, random_block, BlockParams};
fn empty_finalized_header_stream() -> ForkChoiceStream<SealedHeader> {
let (tx, rx) = watch::channel(None);
// Do not drop the sender, otherwise the receiver will always return an error
std::mem::forget(tx);
ForkChoiceStream::new(rx)
}
    #[tokio::test]
    async fn test_delivers_events() {
        // An event sent through the handle's event channel must arrive unchanged
        // on the manager-side receiver of the `ExExHandle`.
        let temp_dir = tempfile::tempdir().unwrap();
        let wal = Wal::new(temp_dir.path()).unwrap();
        let (mut exex_handle, event_tx, mut _notification_rx) = ExExHandle::new(
            "test_exex".to_string(),
            Default::default(),
            (),
            EthEvmConfig::mainnet(),
            wal.handle(),
        );
        // Send an event and check that it's delivered correctly
        let event = ExExEvent::FinishedHeight(BlockNumHash::new(42, B256::random()));
        event_tx.send(event).unwrap();
        let received_event = exex_handle.receiver.recv().await.unwrap();
        assert_eq!(received_event, event);
    }
    #[tokio::test]
    async fn test_has_exexs() {
        let temp_dir = tempfile::tempdir().unwrap();
        let wal = Wal::new(temp_dir.path()).unwrap();
        let (exex_handle_1, _, _) = ExExHandle::new(
            "test_exex_1".to_string(),
            Default::default(),
            (),
            EthEvmConfig::mainnet(),
            wal.handle(),
        );
        // A manager constructed with no handles reports that no ExExs are installed.
        assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream())
            .handle
            .has_exexs());
        // A manager with at least one handle reports installed ExExs.
        assert!(ExExManager::new((), vec![exex_handle_1], 0, wal, empty_finalized_header_stream())
            .handle
            .has_exexs());
    }
    #[tokio::test]
    async fn test_has_capacity() {
        let temp_dir = tempfile::tempdir().unwrap();
        let wal = Wal::new(temp_dir.path()).unwrap();
        let (exex_handle_1, _, _) = ExExHandle::new(
            "test_exex_1".to_string(),
            Default::default(),
            (),
            EthEvmConfig::mainnet(),
            wal.handle(),
        );
        // A manager created with `max_capacity == 0` never has capacity.
        assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream())
            .handle
            .has_capacity());
        // A manager created with a non-zero `max_capacity` starts with capacity.
        assert!(ExExManager::new(
            (),
            vec![exex_handle_1],
            10,
            wal,
            empty_finalized_header_stream()
        )
        .handle
        .has_capacity());
    }
#[test]
fn test_push_notification() {
let temp_dir = tempfile::tempdir().unwrap();
let wal = Wal::new(temp_dir.path()).unwrap();
let (exex_handle, _, _) = ExExHandle::new(
"test_exex".to_string(),
Default::default(),
(),
EthEvmConfig::mainnet(),
wal.handle(),
);
// Create a mock ExExManager and add the exex_handle to it
let mut exex_manager =
ExExManager::new((), vec![exex_handle], 10, wal, empty_finalized_header_stream());
// Define the notification for testing
let mut block1: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
block1.set_hash(B256::new([0x01; 32]));
block1.set_block_number(10);
let notification1 = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())),
};
// Push the first notification
exex_manager.push_notification(notification1.clone());
// Verify the buffer contains the notification with the correct ID
assert_eq!(exex_manager.buffer.len(), 1);
assert_eq!(exex_manager.buffer.front().unwrap().0, 0);
assert_eq!(exex_manager.buffer.front().unwrap().1, notification1);
assert_eq!(exex_manager.next_id, 1);
// Push another notification
let mut block2: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
block2.set_hash(B256::new([0x02; 32]));
block2.set_block_number(20);
let notification2 = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(vec![block2.clone()], Default::default(), Default::default())),
};
exex_manager.push_notification(notification2.clone());
// Verify the buffer contains both notifications with correct IDs
assert_eq!(exex_manager.buffer.len(), 2);
assert_eq!(exex_manager.buffer.front().unwrap().0, 0);
assert_eq!(exex_manager.buffer.front().unwrap().1, notification1);
assert_eq!(exex_manager.buffer.get(1).unwrap().0, 1);
assert_eq!(exex_manager.buffer.get(1).unwrap().1, notification2);
assert_eq!(exex_manager.next_id, 2);
}
#[test]
fn test_update_capacity() {
let temp_dir = tempfile::tempdir().unwrap();
let wal = Wal::new(temp_dir.path()).unwrap();
let (exex_handle, _, _) = ExExHandle::new(
"test_exex".to_string(),
Default::default(),
(),
EthEvmConfig::mainnet(),
wal.handle(),
);
// Create a mock ExExManager and add the exex_handle to it
let max_capacity = 5;
let mut exex_manager = ExExManager::new(
(),
vec![exex_handle],
max_capacity,
wal,
empty_finalized_header_stream(),
);
// Push some notifications to fill part of the buffer
let mut block1: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
block1.set_hash(B256::new([0x01; 32]));
block1.set_block_number(10);
let notification1 = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())),
};
exex_manager.push_notification(notification1.clone());
exex_manager.push_notification(notification1);
// Update capacity
exex_manager.update_capacity();
// Verify current capacity and metrics
assert_eq!(exex_manager.current_capacity.load(Ordering::Relaxed), max_capacity - 2);
// Clear the buffer and update capacity
exex_manager.buffer.clear();
exex_manager.update_capacity();
// Verify current capacity
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/dyn_context.rs | crates/exex/exex/src/dyn_context.rs | //! Mirrored version of [`ExExContext`](`crate::ExExContext`)
//! without generic abstraction over [Node](`reth_node_api::FullNodeComponents`)
use alloy_eips::BlockNumHash;
use reth_chainspec::EthChainSpec;
use reth_ethereum_primitives::EthPrimitives;
use reth_node_api::{FullNodeComponents, HeaderTy, NodePrimitives, NodeTypes, PrimitivesTy};
use reth_node_core::node_config::NodeConfig;
use reth_provider::BlockReader;
use std::fmt::Debug;
use tokio::sync::mpsc;
use crate::{ExExContext, ExExEvent, ExExNotificationsStream};
// TODO(0xurb) - add `node` after abstractions
/// Captures the context that an `ExEx` has access to.
///
/// This is the type-erased counterpart of [`ExExContext`]: the chain spec and the
/// notification stream are stored behind trait objects instead of generic parameters.
pub struct ExExContextDyn<N: NodePrimitives = EthPrimitives> {
    /// The current head of the blockchain at launch.
    pub head: BlockNumHash,
    /// The config of the node
    pub config: NodeConfig<Box<dyn EthChainSpec<Header = N::BlockHeader> + 'static>>,
    /// The loaded node config
    pub reth_config: reth_config::Config,
    /// Channel used to send [`ExExEvent`]s to the rest of the node.
    ///
    /// # Important
    ///
    /// The exex should emit a `FinishedHeight` whenever a processed block is safe to prune.
    /// Additionally, the exex can preemptively emit a `FinishedHeight` event to specify what
    /// blocks to receive notifications for.
    pub events: mpsc::UnboundedSender<ExExEvent>,
    /// Channel to receive [`ExExNotification`](crate::ExExNotification)s.
    ///
    /// # Important
    ///
    /// Once an [`ExExNotification`](crate::ExExNotification) is sent over the channel, it is
    /// considered delivered by the node.
    pub notifications: Box<dyn ExExNotificationsStream<N>>,
}
impl<N: NodePrimitives> Debug for ExExContextDyn<N> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `notifications` is a trait object without a `Debug` bound, so it is
        // rendered as a placeholder.
        let mut builder = f.debug_struct("ExExContext");
        builder
            .field("head", &self.head)
            .field("config", &self.config)
            .field("reth_config", &self.reth_config)
            .field("events", &self.events)
            .field("notifications", &"...");
        builder.finish()
    }
}
impl<Node> From<ExExContext<Node>> for ExExContextDyn<PrimitivesTy<Node::Types>>
where
    Node: FullNodeComponents<Types: NodeTypes<Primitives: NodePrimitives>>,
    Node::Provider: Debug + BlockReader,
{
    fn from(ctx: ExExContext<Node>) -> Self {
        // Type-erase the concrete notification stream behind the
        // `ExExNotificationsStream` trait object.
        let notifications = Box::new(ctx.notifications) as Box<_>;
        // Likewise erase the concrete chain spec type inside the node config.
        let config = ctx.config.map_chainspec(|spec| {
            Box::new(spec) as Box<dyn EthChainSpec<Header = HeaderTy<Node::Types>>>
        });
        Self {
            head: ctx.head,
            config,
            reth_config: ctx.reth_config,
            events: ctx.events,
            notifications,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/context.rs | crates/exex/exex/src/context.rs | use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream};
use alloy_eips::BlockNumHash;
use reth_exex_types::ExExHead;
use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes, PrimitivesTy};
use reth_node_core::node_config::NodeConfig;
use reth_payload_builder::PayloadBuilderHandle;
use reth_provider::BlockReader;
use reth_tasks::TaskExecutor;
use std::fmt::Debug;
use tokio::sync::mpsc::{error::SendError, UnboundedSender};
/// Captures the context that an `ExEx` has access to.
///
/// This type wraps various node components that the `ExEx` has access to.
///
/// A type-erased variant is available via [`ExExContextDyn`]; see [`ExExContext::into_dyn`].
pub struct ExExContext<Node: FullNodeComponents> {
    /// The current head of the blockchain at launch.
    pub head: BlockNumHash,
    /// The config of the node
    pub config: NodeConfig<<Node::Types as NodeTypes>::ChainSpec>,
    /// The loaded node config
    pub reth_config: reth_config::Config,
    /// Channel used to send [`ExExEvent`]s to the rest of the node.
    ///
    /// # Important
    ///
    /// The exex should emit a `FinishedHeight` whenever a processed block is safe to prune.
    /// Additionally, the exex can preemptively emit a `FinishedHeight` event to specify what
    /// blocks to receive notifications for.
    pub events: UnboundedSender<ExExEvent>,
    /// Channel to receive [`ExExNotification`](crate::ExExNotification)s.
    ///
    /// # Important
    ///
    /// Once an [`ExExNotification`](crate::ExExNotification) is sent over the channel, it is
    /// considered delivered by the node.
    pub notifications: ExExNotifications<Node::Provider, Node::Evm>,
    /// Node components, granting access to e.g. the pool, provider, network handle and
    /// task executor via the accessor methods below.
    pub components: Node,
}
impl<Node> Debug for ExExContext<Node>
where
    Node: FullNodeComponents,
    Node::Provider: Debug,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `components` is elided with a placeholder; all other fields are
        // printed via their own `Debug` impls.
        let mut builder = f.debug_struct("ExExContext");
        builder
            .field("head", &self.head)
            .field("config", &self.config)
            .field("reth_config", &self.reth_config)
            .field("events", &self.events)
            .field("notifications", &self.notifications)
            .field("components", &"...");
        builder.finish()
    }
}
impl<Node> ExExContext<Node>
where
    Node: FullNodeComponents,
    Node::Provider: Debug + BlockReader,
    Node::Types: NodeTypes<Primitives: NodePrimitives>,
{
    /// Returns dynamic version of the context
    pub fn into_dyn(self) -> ExExContextDyn<PrimitivesTy<Node::Types>> {
        // Delegates to the `From<ExExContext<Node>>` conversion.
        self.into()
    }
}
impl<Node> ExExContext<Node>
where
    Node: FullNodeComponents,
    Node::Types: NodeTypes<Primitives: NodePrimitives>,
{
    /// Returns the transaction pool of the node.
    pub fn pool(&self) -> &Node::Pool {
        self.components.pool()
    }

    /// Returns the node's evm config.
    pub fn evm_config(&self) -> &Node::Evm {
        self.components.evm_config()
    }

    /// Returns the provider of the node.
    pub fn provider(&self) -> &Node::Provider {
        self.components.provider()
    }

    /// Returns the handle to the network
    pub fn network(&self) -> &Node::Network {
        self.components.network()
    }

    /// Returns the handle to the payload builder service.
    pub fn payload_builder_handle(
        &self,
    ) -> &PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload> {
        self.components.payload_builder_handle()
    }

    /// Returns the task executor.
    ///
    /// This type should be used to spawn (critical) tasks.
    pub fn task_executor(&self) -> &TaskExecutor {
        self.components.task_executor()
    }

    /// Sets notifications stream to [`crate::ExExNotificationsWithoutHead`], a stream of
    /// notifications without a head.
    pub fn set_notifications_without_head(&mut self) {
        self.notifications.set_without_head();
    }

    /// Sets notifications stream to [`crate::ExExNotificationsWithHead`], a stream of notifications
    /// with the provided head.
    pub fn set_notifications_with_head(&mut self, head: ExExHead) {
        self.notifications.set_with_head(head);
    }

    /// Sends an [`ExExEvent::FinishedHeight`] to the ExEx task manager letting it know that this
    /// ExEx has processed the corresponding block.
    ///
    /// Returns an error if the channel was closed (ExEx task manager panicked).
    pub fn send_finished_height(
        &self,
        height: BlockNumHash,
    ) -> Result<(), SendError<BlockNumHash>> {
        // Re-wrap the channel error so callers get back the height they passed
        // in, not the internal event type.
        match self.events.send(ExExEvent::FinishedHeight(height)) {
            Ok(()) => Ok(()),
            Err(_) => Err(SendError(height)),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::ExExContext;
    use reth_exex_types::ExExHead;
    use reth_node_api::FullNodeComponents;
    use reth_provider::BlockReader;
    /// <https://github.com/paradigmxyz/reth/issues/12054>
    // Compile-only regression test: `_test_bounds` is never executed; it merely
    // verifies that every accessor is callable under these trait bounds.
    #[test]
    const fn issue_12054() {
        #[expect(dead_code)]
        struct ExEx<Node: FullNodeComponents> {
            ctx: ExExContext<Node>,
        }
        impl<Node: FullNodeComponents> ExEx<Node>
        where
            Node::Provider: BlockReader,
        {
            async fn _test_bounds(mut self) -> eyre::Result<()> {
                self.ctx.pool();
                self.ctx.evm_config();
                self.ctx.provider();
                self.ctx.network();
                self.ctx.payload_builder_handle();
                self.ctx.task_executor();
                self.ctx.set_notifications_without_head();
                self.ctx.set_notifications_with_head(ExExHead { block: Default::default() });
                Ok(())
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/notifications.rs | crates/exex/exex/src/notifications.rs | use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob, WalHandle};
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumHash;
use futures::{Stream, StreamExt};
use reth_ethereum_primitives::EthPrimitives;
use reth_evm::ConfigureEvm;
use reth_exex_types::ExExHead;
use reth_node_api::NodePrimitives;
use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory};
use reth_tracing::tracing::debug;
use std::{
fmt::Debug,
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
};
use tokio::sync::mpsc::Receiver;
/// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. If the
/// stream is configured with a head via [`ExExNotifications::set_with_head`] or
/// [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head.
#[derive(Debug)]
pub struct ExExNotifications<P, E>
where
    E: ConfigureEvm,
{
    /// Internal state machine: either a head-agnostic or a head-aware stream,
    /// see [`ExExNotificationsInner`].
    inner: ExExNotificationsInner<P, E>,
}
/// A trait, that represents a stream of [`ExExNotification`]s. The stream will emit notifications
/// for all blocks. If the stream is configured with a head via [`ExExNotifications::set_with_head`]
/// or [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head.
pub trait ExExNotificationsStream<N: NodePrimitives = EthPrimitives>:
    Stream<Item = eyre::Result<ExExNotification<N>>> + Unpin
{
    /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s without a head.
    ///
    /// It's a no-op if the stream has already been configured without a head.
    ///
    /// See the documentation of [`ExExNotificationsWithoutHead`] for more details.
    fn set_without_head(&mut self);
    /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s with the provided
    /// head.
    ///
    /// It's a no-op if the stream has already been configured with a head.
    ///
    /// See the documentation of [`ExExNotificationsWithHead`] for more details.
    fn set_with_head(&mut self, exex_head: ExExHead);
    /// Returns a new [`ExExNotificationsStream`] without a head.
    ///
    /// Consumes `self` and returns the reconfigured stream.
    ///
    /// See the documentation of [`ExExNotificationsWithoutHead`] for more details.
    fn without_head(self) -> Self
    where
        Self: Sized;
    /// Returns a new [`ExExNotificationsStream`] with the provided head.
    ///
    /// Consumes `self` and returns the reconfigured stream.
    ///
    /// See the documentation of [`ExExNotificationsWithHead`] for more details.
    fn with_head(self, exex_head: ExExHead) -> Self
    where
        Self: Sized;
}
#[derive(Debug)]
enum ExExNotificationsInner<P, E>
where
    E: ConfigureEvm,
{
    /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks.
    WithoutHead(ExExNotificationsWithoutHead<P, E>),
    /// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that
    /// are committed or reverted after the given head.
    WithHead(Box<ExExNotificationsWithHead<P, E>>),
    /// Internal state used when transitioning between [`ExExNotificationsInner::WithoutHead`] and
    /// [`ExExNotificationsInner::WithHead`].
    ///
    /// Exists only momentarily inside `set_without_head`/`set_with_head` while the previous
    /// variant is moved out by value; it is never observable outside those methods.
    Invalid,
}
impl<P, E> ExExNotifications<P, E>
where
    E: ConfigureEvm,
{
    /// Creates a new stream of [`ExExNotifications`] without a head.
    ///
    /// The stream starts head-agnostic; a head can be attached later via
    /// [`ExExNotificationsStream::set_with_head`].
    pub const fn new(
        node_head: BlockNumHash,
        provider: P,
        evm_config: E,
        notifications: Receiver<ExExNotification<E::Primitives>>,
        wal_handle: WalHandle<E::Primitives>,
    ) -> Self {
        let without_head = ExExNotificationsWithoutHead::new(
            node_head,
            provider,
            evm_config,
            notifications,
            wal_handle,
        );
        Self { inner: ExExNotificationsInner::WithoutHead(without_head) }
    }
}
impl<P, E> ExExNotificationsStream<E::Primitives> for ExExNotifications<P, E>
where
    P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static,
    E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + Clone + Unpin + 'static,
{
    fn set_without_head(&mut self) {
        // Park the state in `Invalid` so the current variant can be moved out of
        // `self.inner` by value; it is overwritten again on the next line.
        let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid);
        self.inner = ExExNotificationsInner::WithoutHead(match current {
            // Already head-agnostic: keep the stream as-is (the documented no-op).
            ExExNotificationsInner::WithoutHead(notifications) => notifications,
            // Head-aware: rebuild a head-agnostic stream from the same parts.
            ExExNotificationsInner::WithHead(notifications) => ExExNotificationsWithoutHead::new(
                notifications.initial_local_head,
                notifications.provider,
                notifications.evm_config,
                notifications.notifications,
                notifications.wal_handle,
            ),
            // `Invalid` only exists while `self.inner` is replaced above, so it
            // can never be observed here.
            ExExNotificationsInner::Invalid => unreachable!(),
        });
    }
    fn set_with_head(&mut self, exex_head: ExExHead) {
        // Same move-out-by-value dance as `set_without_head`.
        let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid);
        self.inner = ExExNotificationsInner::WithHead(match current {
            ExExNotificationsInner::WithoutHead(notifications) => {
                Box::new(notifications.with_head(exex_head))
            }
            // Head-aware already: rebuild with the newly provided head.
            ExExNotificationsInner::WithHead(notifications) => {
                Box::new(ExExNotificationsWithHead::new(
                    notifications.initial_local_head,
                    notifications.provider,
                    notifications.evm_config,
                    notifications.notifications,
                    notifications.wal_handle,
                    exex_head,
                ))
            }
            ExExNotificationsInner::Invalid => unreachable!(),
        });
    }
    fn without_head(mut self) -> Self {
        self.set_without_head();
        self
    }
    fn with_head(mut self, exex_head: ExExHead) -> Self {
        self.set_with_head(exex_head);
        self
    }
}
impl<P, E> Stream for ExExNotifications<P, E>
where
    P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static,
    E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + 'static,
{
    type Item = eyre::Result<ExExNotification<E::Primitives>>;
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Dispatch to whichever variant of the state machine is active.
        match &mut self.get_mut().inner {
            // The head-agnostic stream is infallible; wrap its items in `Ok` to
            // match this stream's fallible item type.
            ExExNotificationsInner::WithoutHead(stream) => {
                stream.poll_next_unpin(cx).map(|item| item.map(Ok))
            }
            ExExNotificationsInner::WithHead(stream) => stream.poll_next_unpin(cx),
            // `Invalid` is transient inside the setters and never observable here.
            ExExNotificationsInner::Invalid => unreachable!(),
        }
    }
}
/// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks.
pub struct ExExNotificationsWithoutHead<P, E>
where
    E: ConfigureEvm,
{
    /// The node's local head when the stream was created; carried along so a
    /// head-aware stream can be built via `with_head`.
    node_head: BlockNumHash,
    /// Provider, only used when converting into [`ExExNotificationsWithHead`].
    provider: P,
    /// EVM config, only used when converting into [`ExExNotificationsWithHead`].
    evm_config: E,
    /// Channel receiver delivering notifications from the manager.
    notifications: Receiver<ExExNotification<E::Primitives>>,
    /// WAL handle, only used when converting into [`ExExNotificationsWithHead`].
    wal_handle: WalHandle<E::Primitives>,
}
impl<P: Debug, E> Debug for ExExNotificationsWithoutHead<P, E>
where
    E: ConfigureEvm + Debug,
{
    /// Formats the stream for debugging.
    ///
    /// `node_head` and `wal_handle` are intentionally omitted, which is signalled
    /// via [`std::fmt::DebugStruct::finish_non_exhaustive`].
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Use the actual type name: the previous `"ExExNotifications"` was
        // indistinguishable from the derived Debug of the wrapper type of that
        // name, which is misleading in logs.
        f.debug_struct("ExExNotificationsWithoutHead")
            .field("provider", &self.provider)
            .field("evm_config", &self.evm_config)
            .field("notifications", &self.notifications)
            .finish_non_exhaustive()
    }
}
impl<P, E> ExExNotificationsWithoutHead<P, E>
where
    E: ConfigureEvm,
{
    /// Creates a new instance of [`ExExNotificationsWithoutHead`].
    const fn new(
        node_head: BlockNumHash,
        provider: P,
        evm_config: E,
        notifications: Receiver<ExExNotification<E::Primitives>>,
        wal_handle: WalHandle<E::Primitives>,
    ) -> Self {
        Self { node_head, provider, evm_config, notifications, wal_handle }
    }

    /// Subscribe to notifications with the given head.
    fn with_head(self, head: ExExHead) -> ExExNotificationsWithHead<P, E> {
        // Tear the stream apart and rebuild it as a head-aware stream.
        let Self { node_head, provider, evm_config, notifications, wal_handle } = self;
        ExExNotificationsWithHead::new(
            node_head,
            provider,
            evm_config,
            notifications,
            wal_handle,
            head,
        )
    }
}
impl<P: Unpin, E> Stream for ExExNotificationsWithoutHead<P, E>
where
    E: ConfigureEvm,
{
    type Item = ExExNotification<E::Primitives>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Plain pass-through to the underlying channel receiver.
        let this = self.get_mut();
        this.notifications.poll_recv(cx)
    }
}
/// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that are
/// committed or reverted after the given head. The head is the ExEx's latest view of the host
/// chain.
///
/// Notifications will be sent starting from the head, not inclusive. For example, if
/// `exex_head.number == 10`, then the first notification will be with `block.number == 11`. An
/// `exex_head.number` of 10 indicates that the ExEx has processed up to block 10, and is ready to
/// process block 11.
#[derive(Debug)]
pub struct ExExNotificationsWithHead<P, E>
where
    E: ConfigureEvm,
{
    /// The node's local head at launch.
    initial_local_head: BlockNumHash,
    /// Provider used for the canonical-chain check and for building backfill jobs.
    provider: P,
    /// EVM config used when constructing backfill jobs.
    evm_config: E,
    /// Channel receiver delivering live notifications from the manager.
    notifications: Receiver<ExExNotification<E::Primitives>>,
    /// WAL handle used to look up committed notifications when the ExEx head is
    /// not on the canonical chain.
    wal_handle: WalHandle<E::Primitives>,
    /// The exex head at launch
    initial_exex_head: ExExHead,
    /// If true, then we need to check if the ExEx head is on the canonical chain and if not,
    /// revert its head.
    pending_check_canonical: bool,
    /// If true, then we need to check if the ExEx head is behind the node head and if so, backfill
    /// the missing blocks.
    pending_check_backfill: bool,
    /// The backfill job to run before consuming any notifications.
    backfill_job: Option<StreamBackfillJob<E, P, Chain<E::Primitives>>>,
}
impl<P, E> ExExNotificationsWithHead<P, E>
where
    E: ConfigureEvm,
{
    /// Creates a new [`ExExNotificationsWithHead`].
    const fn new(
        node_head: BlockNumHash,
        provider: P,
        evm_config: E,
        notifications: Receiver<ExExNotification<E::Primitives>>,
        wal_handle: WalHandle<E::Primitives>,
        exex_head: ExExHead,
    ) -> Self {
        Self {
            initial_local_head: node_head,
            initial_exex_head: exex_head,
            provider,
            evm_config,
            notifications,
            wal_handle,
            // Both one-shot checks must run before any notification is yielded.
            pending_check_canonical: true,
            pending_check_backfill: true,
            // No backfill job until `check_backfill` decides one is needed.
            backfill_job: None,
        }
    }
}
impl<P, E> ExExNotificationsWithHead<P, E>
where
    P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static,
    E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + Clone + Unpin + 'static,
{
    /// Checks if the ExEx head is on the canonical chain.
    ///
    /// If the head block is not found in the database or it's ahead of the node head, it means
    /// we're not on the canonical chain and we need to revert the notification with the ExEx
    /// head block.
    ///
    /// Returns `Ok(None)` when no revert is needed, and `Ok(Some(inverted))` with the
    /// inverted WAL notification that rolls the ExEx back to a canonical ancestor.
    fn check_canonical(&mut self) -> eyre::Result<Option<ExExNotification<E::Primitives>>> {
        if self.provider.is_known(&self.initial_exex_head.block.hash)? &&
            self.initial_exex_head.block.number <= self.initial_local_head.number
        {
            // we have the targeted block and that block is below the current head
            debug!(target: "exex::notifications", "ExEx head is on the canonical chain");
            return Ok(None)
        }
        // If the head block is not found in the database, it means we're not on the canonical
        // chain.
        // Get the committed notification for the head block from the WAL.
        let Some(notification) = self
            .wal_handle
            .get_committed_notification_by_block_hash(&self.initial_exex_head.block.hash)?
        else {
            // it's possible that the exex head is further ahead
            if self.initial_exex_head.block.number > self.initial_local_head.number {
                debug!(target: "exex::notifications", "ExEx head is ahead of the canonical chain");
                return Ok(None);
            }
            // Neither canonical nor recoverable from the WAL: bail out.
            return Err(eyre::eyre!(
                "Could not find notification for block hash {:?} in the WAL",
                self.initial_exex_head.block.hash
            ))
        };
        // Update the head block hash to the parent hash of the first committed block.
        // The WAL lookup above is keyed on committed block hashes, so a committed
        // chain is expected to be present in the notification.
        let committed_chain = notification.committed_chain().unwrap();
        let new_exex_head =
            (committed_chain.first().parent_hash(), committed_chain.first().number() - 1).into();
        debug!(target: "exex::notifications", old_exex_head = ?self.initial_exex_head.block, new_exex_head = ?new_exex_head, "ExEx head updated");
        self.initial_exex_head.block = new_exex_head;
        // Return an inverted notification. See the documentation for
        // `ExExNotification::into_inverted`.
        Ok(Some(notification.into_inverted()))
    }
    /// Compares the node head against the ExEx head, and backfills if needed.
    ///
    /// CAUTION: This method assumes that the ExEx head is <= the node head, and that it's on the
    /// canonical chain.
    ///
    /// Possible situations are:
    /// - ExEx is behind the node head (`node_head.number < exex_head.number`). Backfill from the
    ///   node database.
    /// - ExEx is at the same block number as the node head (`node_head.number ==
    ///   exex_head.number`). Nothing to do.
    fn check_backfill(&mut self) -> eyre::Result<()> {
        let backfill_job_factory =
            BackfillJobFactory::new(self.evm_config.clone(), self.provider.clone());
        match self.initial_exex_head.block.number.cmp(&self.initial_local_head.number) {
            std::cmp::Ordering::Less => {
                // ExEx is behind the node head, start backfill
                debug!(target: "exex::notifications", "ExEx is behind the node head and on the canonical chain, starting backfill");
                // Backfill covers (exex head, node head], i.e. starting at head + 1.
                let backfill = backfill_job_factory
                    .backfill(
                        self.initial_exex_head.block.number + 1..=self.initial_local_head.number,
                    )
                    .into_stream();
                self.backfill_job = Some(backfill);
            }
            std::cmp::Ordering::Equal => {
                debug!(target: "exex::notifications", "ExEx is at the node head");
            }
            std::cmp::Ordering::Greater => {
                debug!(target: "exex::notifications", "ExEx is ahead of the node head");
            }
        };
        Ok(())
    }
}
impl<P, E> Stream for ExExNotificationsWithHead<P, E>
where
    P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static,
    E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + Clone + Unpin + 'static,
{
    type Item = eyre::Result<ExExNotification<E::Primitives>>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        // 1. Check once whether we need to retrieve a notification gap from the WAL.
        if this.pending_check_canonical {
            // A `Some` here is the inverted notification reverting the ExEx to a
            // canonical ancestor; yield it before anything else.
            if let Some(canonical_notification) = this.check_canonical()? {
                return Poll::Ready(Some(Ok(canonical_notification)))
            }
            // ExEx head is on the canonical chain, we no longer need to check it
            this.pending_check_canonical = false;
        }
        // 2. Check once whether we need to trigger backfill sync
        if this.pending_check_backfill {
            this.check_backfill()?;
            this.pending_check_backfill = false;
        }
        // 3. If backfill is in progress yield new notifications
        if let Some(backfill_job) = &mut this.backfill_job {
            debug!(target: "exex::notifications", "Polling backfill job");
            if let Some(chain) = ready!(backfill_job.poll_next_unpin(cx)).transpose()? {
                debug!(target: "exex::notifications", range = ?chain.range(), "Backfill job returned a chain");
                return Poll::Ready(Some(Ok(ExExNotification::ChainCommitted {
                    new: Arc::new(chain),
                })))
            }
            // Backfill job is done, remove it
            this.backfill_job = None;
        }
        // 4. Otherwise advance the regular event stream
        loop {
            // A closed channel terminates the stream.
            let Some(notification) = ready!(this.notifications.poll_recv(cx)) else {
                return Poll::Ready(None)
            };
            // 5. In case the exex is ahead of the new tip, we must skip it
            if let Some(committed) = notification.committed_chain() {
                // inclusive check because we should start with `exex.head + 1`
                if this.initial_exex_head.block.number >= committed.tip().number() {
                    continue
                }
            }
            return Poll::Ready(Some(Ok(notification)))
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Wal;
use alloy_consensus::Header;
use alloy_eips::BlockNumHash;
use eyre::OptionExt;
use futures::StreamExt;
use reth_db_common::init::init_genesis;
use reth_ethereum_primitives::Block;
use reth_evm_ethereum::EthEvmConfig;
use reth_primitives_traits::Block as _;
use reth_provider::{
providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockWriter,
Chain, DatabaseProviderFactory, StorageLocation,
};
use reth_testing_utils::generators::{self, random_block, BlockParams};
use tokio::sync::mpsc;
    #[tokio::test(flavor = "multi_thread")]
    async fn exex_notifications_behind_head_canonical() -> eyre::Result<()> {
        // Scenario: the ExEx head (genesis) is one block behind the node head and
        // on the canonical chain, so the stream must first backfill block 1 and
        // only then deliver the live notification.
        let mut rng = generators::rng();
        let temp_dir = tempfile::tempdir().unwrap();
        let wal = Wal::new(temp_dir.path()).unwrap();
        let provider_factory = create_test_provider_factory();
        let genesis_hash = init_genesis(&provider_factory)?;
        let genesis_block = provider_factory
            .block(genesis_hash.into())?
            .ok_or_else(|| eyre::eyre!("genesis block not found"))?;
        let provider = BlockchainProvider::new(provider_factory.clone())?;
        let node_head_block = random_block(
            &mut rng,
            genesis_block.number + 1,
            BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() },
        );
        let provider_rw = provider_factory.provider_rw()?;
        provider_rw
            .insert_block(node_head_block.clone().try_recover()?, StorageLocation::Database)?;
        provider_rw.commit()?;
        let node_head = node_head_block.num_hash();
        let exex_head =
            ExExHead { block: BlockNumHash { number: genesis_block.number, hash: genesis_hash } };
        // Live notification for the block after the node head.
        let notification = ExExNotification::ChainCommitted {
            new: Arc::new(Chain::new(
                vec![random_block(
                    &mut rng,
                    node_head.number + 1,
                    BlockParams { parent: Some(node_head.hash), ..Default::default() },
                )
                .try_recover()?],
                Default::default(),
                None,
            )),
        };
        let (notifications_tx, notifications_rx) = mpsc::channel(1);
        notifications_tx.send(notification.clone()).await?;
        let mut notifications = ExExNotificationsWithoutHead::new(
            node_head,
            provider,
            EthEvmConfig::mainnet(),
            notifications_rx,
            wal.handle(),
        )
        .with_head(exex_head);
        // First notification is the backfill of missing blocks from the canonical chain
        assert_eq!(
            notifications.next().await.transpose()?,
            Some(ExExNotification::ChainCommitted {
                new: Arc::new(
                    BackfillJobFactory::new(
                        notifications.evm_config.clone(),
                        notifications.provider.clone()
                    )
                    .backfill(1..=1)
                    .next()
                    .ok_or_eyre("failed to backfill")??
                )
            })
        );
        // Second notification is the actual notification that we sent before
        assert_eq!(notifications.next().await.transpose()?, Some(notification));
        Ok(())
    }
#[tokio::test]
async fn exex_notifications_same_head_canonical() -> eyre::Result<()> {
let temp_dir = tempfile::tempdir().unwrap();
let wal = Wal::new(temp_dir.path()).unwrap();
let provider_factory = create_test_provider_factory();
let genesis_hash = init_genesis(&provider_factory)?;
let genesis_block = provider_factory
.block(genesis_hash.into())?
.ok_or_else(|| eyre::eyre!("genesis block not found"))?;
let provider = BlockchainProvider::new(provider_factory)?;
let node_head = BlockNumHash { number: genesis_block.number, hash: genesis_hash };
let exex_head = ExExHead { block: node_head };
let notification = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(
vec![Block {
header: Header {
parent_hash: node_head.hash,
number: node_head.number + 1,
..Default::default()
},
..Default::default()
}
.seal_slow()
.try_recover()?],
Default::default(),
None,
)),
};
let (notifications_tx, notifications_rx) = mpsc::channel(1);
notifications_tx.send(notification.clone()).await?;
let mut notifications = ExExNotificationsWithoutHead::new(
node_head,
provider,
EthEvmConfig::mainnet(),
notifications_rx,
wal.handle(),
)
.with_head(exex_head);
let new_notification = notifications.next().await.transpose()?;
assert_eq!(new_notification, Some(notification));
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
async fn exex_notifications_same_head_non_canonical() -> eyre::Result<()> {
let mut rng = generators::rng();
let temp_dir = tempfile::tempdir().unwrap();
let wal = Wal::new(temp_dir.path()).unwrap();
let provider_factory = create_test_provider_factory();
let genesis_hash = init_genesis(&provider_factory)?;
let genesis_block = provider_factory
.block(genesis_hash.into())?
.ok_or_else(|| eyre::eyre!("genesis block not found"))?;
let provider = BlockchainProvider::new(provider_factory)?;
let node_head_block = random_block(
&mut rng,
genesis_block.number + 1,
BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() },
)
.try_recover()?;
let node_head = node_head_block.num_hash();
let provider_rw = provider.database_provider_rw()?;
provider_rw.insert_block(node_head_block, StorageLocation::Database)?;
provider_rw.commit()?;
let node_head_notification = ExExNotification::ChainCommitted {
new: Arc::new(
BackfillJobFactory::new(EthEvmConfig::mainnet(), provider.clone())
.backfill(node_head.number..=node_head.number)
.next()
.ok_or_else(|| eyre::eyre!("failed to backfill"))??,
),
};
let exex_head_block = random_block(
&mut rng,
genesis_block.number + 1,
BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() },
);
let exex_head = ExExHead { block: exex_head_block.num_hash() };
let exex_head_notification = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(
vec![exex_head_block.clone().try_recover()?],
Default::default(),
None,
)),
};
wal.commit(&exex_head_notification)?;
let new_notification = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(
vec![random_block(
&mut rng,
node_head.number + 1,
BlockParams { parent: Some(node_head.hash), ..Default::default() },
)
.try_recover()?],
Default::default(),
None,
)),
};
let (notifications_tx, notifications_rx) = mpsc::channel(1);
notifications_tx.send(new_notification.clone()).await?;
let mut notifications = ExExNotificationsWithoutHead::new(
node_head,
provider,
EthEvmConfig::mainnet(),
notifications_rx,
wal.handle(),
)
.with_head(exex_head);
// First notification is the revert of the ExEx head block to get back to the canonical
// chain
assert_eq!(
notifications.next().await.transpose()?,
Some(exex_head_notification.into_inverted())
);
// Second notification is the backfilled block from the canonical chain to get back to the
// canonical tip
assert_eq!(notifications.next().await.transpose()?, Some(node_head_notification));
// Third notification is the actual notification that we sent before
assert_eq!(notifications.next().await.transpose()?, Some(new_notification));
Ok(())
}
#[tokio::test]
async fn test_notifications_ahead_of_head() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let mut rng = generators::rng();
let temp_dir = tempfile::tempdir().unwrap();
let wal = Wal::new(temp_dir.path()).unwrap();
let provider_factory = create_test_provider_factory();
let genesis_hash = init_genesis(&provider_factory)?;
let genesis_block = provider_factory
.block(genesis_hash.into())?
.ok_or_else(|| eyre::eyre!("genesis block not found"))?;
let provider = BlockchainProvider::new(provider_factory)?;
let exex_head_block = random_block(
&mut rng,
genesis_block.number + 1,
BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() },
);
let exex_head_notification = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(
vec![exex_head_block.clone().try_recover()?],
Default::default(),
None,
)),
};
wal.commit(&exex_head_notification)?;
let node_head = BlockNumHash { number: genesis_block.number, hash: genesis_hash };
let exex_head = ExExHead {
block: BlockNumHash { number: exex_head_block.number, hash: exex_head_block.hash() },
};
let new_notification = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(
vec![random_block(
&mut rng,
genesis_block.number + 1,
BlockParams { parent: Some(genesis_hash), ..Default::default() },
)
.try_recover()?],
Default::default(),
None,
)),
};
let (notifications_tx, notifications_rx) = mpsc::channel(1);
notifications_tx.send(new_notification.clone()).await?;
let mut notifications = ExExNotificationsWithoutHead::new(
node_head,
provider,
EthEvmConfig::mainnet(),
notifications_rx,
wal.handle(),
)
.with_head(exex_head);
// First notification is the revert of the ExEx head block to get back to the canonical
// chain
assert_eq!(
notifications.next().await.transpose()?,
Some(exex_head_notification.into_inverted())
);
// Second notification is the actual notification that we sent before
assert_eq!(notifications.next().await.transpose()?, Some(new_notification));
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.