repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/util/src/lib.rs | crates/cli/util/src/lib.rs | //! This crate defines a set of commonly used cli utils.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod allocator;
/// Helper function to load a secret key from a file.
pub mod load_secret_key;
pub use load_secret_key::get_secret_key;
/// Cli parsers functions.
pub mod parsers;
pub use parsers::{
hash_or_num_value_parser, parse_duration_from_secs, parse_duration_from_secs_or_ms,
parse_ether_value, parse_socket_address,
};
#[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))]
pub mod sigsegv_handler;
/// Signal handler to extract a backtrace from stack overflow.
///
/// This is a no-op because this platform doesn't support our signal handler's requirements.
#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))]
pub mod sigsegv_handler {
/// No-op function.
pub fn install() {}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/util/src/sigsegv_handler.rs | crates/cli/util/src/sigsegv_handler.rs | //! Signal handler to extract a backtrace from stack overflow.
//!
//! Implementation modified from [`rustc`](https://github.com/rust-lang/rust/blob/3dee9775a8c94e701a08f7b2df2c444f353d8699/compiler/rustc_driver_impl/src/signal_handler.rs).
use std::{
alloc::{alloc, Layout},
fmt, mem, ptr,
};
extern "C" {
fn backtrace_symbols_fd(buffer: *const *mut libc::c_void, size: libc::c_int, fd: libc::c_int);
}
/// Symbolizes the given return addresses directly onto stderr via the libc
/// `backtrace_symbols_fd` helper (no allocation, signal-safe).
fn backtrace_stderr(buffer: &[*mut libc::c_void]) {
    // A slice longer than `c_int::MAX` is practically impossible; if it ever
    // happened we degrade to printing 0 frames rather than panicking.
    let frame_count = libc::c_int::try_from(buffer.len()).unwrap_or(0);
    // SAFETY: `buffer` is a valid slice of return addresses and fd 2 is stderr.
    unsafe { backtrace_symbols_fd(buffer.as_ptr(), frame_count, libc::STDERR_FILENO) };
}
/// Unbuffered, unsynchronized writer to stderr.
///
/// Only acceptable because everything will end soon anyways.
struct RawStderr(());
impl fmt::Write for RawStderr {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
let ret = unsafe { libc::write(libc::STDERR_FILENO, s.as_ptr().cast(), s.len()) };
if ret == -1 {
Err(fmt::Error)
} else {
Ok(())
}
}
}
/// We don't really care how many bytes we actually get out. SIGSEGV comes for our head.
/// Splash stderr with letters of our own blood to warn our friends about the monster.
///
/// Writes the formatted message plus a trailing newline to raw stderr,
/// deliberately ignoring write errors (there is nothing useful to do with
/// them mid-crash).
macro_rules! raw_errln {
    ($tokens:tt) => {
        // Both results are discarded on purpose: see macro docs above.
        let _ = ::core::fmt::Write::write_fmt(&mut RawStderr(()), format_args!($tokens));
        let _ = ::core::fmt::Write::write_char(&mut RawStderr(()), '\n');
    };
}
/// Signal handler installed for SIGSEGV
///
/// Captures up to 256 return addresses with `libc::backtrace`, collapses
/// detected recursion cycles (the common shape of a stack overflow) so the
/// output isn't hundreds of identical frames, and symbolizes the rest
/// straight to stderr.
extern "C" fn print_stack_trace(_: libc::c_int) {
    const MAX_FRAMES: usize = 256;
    // Fixed-size buffer on the (alternate) stack: no allocation in a signal handler.
    let mut stack_trace: [*mut libc::c_void; MAX_FRAMES] = [ptr::null_mut(); MAX_FRAMES];
    let stack = unsafe {
        // Collect return addresses
        let depth = libc::backtrace(stack_trace.as_mut_ptr(), MAX_FRAMES as i32);
        if depth == 0 {
            return
        }
        &stack_trace[0..depth as usize]
    };
    // Just a stack trace is cryptic. Explain what we're doing.
    raw_errln!("error: reth interrupted by SIGSEGV, printing backtrace\n");
    // `written` counts output lines (to decide later whether we scrolled the
    // header away); `consumed` counts frames already printed.
    let mut written = 1;
    let mut consumed = 0;
    // Begin elaborating return addrs into symbols and writing them directly to stderr
    // Most backtraces are stack overflow, most stack overflows are from recursion
    // Check for cycles before writing 250 lines of the same ~5 symbols
    // Two-speed scan: the "runner" advances two frames per "walker" frame, so a
    // match reveals a repeating period in the trace.
    let cycled = |(runner, walker)| runner == walker;
    let mut cyclic = false;
    if let Some(period) = stack.iter().skip(1).step_by(2).zip(stack).position(cycled) {
        let period = period.saturating_add(1); // avoid "what if wrapped?" branches
        // Re-scan at offset `period` to find where the cycle first enters the trace.
        let Some(offset) = stack.iter().skip(period).zip(stack).position(cycled) else {
            // impossible.
            return
        };
        // Count matching trace slices, else we could miscount "biphasic cycles"
        // with the same period + loop entry but a different inner loop
        let next_cycle = stack[offset..].chunks_exact(period).skip(1);
        let cycles = 1 + next_cycle
            .zip(stack[offset..].chunks_exact(period))
            .filter(|(next, prev)| next == prev)
            .count();
        // Print the frames leading up to the cycle entry.
        backtrace_stderr(&stack[..offset]);
        written += offset;
        consumed += offset;
        if cycles > 1 {
            raw_errln!("\n### cycle encountered after {offset} frames with period {period}");
            // Print exactly one iteration of the cycle, then summarize the rest.
            backtrace_stderr(&stack[consumed..consumed + period]);
            raw_errln!("### recursed {cycles} times\n");
            // +4 lines: blank, cycle header, recursion note, blank.
            written += period + 4;
            consumed += period * cycles;
            cyclic = true;
        };
    }
    // Whatever frames were not part of a detected cycle.
    let rem = &stack[consumed..];
    backtrace_stderr(rem);
    raw_errln!("");
    written += rem.len() + 1;
    let random_depth = || 8 * 16; // chosen by random diceroll (2d20)
    if cyclic || stack.len() > random_depth() {
        // technically speculation, but assert it with confidence anyway.
        // We only arrived in this signal handler because bad things happened
        // and this message is for explaining it's not the programmer's fault
        raw_errln!("note: reth unexpectedly overflowed its stack! this is a bug");
        written += 1;
    }
    if stack.len() == MAX_FRAMES {
        // The fixed buffer filled up; deeper frames were dropped by `backtrace`.
        raw_errln!("note: maximum backtrace depth reached, frames may have been lost");
        written += 1;
    }
    raw_errln!("note: we would appreciate a report at https://github.com/paradigmxyz/reth");
    written += 1;
    if written > 24 {
        // We probably just scrolled the earlier "we got SIGSEGV" message off the terminal
        raw_errln!("note: backtrace dumped due to SIGSEGV! resuming signal");
    }
}
/// Installs a SIGSEGV handler.
///
/// When SIGSEGV is delivered to the process, print a stack trace and then exit.
///
/// An alternate signal stack is registered first so the handler can still run
/// when the fault is a stack overflow (the main stack is unusable then).
pub fn install() {
    unsafe {
        // Minimum size the platform requires, plus 64 KiB of headroom for the
        // backtrace/symbolization work done inside the handler.
        let alt_stack_size: usize = min_sigstack_size() + 64 * 1024;
        let mut alt_stack: libc::stack_t = mem::zeroed();
        // NOTE(review): `alloc` may return null on OOM; that is not checked here,
        // so `sigaltstack` could be registered with a null base — presumably
        // acceptable for a best-effort crash handler, but worth confirming.
        alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast();
        alt_stack.ss_size = alt_stack_size;
        libc::sigaltstack(&raw const alt_stack, ptr::null_mut());
        let mut sa: libc::sigaction = mem::zeroed();
        sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
        // SA_ONSTACK: run on the alternate stack above.
        // SA_RESETHAND: restore the default action after one delivery, so the
        // re-raised signal terminates the process instead of looping.
        // SA_NODEFER: don't block SIGSEGV while the handler runs.
        sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
        libc::sigemptyset(&raw mut sa.sa_mask);
        libc::sigaction(libc::SIGSEGV, &raw const sa, ptr::null_mut());
    }
}
/// Modern kernels on modern hardware can have dynamic signal stack sizes.
#[cfg(any(target_os = "linux", target_os = "android"))]
fn min_sigstack_size() -> usize {
    // aux-vector key under which the kernel reports its minimum signal stack size.
    const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51;
    // SAFETY: getauxval performs a plain aux-vector lookup with no preconditions.
    let dynamic = unsafe { libc::getauxval(AT_MINSIGSTKSZ) } as usize;
    // getauxval returns 0 when the entry is absent (older kernels), so taking
    // the larger of the two transparently falls back to the libc constant.
    core::cmp::max(libc::MINSIGSTKSZ, dynamic)
}
/// Not all OS support hardware where this is needed.
///
/// Fallback for platforms without `getauxval`: use the libc-provided constant
/// minimum signal stack size directly.
#[cfg(not(any(target_os = "linux", target_os = "android")))]
const fn min_sigstack_size() -> usize {
    libc::MINSIGSTKSZ
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/util/src/load_secret_key.rs | crates/cli/util/src/load_secret_key.rs | use reth_fs_util::{self as fs, FsPathError};
use secp256k1::{Error as SecretKeyBaseError, SecretKey};
use std::{
io,
path::{Path, PathBuf},
};
use thiserror::Error;
/// Convenience function to create a new random [`SecretKey`]
///
/// Randomness is drawn from the `rand_08` crate's thread-local RNG.
pub fn rng_secret_key() -> SecretKey {
    SecretKey::new(&mut rand_08::thread_rng())
}
/// Errors returned by loading a [`SecretKey`], including IO errors.
#[derive(Error, Debug)]
pub enum SecretKeyError {
    /// Error encountered during decoding of the secret key.
    #[error(transparent)]
    SecretKeyDecodeError(#[from] SecretKeyBaseError),
    /// Error related to file system path operations.
    #[error(transparent)]
    SecretKeyFsPathError(#[from] FsPathError),
    /// Represents an error when failed to access the key file.
    ///
    /// Produced when probing the key file's existence itself fails (e.g.
    /// permission denied), as opposed to the file merely being absent.
    #[error("failed to access key file {secret_file:?}: {error}")]
    FailedToAccessKeyFile {
        /// The encountered IO error.
        error: io::Error,
        /// Path to the secret key file.
        secret_file: PathBuf,
    },
}
/// Attempts to load a [`SecretKey`] from a specified path. If no file exists there, then it
/// generates a secret key and stores it in the provided path. I/O errors might occur during write
/// operations in the form of a [`SecretKeyError`]
pub fn get_secret_key(secret_key_path: &Path) -> Result<SecretKey, SecretKeyError> {
    // A failed existence probe (not the same as "absent") is surfaced with the path.
    let key_exists = secret_key_path.try_exists().map_err(|error| {
        SecretKeyError::FailedToAccessKeyFile {
            error,
            secret_file: secret_key_path.to_path_buf(),
        }
    })?;
    if key_exists {
        // Load and decode the previously persisted hex-encoded key.
        let stored = fs::read_to_string(secret_key_path)?;
        return stored.as_str().parse().map_err(SecretKeyError::SecretKeyDecodeError)
    }
    // No key yet: generate one and persist it for future runs.
    if let Some(parent) = secret_key_path.parent() {
        // Make sure the parent directory exists before writing.
        fs::create_dir_all(parent)?;
    }
    let fresh = rng_secret_key();
    fs::write(secret_key_path, alloy_primitives::hex::encode(fresh.as_ref()))?;
    Ok(fresh)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/util/src/allocator.rs | crates/cli/util/src/allocator.rs | //! Custom allocator implementation.
//!
//! We provide support for jemalloc and snmalloc on unix systems, and prefer jemalloc if both are
//! enabled.
// We provide jemalloc allocator support, alongside snmalloc. If both features are enabled, jemalloc
// is prioritized.
cfg_if::cfg_if! {
if #[cfg(all(feature = "jemalloc", unix))] {
type AllocatorInner = tikv_jemallocator::Jemalloc;
} else if #[cfg(all(feature = "snmalloc", unix))] {
type AllocatorInner = snmalloc_rs::SnMalloc;
} else {
type AllocatorInner = std::alloc::System;
}
}
// This is to prevent clippy unused warnings when we do `--all-features`
cfg_if::cfg_if! {
if #[cfg(all(feature = "snmalloc", feature = "jemalloc", unix))] {
use snmalloc_rs as _;
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "tracy-allocator")] {
type AllocatorWrapper = tracy_client::ProfiledAllocator<AllocatorInner>;
tracy_client::register_demangler!();
const fn new_allocator_wrapper() -> AllocatorWrapper {
AllocatorWrapper::new(AllocatorInner {}, 100)
}
} else {
type AllocatorWrapper = AllocatorInner;
const fn new_allocator_wrapper() -> AllocatorWrapper {
AllocatorInner {}
}
}
}
/// Custom allocator.
///
/// Resolves (via the `cfg_if` selection above) to jemalloc, snmalloc, or the
/// system allocator, optionally wrapped by the tracy profiling allocator.
pub type Allocator = AllocatorWrapper;

/// Creates a new [custom allocator][Allocator].
pub const fn new_allocator() -> Allocator {
    // Delegates to whichever constructor the cfg_if selection produced.
    new_allocator_wrapper()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/util/src/parsers.rs | crates/cli/util/src/parsers.rs | use alloy_eips::BlockHashOrNumber;
use alloy_primitives::B256;
use reth_fs_util::FsPathError;
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs},
path::Path,
str::FromStr,
time::Duration,
};
/// Helper to parse a [`Duration`] from a whole number of seconds (e.g. `"5"` -> 5s).
///
/// # Errors
/// Returns a [`std::num::ParseIntError`] if `arg` is not a valid unsigned integer.
pub fn parse_duration_from_secs(arg: &str) -> Result<Duration, std::num::ParseIntError> {
    // `eyre::Result` with an explicit error type is just a misleading alias for
    // the std `Result` spelled out here; the type itself is unchanged.
    arg.parse().map(Duration::from_secs)
}
/// Helper to parse a [`Duration`] from seconds if it's a number or milliseconds if the input
/// contains a `ms` suffix:
/// * `5ms` -> 5 milliseconds
/// * `5` -> 5 seconds
/// * `5s` -> 5 seconds
///
/// # Errors
/// Returns a [`std::num::ParseIntError`] if the numeric part is not a valid
/// unsigned integer. Repeated suffixes such as `"5msms"` or `"5ss"` are
/// rejected rather than silently accepted.
pub fn parse_duration_from_secs_or_ms(
    arg: &str,
) -> Result<Duration, std::num::ParseIntError> {
    // Check `ms` before `s`: every `ms` string also ends with `s`.
    // `strip_suffix` removes exactly one suffix occurrence, unlike the former
    // `trim_end_matches`, which stripped repeats and accepted malformed input.
    if let Some(millis) = arg.strip_suffix("ms") {
        millis.parse().map(Duration::from_millis)
    } else if let Some(secs) = arg.strip_suffix('s') {
        secs.parse().map(Duration::from_secs)
    } else {
        arg.parse().map(Duration::from_secs)
    }
}
/// Parse [`BlockHashOrNumber`]
///
/// Interprets `value` as a 32-byte block hash when it decodes as one, and as a
/// block number otherwise.
pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber, eyre::Error> {
    if let Ok(hash) = B256::from_str(value) {
        return Ok(BlockHashOrNumber::Hash(hash))
    }
    Ok(BlockHashOrNumber::Number(value.parse()?))
}
/// Error thrown while parsing a socket address.
#[derive(thiserror::Error, Debug)]
pub enum SocketAddressParsingError {
    /// Failed to convert the string into a socket addr
    #[error("could not parse socket address: {0}")]
    Io(#[from] std::io::Error),
    /// Input must not be empty
    #[error("cannot parse socket address from empty string")]
    Empty,
    /// Failed to parse the address
    ///
    /// Carries the original input string so the message can echo it back.
    #[error("could not parse socket address from {0}")]
    Parse(String),
    /// Failed to parse port
    #[error("could not parse port: {0}")]
    Port(#[from] std::num::ParseIntError),
}
/// Parse a [`SocketAddr`] from a `str`.
///
/// The following formats are checked:
///
/// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the
///   hostname is set to `localhost`.
/// - If the value contains `:` it is assumed to be the format `<host>:<port>`
/// - Otherwise it is assumed to be a hostname
///
/// An error is returned if the value is empty.
pub fn parse_socket_address(value: &str) -> eyre::Result<SocketAddr, SocketAddressParsingError> {
    if value.is_empty() {
        return Err(SocketAddressParsingError::Empty)
    }
    // Shared constructor for the "port on localhost" cases below.
    let localhost = |port: u16| SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port);
    // `:9000` and `localhost:9000` are both treated as a bare port.
    let bare_port = value.strip_prefix(':').or_else(|| value.strip_prefix("localhost:"));
    if let Some(raw_port) = bare_port {
        return Ok(localhost(raw_port.parse()?))
    }
    // A plain number (`9000`) is also a bare port.
    if let Ok(port) = value.parse::<u16>() {
        return Ok(localhost(port))
    }
    // Otherwise resolve it as `<host>:<port>` / hostname via the system resolver.
    value
        .to_socket_addrs()?
        .next()
        .ok_or_else(|| SocketAddressParsingError::Parse(value.to_string()))
}
/// Wrapper around [`reth_fs_util::read_json_file`] which can be used as a clap value parser.
///
/// Deserializes the JSON file at `path` into `T`; failures are reported as
/// [`FsPathError`].
pub fn read_json_from_file<T: serde::de::DeserializeOwned>(path: &str) -> Result<T, FsPathError> {
    reth_fs_util::read_json_file(Path::new(path))
}
/// Parses an ether value from a string.
///
/// The amount in eth like "1.05" will be interpreted in wei (1.05 * 1e18).
/// Supports both decimal and integer inputs.
///
/// # Examples
/// - "1.05" -> 1.05 ETH = 1.05 * 10^18 wei
/// - "2" -> 2 ETH = 2 * 10^18 wei
///
/// # Errors
/// Returns an error if the input is not a number, is negative, or is not a
/// finite value (`NaN` / `inf`).
pub fn parse_ether_value(value: &str) -> eyre::Result<u128> {
    let eth = value.parse::<f64>()?;
    // Reject non-finite values explicitly: `NaN as u128` saturates to 0 and
    // `inf as u128` to `u128::MAX`, so both would previously slip through as
    // silently corrupted amounts ("NaN" parsed to 0 wei).
    if !eth.is_finite() {
        return Err(eyre::eyre!("Ether value must be a finite number"))
    }
    if eth.is_sign_negative() {
        return Err(eyre::eyre!("Ether value cannot be negative"))
    }
    let wei = eth * 1e18;
    Ok(wei as u128)
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::Rng;

    #[test]
    fn parse_socket_addresses() {
        // All three accepted spellings of "port 9000 on localhost".
        for value in ["localhost:9000", ":9000", "9000"] {
            let socket_addr = parse_socket_address(value)
                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));
            assert!(socket_addr.ip().is_loopback());
            assert_eq!(socket_addr.port(), 9000);
        }
    }

    #[test]
    fn parse_socket_address_random() {
        // Same as above, but with a randomly chosen port.
        let port: u16 = rand::rng().random();
        for value in [format!("localhost:{port}"), format!(":{port}"), port.to_string()] {
            let socket_addr = parse_socket_address(&value)
                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));
            assert!(socket_addr.ip().is_loopback());
            assert_eq!(socket_addr.port(), port);
        }
    }

    #[test]
    fn parse_ms_or_seconds() {
        let ms = parse_duration_from_secs_or_ms("5ms").unwrap();
        assert_eq!(ms, Duration::from_millis(5));
        let seconds = parse_duration_from_secs_or_ms("5").unwrap();
        assert_eq!(seconds, Duration::from_secs(5));
        let seconds = parse_duration_from_secs_or_ms("5s").unwrap();
        assert_eq!(seconds, Duration::from_secs(5));
        // Unsupported suffixes (e.g. nanoseconds) must be rejected.
        assert!(parse_duration_from_secs_or_ms("5ns").is_err());
    }

    #[test]
    fn parse_ether_values() {
        // Test basic decimal value
        let wei = parse_ether_value("1.05").unwrap();
        assert_eq!(wei, 1_050_000_000_000_000_000u128);
        // Test integer value
        let wei = parse_ether_value("2").unwrap();
        assert_eq!(wei, 2_000_000_000_000_000_000u128);
        // Test zero
        let wei = parse_ether_value("0").unwrap();
        assert_eq!(wei, 0);
        // Test negative value fails
        assert!(parse_ether_value("-1").is_err());
        // Test invalid input fails
        assert!(parse_ether_value("abc").is_err());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/prune.rs | crates/cli/commands/src/prune.rs | //! Command that runs pruning without any limits.
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use clap::Parser;
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_prune::PrunerBuilder;
use reth_static_file::StaticFileProducer;
use std::sync::Arc;
use tracing::info;
/// Prunes according to the configuration without any limits
#[derive(Debug, Parser)]
pub struct PruneCommand<C: ChainSpecParser> {
    /// Shared environment arguments (see [`EnvironmentArgs`]).
    #[command(flatten)]
    env: EnvironmentArgs<C>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> PruneCommand<C> {
    /// Execute the `prune` command
    ///
    /// First moves eligible data from the database into static files, then runs
    /// the pruner — with no delete limit — up to the lowest block height that
    /// was copied.
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(self) -> eyre::Result<()> {
        // Needs read-write access: both the static-file producer and the pruner mutate state.
        let Environment { config, provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
        let prune_config = config.prune.unwrap_or_default();
        // Copy data from database to static files
        info!(target: "reth::cli", "Copying data from database to static files...");
        let static_file_producer =
            StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone());
        let lowest_static_file_height =
            static_file_producer.lock().copy_to_static_files()?.min_block_num();
        info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files");
        // Delete data which has been copied to static files.
        // `None` here means nothing was copied, so there is nothing safe to prune.
        if let Some(prune_tip) = lowest_static_file_height {
            info!(target: "reth::cli", ?prune_tip, ?prune_config, "Pruning data from database...");
            // Run the pruner according to the configuration, and don't enforce any limits on it
            let mut pruner = PrunerBuilder::new(prune_config)
                .delete_limit(usize::MAX)
                .build_with_provider_factory(provider_factory);
            pruner.run(prune_tip)?;
            info!(target: "reth::cli", "Pruned data from database");
        }
        Ok(())
    }
}
impl<C: ChainSpecParser> PruneCommand<C> {
    /// Returns the underlying chain being used to run this command
    ///
    /// Always `Some`: the chain spec is carried by the environment arguments.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        Some(&self.env.chain)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/node.rs | crates/cli/commands/src/node.rs | //! Main node command for launching a node
use crate::launcher::Launcher;
use clap::{value_parser, Args, Parser};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_runner::CliContext;
use reth_cli_util::parse_socket_address;
use reth_db::init_db;
use reth_node_builder::NodeBuilder;
use reth_node_core::{
args::{
DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, NetworkArgs,
PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs,
},
node_config::NodeConfig,
version,
};
use std::{ffi::OsString, fmt, net::SocketAddr, path::PathBuf, sync::Arc};
use reth_node_core::args::EnclaveArgs;
/// Start the node
#[derive(Debug, Parser)]
pub struct NodeCommand<C: ChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs> {
/// The path to the configuration file to use.
#[arg(long, value_name = "FILE", verbatim_doc_comment)]
pub config: Option<PathBuf>,
/// The chain this node is running.
///
/// Possible values are either a built-in chain or the path to a chain specification file.
#[arg(
long,
value_name = "CHAIN_OR_PATH",
long_help = C::help_message(),
default_value = C::SUPPORTED_CHAINS[0],
default_value_if("dev", "true", "dev"),
value_parser = C::parser(),
required = false,
)]
pub chain: Arc<C::ChainSpec>,
/// Enable Prometheus metrics.
///
/// The metrics will be served at the given interface and port.
#[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")]
pub metrics: Option<SocketAddr>,
/// Add a new instance of a node.
///
/// Configures the ports of the node to avoid conflicts with the defaults.
/// This is useful for running multiple nodes on the same machine.
///
/// Max number of instances is 200. It is chosen in a way so that it's not possible to have
/// port numbers that conflict with each other.
///
/// Changes to the following port numbers:
/// - `DISCOVERY_PORT`: default + `instance` - 1
/// - `AUTH_PORT`: default + `instance` * 100 - 100
/// - `HTTP_RPC_PORT`: default - `instance` + 1
/// - `WS_RPC_PORT`: default + `instance` * 2 - 2
/// - `IPC_PATH`: default + `-instance`
#[arg(long, value_name = "INSTANCE", global = true, value_parser = value_parser!(u16).range(..=200))]
pub instance: Option<u16>,
/// Sets all ports to unused, allowing the OS to choose random unused ports when sockets are
/// bound.
///
/// Mutually exclusive with `--instance`.
#[arg(long, conflicts_with = "instance", global = true)]
pub with_unused_ports: bool,
/// All datadir related arguments
#[command(flatten)]
pub datadir: DatadirArgs,
/// All networking related arguments
#[command(flatten)]
pub network: NetworkArgs,
/// All rpc related arguments
#[command(flatten)]
pub rpc: RpcServerArgs,
/// All txpool related arguments with --txpool prefix
#[command(flatten)]
pub txpool: TxPoolArgs,
/// All payload builder related arguments
#[command(flatten)]
pub builder: PayloadBuilderArgs,
/// All debug related arguments with --debug prefix
#[command(flatten)]
pub debug: DebugArgs,
/// All database related arguments
#[command(flatten)]
pub db: DatabaseArgs,
/// All dev related arguments with --dev prefix
#[command(flatten)]
pub dev: DevArgs,
/// All pruning related arguments
#[command(flatten)]
pub pruning: PruningArgs,
/// Engine cli arguments
#[command(flatten, next_help_heading = "Engine")]
pub engine: EngineArgs,
/// All ERA related arguments with --era prefix
#[command(flatten, next_help_heading = "ERA")]
pub era: EraArgs,
/// Additional cli arguments
#[command(flatten, next_help_heading = "Extension")]
pub ext: Ext,
/// All enclave related arguments
#[command(flatten)]
pub enclave: EnclaveArgs,
}
impl<C: ChainSpecParser> NodeCommand<C> {
    /// Parsers only the default CLI arguments
    ///
    /// Equivalent to [`clap::Parser::parse`] for the command without extension args.
    pub fn parse_args() -> Self {
        Self::parse()
    }

    /// Parsers only the default [`NodeCommand`] arguments from the given iterator
    ///
    /// Returns a clap error instead of exiting the process on invalid input,
    /// which is what the unit tests below rely on.
    pub fn try_parse_args_from<I, T>(itr: I) -> Result<Self, clap::error::Error>
    where
        I: IntoIterator<Item = T>,
        T: Into<OsString> + Clone,
    {
        Self::try_parse_from(itr)
    }
}
impl<C, Ext> NodeCommand<C, Ext>
where
    C: ChainSpecParser,
    C::ChainSpec: EthChainSpec + EthereumHardforks,
    Ext: clap::Args + fmt::Debug,
{
    /// Launches the node
    ///
    /// This transforms the node command into a node config and launches the node using the given
    /// launcher.
    pub async fn execute<L>(self, ctx: CliContext, launcher: L) -> eyre::Result<()>
    where
        L: Launcher<C, Ext>,
    {
        tracing::info!(target: "reth::cli", version = ?version::version_metadata().short_version, "Starting reth");
        // Destructure the command: every field moves into the node config below,
        // so `self` must not be referenced past this point.
        let Self {
            enclave,
            datadir,
            config,
            chain,
            metrics,
            instance,
            with_unused_ports,
            network,
            rpc,
            txpool,
            builder,
            debug,
            db,
            dev,
            pruning,
            ext,
            engine,
            era,
        } = self;
        // set up node config
        let mut node_config = NodeConfig {
            enclave,
            datadir,
            config,
            chain,
            metrics,
            instance,
            network,
            rpc,
            txpool,
            builder,
            debug,
            db,
            dev,
            pruning,
            engine,
            era,
        };
        let data_dir = node_config.datadir();
        let db_path = data_dir.db();
        tracing::info!(target: "reth::cli", path = ?db_path, "Opening database");
        // Fix: `self` was consumed by the destructuring above, so the database
        // args must be read from `node_config` — `self.db` here was a use-after-move.
        let database =
            Arc::new(init_db(db_path.clone(), node_config.db.database_args())?.with_metrics());
        if with_unused_ports {
            node_config = node_config.with_unused_ports();
        }
        let builder = NodeBuilder::new(node_config)
            .with_database(database)
            .with_launch_context(ctx.task_executor);
        launcher.entrypoint(builder, ext).await
    }
}
impl<C: ChainSpecParser, Ext: clap::Args + fmt::Debug> NodeCommand<C, Ext> {
    /// Returns the underlying chain being used to run this command
    ///
    /// Always `Some`: `--chain` has a default value, so the field is always populated.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        Some(&self.chain)
    }
}
/// No Additional arguments
#[derive(Debug, Clone, Copy, Default, Args)]
#[non_exhaustive]
pub struct NoArgs;
#[cfg(test)]
mod tests {
use super::*;
use reth_discv4::DEFAULT_DISCOVERY_PORT;
use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS};
use std::{
net::{IpAddr, Ipv4Addr},
path::Path,
};
#[test]
fn parse_help_node_command() {
let err = NodeCommand::<EthereumChainSpecParser>::try_parse_args_from(["reth", "--help"])
.unwrap_err();
assert_eq!(err.kind(), clap::error::ErrorKind::DisplayHelp);
}
#[test]
fn parse_common_node_command_chain_args() {
for chain in SUPPORTED_CHAINS {
let args: NodeCommand<EthereumChainSpecParser> =
NodeCommand::parse_from(["reth", "--chain", chain]);
assert_eq!(args.chain.chain, chain.parse::<reth_chainspec::Chain>().unwrap());
}
}
#[test]
fn parse_discovery_addr() {
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth", "--discovery.addr", "127.0.0.1"]).unwrap();
assert_eq!(cmd.network.discovery.addr, IpAddr::V4(Ipv4Addr::LOCALHOST));
}
#[test]
fn parse_addr() {
let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::try_parse_args_from([
"reth",
"--discovery.addr",
"127.0.0.1",
"--addr",
"127.0.0.1",
])
.unwrap();
assert_eq!(cmd.network.discovery.addr, IpAddr::V4(Ipv4Addr::LOCALHOST));
assert_eq!(cmd.network.addr, IpAddr::V4(Ipv4Addr::LOCALHOST));
}
#[test]
fn parse_discovery_port() {
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth", "--discovery.port", "300"]).unwrap();
assert_eq!(cmd.network.discovery.port, 300);
}
#[test]
fn parse_port() {
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth", "--discovery.port", "300", "--port", "99"])
.unwrap();
assert_eq!(cmd.network.discovery.port, 300);
assert_eq!(cmd.network.port, 99);
}
#[test]
fn parse_metrics_port() {
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth", "--metrics", "9001"]).unwrap();
assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)));
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth", "--metrics", ":9001"]).unwrap();
assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)));
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth", "--metrics", "localhost:9001"]).unwrap();
assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)));
}
#[test]
fn parse_config_path() {
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap();
// always store reth.toml in the data dir, not the chain specific data dir
let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain);
let config_path = cmd.config.unwrap_or_else(|| data_dir.config());
assert_eq!(config_path, Path::new("my/path/to/reth.toml"));
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth"]).unwrap();
// always store reth.toml in the data dir, not the chain specific data dir
let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain);
let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config());
let end = format!("{}/reth.toml", SUPPORTED_CHAINS[0]);
assert!(config_path.ends_with(end), "{:?}", cmd.config);
}
#[test]
fn parse_db_path() {
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth"]).unwrap();
let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain);
let db_path = data_dir.db();
let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]);
assert!(db_path.ends_with(end), "{:?}", cmd.config);
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap();
let data_dir = cmd.datadir.resolve_datadir(cmd.chain.chain);
let db_path = data_dir.db();
assert_eq!(db_path, Path::new("my/custom/path/db"));
}
#[test]
fn parse_instance() {
let mut cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::parse_from(["reth"]);
cmd.rpc.adjust_instance_ports(cmd.instance);
cmd.network.port = DEFAULT_DISCOVERY_PORT;
// check rpc port numbers
assert_eq!(cmd.rpc.auth_port, 8551);
assert_eq!(cmd.rpc.http_port, 8545);
assert_eq!(cmd.rpc.ws_port, 8546);
// check network listening port number
assert_eq!(cmd.network.port, 30303);
let mut cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::parse_from(["reth", "--instance", "2"]);
cmd.rpc.adjust_instance_ports(cmd.instance);
cmd.network.port = DEFAULT_DISCOVERY_PORT + 2 - 1;
// check rpc port numbers
assert_eq!(cmd.rpc.auth_port, 8651);
assert_eq!(cmd.rpc.http_port, 8544);
assert_eq!(cmd.rpc.ws_port, 8548);
// check network listening port number
assert_eq!(cmd.network.port, 30304);
let mut cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::parse_from(["reth", "--instance", "3"]);
cmd.rpc.adjust_instance_ports(cmd.instance);
cmd.network.port = DEFAULT_DISCOVERY_PORT + 3 - 1;
// check rpc port numbers
assert_eq!(cmd.rpc.auth_port, 8751);
assert_eq!(cmd.rpc.http_port, 8543);
assert_eq!(cmd.rpc.ws_port, 8550);
// check network listening port number
assert_eq!(cmd.network.port, 30305);
}
#[test]
fn parse_with_unused_ports() {
let cmd: NodeCommand<EthereumChainSpecParser> =
NodeCommand::parse_from(["reth", "--with-unused-ports"]);
assert!(cmd.with_unused_ports);
}
#[test]
fn with_unused_ports_conflicts_with_instance() {
let err = NodeCommand::<EthereumChainSpecParser>::try_parse_args_from([
"reth",
"--with-unused-ports",
"--instance",
"2",
])
.unwrap_err();
assert_eq!(err.kind(), clap::error::ErrorKind::ArgumentConflict);
}
#[test]
fn with_unused_ports_check_zero() {
let mut cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::parse_from(["reth"]);
cmd.rpc = cmd.rpc.with_unused_ports();
cmd.network = cmd.network.with_unused_ports();
// make sure the rpc ports are zero
assert_eq!(cmd.rpc.auth_port, 0);
assert_eq!(cmd.rpc.http_port, 0);
assert_eq!(cmd.rpc.ws_port, 0);
// make sure the network ports are zero
assert_eq!(cmd.network.port, 0);
assert_eq!(cmd.network.discovery.port, 0);
// make sure the ipc path is not the default
assert_ne!(cmd.rpc.ipcpath, String::from("/tmp/reth.ipc"));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/lib.rs | crates/cli/commands/src/lib.rs | //! Commonly used reth CLI commands.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod common;
pub mod config_cmd;
pub mod db;
pub mod download;
pub mod dump_genesis;
pub mod export_era;
pub mod import;
pub mod import_core;
pub mod import_era;
pub mod init_cmd;
pub mod init_state;
pub mod launcher;
pub mod node;
pub mod p2p;
pub mod prune;
pub mod re_execute;
pub mod recover;
pub mod stage;
#[cfg(feature = "arbitrary")]
pub mod test_vectors;
pub use node::NodeCommand;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/dump_genesis.rs | crates/cli/commands/src/dump_genesis.rs | //! Command that dumps genesis block JSON configuration to stdout
use std::sync::Arc;
use clap::Parser;
use reth_chainspec::EthChainSpec;
use reth_cli::chainspec::ChainSpecParser;
/// Dumps genesis block JSON configuration to stdout
// NOTE: the `///` comments on this struct and its fields double as clap's
// generated help text, so they are intentionally left as-is.
#[derive(Debug, Parser)]
pub struct DumpGenesisCommand<C: ChainSpecParser> {
    /// The chain this node is running.
    ///
    /// Possible values are either a built-in chain or the path to a chain specification file.
    // Help message, default value and parser are all supplied by the
    // chain-spec parser so each CLI build advertises its own chains.
    #[arg(
        long,
        value_name = "CHAIN_OR_PATH",
        long_help = C::help_message(),
        default_value = C::SUPPORTED_CHAINS[0],
        value_parser = C::parser()
    )]
    chain: Arc<C::ChainSpec>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec>> DumpGenesisCommand<C> {
    /// Execute the `dump-genesis` command.
    ///
    /// Serializes the configured chain's genesis block as pretty-printed JSON
    /// and writes it to stdout.
    pub async fn execute(self) -> eyre::Result<()> {
        let genesis = self.chain.genesis();
        let rendered = serde_json::to_string_pretty(genesis)?;
        println!("{rendered}");
        Ok(())
    }
}
impl<C: ChainSpecParser> DumpGenesisCommand<C> {
    /// Returns the chain specification this command was configured with.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        let spec = &self.chain;
        Some(spec)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS};

    /// Every built-in chain name must be accepted by `--chain` parsing.
    #[test]
    fn parse_dump_genesis_command_chain_args() {
        for chain in SUPPORTED_CHAINS {
            let args: DumpGenesisCommand<EthereumChainSpecParser> =
                DumpGenesisCommand::parse_from(["reth", "--chain", chain]);
            // The parsed spec's chain id must agree with `Chain`'s own
            // `FromStr` parsing of the same name.
            assert_eq!(
                Ok(args.chain.chain),
                chain.parse::<reth_chainspec::Chain>(),
                "failed to parse chain {chain}"
            );
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/download.rs | crates/cli/commands/src/download.rs | use crate::common::EnvironmentArgs;
use clap::Parser;
use eyre::Result;
use lz4::Decoder;
use reqwest::Client;
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_fs_util as fs;
use std::{
io::{self, Read, Write},
path::Path,
sync::Arc,
time::{Duration, Instant},
};
use tar::Archive;
use tokio::task;
use tracing::info;
use zstd::stream::read::Decoder as ZstdDecoder;
/// Units used when rendering human-readable byte counts in progress output.
const BYTE_UNITS: [&str; 4] = ["B", "KB", "MB", "GB"];
/// Base URL of the merkle.io snapshot host used for the default download.
const MERKLE_BASE_URL: &str = "https://downloads.merkle.io";
/// File extension identifying lz4-compressed tar snapshot archives.
const EXTENSION_TAR_LZ4: &str = ".tar.lz4";
/// File extension identifying zstd-compressed tar snapshot archives.
const EXTENSION_TAR_ZSTD: &str = ".tar.zst";
// `reth download` command: fetches a snapshot archive over HTTP and unpacks
// it into the chain's datadir. (Left without a `///` doc comment so clap's
// generated about/help output is unchanged.)
#[derive(Debug, Parser)]
pub struct DownloadCommand<C: ChainSpecParser> {
    // Shared environment arguments (datadir, config, chain, db settings).
    #[command(flatten)]
    env: EnvironmentArgs<C>,
    // The `help`/`long_help` strings below are user-visible clap output and
    // must stay byte-identical.
    #[arg(
        long,
        short,
        help = "Custom URL to download the snapshot from",
        long_help = "Specify a snapshot URL or let the command propose a default one.\n\
                     \n\
                     Available snapshot sources:\n\
                     - https://www.merkle.io/snapshots (default, mainnet archive)\n\
                     - https://publicnode.com/snapshots (full nodes & testnets)\n\
                     \n\
                     If no URL is provided, the latest mainnet archive snapshot\n\
                     will be proposed for download from merkle.io"
    )]
    url: Option<String>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> DownloadCommand<C> {
    /// Executes the `download` command: resolves the datadir, selects the
    /// snapshot URL (custom or latest default) and streams + extracts it.
    pub async fn execute<N>(self) -> Result<()> {
        let data_dir = self.env.datadir.resolve_datadir(self.env.chain.chain());
        fs::create_dir_all(&data_dir)?;
        // Fall back to the most recent default snapshot when no URL is given.
        let url = if let Some(custom) = self.url {
            custom
        } else {
            let latest = get_latest_snapshot_url().await?;
            info!(target: "reth::cli", "Using default snapshot URL: {}", latest);
            latest
        };
        info!(target: "reth::cli",
            chain = %self.env.chain.chain(),
            dir = ?data_dir.data_dir(),
            url = %url,
            "Starting snapshot download and extraction"
        );
        stream_and_extract(&url, data_dir.data_dir()).await?;
        info!(target: "reth::cli", "Snapshot downloaded and extracted successfully");
        Ok(())
    }
}
impl<C: ChainSpecParser> DownloadCommand<C> {
    /// Returns the chain specification this command operates on.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        let spec = &self.env.chain;
        Some(spec)
    }
}
// Tracks how many bytes have been received and periodically renders a
// progress line, refreshing at most every 100ms to avoid overwhelming stdout.
struct DownloadProgress {
    downloaded: u64,
    total_size: u64,
    last_displayed: Instant,
}

impl DownloadProgress {
    /// Creates a fresh tracker for a download of `total_size` bytes.
    fn new(total_size: u64) -> Self {
        let now = Instant::now();
        Self { downloaded: 0, total_size, last_displayed: now }
    }

    /// Renders `size` using the largest fitting unit (B, KB, MB, GB).
    fn format_size(size: u64) -> String {
        let mut value = size as f64;
        let mut unit = 0usize;
        while value >= 1024.0 && unit + 1 < BYTE_UNITS.len() {
            value /= 1024.0;
            unit += 1;
        }
        format!("{:.2} {}", value, BYTE_UNITS[unit])
    }

    /// Records `chunk_size` freshly received bytes and redraws the progress
    /// line if at least 100ms have elapsed since the previous draw.
    fn update(&mut self, chunk_size: u64) -> Result<()> {
        self.downloaded += chunk_size;
        // Throttle rendering to at most ~10 refreshes per second.
        if self.last_displayed.elapsed() < Duration::from_millis(100) {
            return Ok(())
        }
        let done = Self::format_size(self.downloaded);
        let total = Self::format_size(self.total_size);
        let percent = self.downloaded as f64 / self.total_size as f64 * 100.0;
        print!("\rDownloading and extracting... {:.2}% ({} / {})", percent, done, total);
        io::stdout().flush()?;
        self.last_displayed = Instant::now();
        Ok(())
    }
}
/// [`Read`] adapter that reports progress to a [`DownloadProgress`] as the
/// wrapped reader is consumed.
struct ProgressReader<R> {
    reader: R,
    progress: DownloadProgress,
}

impl<R: Read> ProgressReader<R> {
    /// Wraps `reader`, expecting `total_size` bytes in total.
    fn new(reader: R, total_size: u64) -> Self {
        let progress = DownloadProgress::new(total_size);
        Self { reader, progress }
    }
}

impl<R: Read> Read for ProgressReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let bytes = self.reader.read(buf)?;
        // Only bump the progress display when data actually arrived.
        if bytes > 0 {
            self.progress.update(bytes as u64).map_err(io::Error::other)?;
        }
        Ok(bytes)
    }
}
/// Snapshot archive compression formats this command can extract.
#[derive(Debug, Clone, Copy)]
enum CompressionFormat {
    Lz4,
    Zstd,
}

impl CompressionFormat {
    /// Infers the compression format from the archive URL's file extension.
    ///
    /// # Errors
    ///
    /// Returns an error for anything other than `.tar.lz4` / `.tar.zst`.
    fn from_url(url: &str) -> Result<Self> {
        if url.ends_with(EXTENSION_TAR_LZ4) {
            return Ok(Self::Lz4)
        }
        if url.ends_with(EXTENSION_TAR_ZSTD) {
            return Ok(Self::Zstd)
        }
        Err(eyre::eyre!("Unsupported file format. Expected .tar.lz4 or .tar.zst, got: {}", url))
    }
}
/// Downloads and extracts a snapshot, blocking until finished.
///
/// Streams the HTTP response body directly through a decompressor (lz4 or
/// zstd, chosen from the URL's extension) into a tar unpacker, so the archive
/// is never fully buffered in memory or written to disk before extraction.
///
/// # Errors
///
/// Fails if the request errors, the server omits `Content-Length`, the URL
/// has an unsupported extension, or decompression/extraction fails.
fn blocking_download_and_extract(url: &str, target_dir: &Path) -> Result<()> {
    let client = reqwest::blocking::Client::builder().build()?;
    // `error_for_status` turns HTTP 4xx/5xx responses into `Err`.
    let response = client.get(url).send()?.error_for_status()?;
    // Content-Length is required so download progress can be reported.
    let total_size = response.content_length().ok_or_else(|| {
        eyre::eyre!(
            "Server did not provide Content-Length header. This is required for snapshot downloads"
        )
    })?;
    // Wrap the response so progress is printed as bytes are consumed.
    let progress_reader = ProgressReader::new(response, total_size);
    let format = CompressionFormat::from_url(url)?;
    match format {
        CompressionFormat::Lz4 => {
            let decoder = Decoder::new(progress_reader)?;
            Archive::new(decoder).unpack(target_dir)?;
        }
        CompressionFormat::Zstd => {
            let decoder = ZstdDecoder::new(progress_reader)?;
            Archive::new(decoder).unpack(target_dir)?;
        }
    }
    info!(target: "reth::cli", "Extraction complete.");
    Ok(())
}
/// Runs [`blocking_download_and_extract`] on the blocking thread pool so the
/// async runtime is not stalled by synchronous network and disk I/O.
async fn stream_and_extract(url: &str, target_dir: &Path) -> Result<()> {
    // Owned copies are moved into the blocking task ('static requirement).
    let owned_url = url.to_owned();
    let owned_dir = target_dir.to_owned();
    let join =
        task::spawn_blocking(move || blocking_download_and_extract(&owned_url, &owned_dir));
    // First `?` surfaces a panicked/cancelled task, second the download error.
    join.await??;
    Ok(())
}
/// Builds the default URL for the latest mainnet archive snapshot.
///
/// Fetches `latest.txt` from the merkle.io snapshot host, which contains the
/// filename of the most recent snapshot, and joins it onto the base URL.
///
/// # Errors
///
/// Fails if the request errors or the server returns an HTTP error status.
async fn get_latest_snapshot_url() -> Result<String> {
    let latest_url = format!("{MERKLE_BASE_URL}/latest.txt");
    let filename = Client::new()
        .get(latest_url)
        .send()
        .await?
        .error_for_status()?
        .text()
        .await?
        .trim()
        .to_string();
    // Bug fix: the fetched filename was previously discarded and the literal
    // placeholder "(unknown)" was interpolated into the returned URL, making
    // the default snapshot URL always invalid.
    Ok(format!("{MERKLE_BASE_URL}/{filename}"))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/import_era.rs | crates/cli/commands/src/import_era.rs | //! Command that initializes the node by importing a chain from ERA files.
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use alloy_chains::{ChainKind, NamedChain};
use clap::{Args, Parser};
use eyre::eyre;
use reqwest::{Client, Url};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_era_downloader::{read_dir, EraClient, EraStream, EraStreamConfig};
use reth_era_utils as era;
use reth_etl::Collector;
use reth_fs_util as fs;
use reth_node_core::version::version_metadata;
use reth_provider::StaticFileProviderFactory;
use reth_static_file_types::StaticFileSegment;
use std::{path::PathBuf, sync::Arc};
use tracing::info;
/// Syncs ERA encoded blocks from a local or remote source.
// NOTE: the `///` comment above doubles as clap's about text; additions below
// use `//` so generated help output is unchanged.
#[derive(Debug, Parser)]
pub struct ImportEraCommand<C: ChainSpecParser> {
    // Shared environment arguments (datadir, config, chain, db settings).
    #[command(flatten)]
    env: EnvironmentArgs<C>,
    // Source selection: local `--path` or remote `--url` (see `ImportArgs`).
    #[clap(flatten)]
    import: ImportArgs,
}
// ERA import source. The clap group (`multiple = false`) makes `--path` and
// `--url` mutually exclusive; when neither is given, a default remote host is
// derived from the chain (see `TryFromChain`).
#[derive(Debug, Args)]
#[group(required = false, multiple = false)]
pub struct ImportArgs {
    /// The path to a directory for import.
    ///
    /// The ERA1 files are read from the local directory parsing headers and bodies.
    #[arg(long, value_name = "IMPORT_ERA_PATH", verbatim_doc_comment)]
    path: Option<PathBuf>,
    /// The URL to a remote host where the ERA1 files are hosted.
    ///
    /// The ERA1 files are read from the remote host using HTTP GET requests parsing headers
    /// and bodies.
    #[arg(long, value_name = "IMPORT_ERA_URL", verbatim_doc_comment)]
    url: Option<Url>,
}
/// Maps a chain identifier to the canonical host serving its ERA1 files.
trait TryFromChain {
    /// Returns the known ERA1 host URL for this chain.
    fn try_to_url(&self) -> eyre::Result<Url>;
}

impl TryFromChain for ChainKind {
    fn try_to_url(&self) -> eyre::Result<Url> {
        // Only chains with a known public ERA1 host are supported.
        let raw = match self {
            ChainKind::Named(NamedChain::Mainnet) => "https://era.ithaca.xyz/era1/index.html",
            ChainKind::Named(NamedChain::Sepolia) => {
                "https://era.ithaca.xyz/sepolia-era1/index.html"
            }
            chain => return Err(eyre!("No known host for ERA files on chain {chain:?}")),
        };
        Ok(Url::parse(raw).expect("URL should be valid"))
    }
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> ImportEraCommand<C> {
    /// Execute `import-era` command
    ///
    /// Resumes from the block after the highest header already stored in
    /// static files, reading ERA1 data either from a local directory
    /// (`--path`) or from a remote host (`--url`, defaulting to a well-known
    /// host for the chain).
    pub async fn execute<N>(self) -> eyre::Result<()>
    where
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
    {
        info!(target: "reth::cli", "reth {} starting", version_metadata().short_version);
        // Read-write access: the import writes headers/bodies into storage.
        let Environment { provider_factory, config, .. } = self.env.init::<N>(AccessRights::RW)?;
        // ETL collector buffers header hashes on disk before insertion.
        let mut hash_collector = Collector::new(config.stages.etl.file_size, config.stages.etl.dir);
        // First block still missing: highest stored header + 1.
        let next_block = provider_factory
            .static_file_provider()
            .get_highest_static_file_block(StaticFileSegment::Headers)
            .unwrap_or_default() +
            1;
        if let Some(path) = self.import.path {
            // Local import: stream ERA1 files straight from the directory.
            let stream = read_dir(path, next_block)?;
            era::import(stream, &provider_factory, &mut hash_collector)?;
        } else {
            // Remote import: download ERA1 files into `<datadir>/era`.
            let url = match self.import.url {
                Some(url) => url,
                None => self.env.chain.chain().kind().try_to_url()?,
            };
            let folder =
                self.env.datadir.resolve_datadir(self.env.chain.chain()).data_dir().join("era");
            fs::create_dir_all(&folder)?;
            let config = EraStreamConfig::default().start_from(next_block);
            let client = EraClient::new(Client::new(), url, folder);
            let stream = EraStream::new(client, config);
            era::import(stream, &provider_factory, &mut hash_collector)?;
        }
        Ok(())
    }
}
impl<C: ChainSpecParser> ImportEraCommand<C> {
    /// Returns the chain specification in use by this command.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        let chain = &self.env.chain;
        Some(chain)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/export_era.rs | crates/cli/commands/src/export_era.rs | //! Command exporting block data to convert them to ERA1 files.
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use clap::{Args, Parser};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_era::execution_types::MAX_BLOCKS_PER_ERA1;
use reth_era_utils as era1;
use reth_provider::DatabaseProviderFactory;
use std::{path::PathBuf, sync::Arc};
use tracing::info;
// Default folder name for era1 export files
const ERA1_EXPORT_FOLDER_NAME: &str = "era1-export";

// `reth export-era` command: writes a block range from the database as ERA1
// files. (Left without a `///` doc so clap's generated help is unchanged.)
#[derive(Debug, Parser)]
pub struct ExportEraCommand<C: ChainSpecParser> {
    // Shared environment arguments (datadir, config, chain, db settings).
    #[command(flatten)]
    env: EnvironmentArgs<C>,
    // Block-range and output-location options (see `ExportArgs`).
    #[clap(flatten)]
    export: ExportArgs,
}
// Options controlling which block range is exported and where the ERA1 files
// are written. Field `///` docs are clap help text and stay untouched.
#[derive(Debug, Args)]
pub struct ExportArgs {
    /// Optional first block number to export from the db.
    /// It is by default 0.
    #[arg(long, value_name = "first-block-number", verbatim_doc_comment)]
    first_block_number: Option<u64>,
    /// Optional last block number to export from the db.
    /// It is by default 8191.
    #[arg(long, value_name = "last-block-number", verbatim_doc_comment)]
    last_block_number: Option<u64>,
    /// The maximum number of blocks per file, it can help you to decrease the size of the files.
    /// Must be less than or equal to 8192.
    #[arg(long, value_name = "max-blocks-per-file", verbatim_doc_comment)]
    max_blocks_per_file: Option<u64>,
    /// The directory path where to export era1 files.
    /// The block data are read from the database.
    #[arg(long, value_name = "EXPORT_ERA1_PATH", verbatim_doc_comment)]
    path: Option<PathBuf>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> ExportEraCommand<C> {
    /// Execute `export-era` command
    ///
    /// Opens storage read-only, validates the export configuration, and
    /// writes the selected block range as ERA1 files.
    pub async fn execute<N>(self) -> eyre::Result<()>
    where
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
    {
        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?;
        // Either specified path or default to `<data-dir>/<chain>/era1-export/`
        let data_dir = match &self.export.path {
            Some(path) => path.clone(),
            None => self
                .env
                .datadir
                .resolve_datadir(self.env.chain.chain())
                .data_dir()
                .join(ERA1_EXPORT_FOLDER_NAME),
        };
        // Unset bounds default to the first full era: blocks 0..=8191.
        let export_config = era1::ExportConfig {
            network: self.env.chain.chain().to_string(),
            first_block_number: self.export.first_block_number.unwrap_or(0),
            last_block_number: self
                .export
                .last_block_number
                .unwrap_or(MAX_BLOCKS_PER_ERA1 as u64 - 1),
            max_blocks_per_file: self
                .export
                .max_blocks_per_file
                .unwrap_or(MAX_BLOCKS_PER_ERA1 as u64),
            dir: data_dir,
        };
        // Reject invalid configurations before any file is written.
        export_config.validate()?;
        info!(
            target: "reth::cli",
            "Starting ERA1 block export: blocks {}-{} to {}",
            export_config.first_block_number,
            export_config.last_block_number,
            export_config.dir.display()
        );
        // Only read access is needed for the database provider
        let provider = provider_factory.database_provider_ro()?;
        let exported_files = era1::export(&provider, &export_config)?;
        info!(
            target: "reth::cli",
            "Successfully exported {} ERA1 files to {}",
            exported_files.len(),
            export_config.dir.display()
        );
        Ok(())
    }
}
impl<C: ChainSpecParser> ExportEraCommand<C> {
    /// Returns the chain specification in use by this command.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        let chain = &self.env.chain;
        Some(chain)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/config_cmd.rs | crates/cli/commands/src/config_cmd.rs | //! CLI command to show configs.
use clap::Parser;
use eyre::{bail, WrapErr};
use reth_config::Config;
use std::path::PathBuf;
/// `reth config` command
// NOTE: the `///` comments here are clap help/about text; extra notes use `//`.
#[derive(Debug, Parser)]
pub struct Command {
    /// The path to the configuration file to use.
    #[arg(long, value_name = "FILE", verbatim_doc_comment)]
    config: Option<PathBuf>,
    /// Show the default config
    // `conflicts_with` makes `--default` and `--config` mutually exclusive at
    // parse time.
    #[arg(long, verbatim_doc_comment, conflicts_with = "config")]
    default: bool,
}
impl Command {
    /// Executes the `config` command: prints either the built-in default
    /// configuration or the configuration loaded from the given file as TOML.
    pub async fn execute(&self) -> eyre::Result<()> {
        let config = match self.default {
            true => Config::default(),
            false => {
                let path = self.config.clone().unwrap_or_default();
                // Give a clear error rather than a raw I/O failure when the
                // file is missing.
                if !path.exists() {
                    bail!("Config file does not exist: {}", path.display());
                }
                Config::from_path(&path)
                    .wrap_err_with(|| format!("Could not load config file: {}", path.display()))?
            }
        };
        let rendered = toml::to_string_pretty(&config)?;
        println!("{rendered}");
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/import_core.rs | crates/cli/commands/src/import_core.rs | //! Core import functionality without CLI dependencies.
use alloy_primitives::B256;
use futures::StreamExt;
use reth_config::Config;
use reth_consensus::FullConsensus;
use reth_db_api::{tables, transaction::DbTx};
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder,
file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
};
use reth_evm::ConfigureEvm;
use reth_network_p2p::{
bodies::downloader::BodyDownloader,
headers::downloader::{HeaderDownloader, SyncTarget},
};
use reth_node_api::BlockTy;
use reth_node_events::node::NodeEvent;
use reth_provider::{
providers::ProviderNodeTypes, BlockNumReader, HeaderProvider, ProviderError, ProviderFactory,
StageCheckpointReader,
};
use reth_prune::PruneModes;
use reth_stages::{prelude::*, Pipeline, StageId, StageSet};
use reth_static_file::StaticFileProducer;
use std::{path::Path, sync::Arc};
use tokio::sync::watch;
use tracing::{debug, error, info};
/// Configuration for importing blocks from RLP files.
#[derive(Debug, Clone, Default)]
pub struct ImportConfig {
    /// Disables stages that require state.
    pub no_state: bool,
    /// Chunk byte length to read from file.
    ///
    /// When `None`, `DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE` is used (see the
    /// debug log in [`import_blocks_from_file`]).
    pub chunk_len: Option<u64>,
}
/// Summary of a finished import run: decoded vs. imported counts.
#[derive(Debug)]
pub struct ImportResult {
    /// Total number of blocks decoded from the file.
    pub total_decoded_blocks: usize,
    /// Total number of transactions decoded from the file.
    pub total_decoded_txns: usize,
    /// Total number of blocks imported into the database.
    pub total_imported_blocks: usize,
    /// Total number of transactions imported into the database.
    pub total_imported_txns: usize,
}

impl ImportResult {
    /// Returns `true` when every decoded block and transaction made it into
    /// the database, i.e. the import did not stop early.
    pub fn is_complete(&self) -> bool {
        let blocks_match = self.total_imported_blocks == self.total_decoded_blocks;
        let txns_match = self.total_imported_txns == self.total_decoded_txns;
        blocks_match && txns_match
    }
}
/// Imports blocks from an RLP-encoded file into the database.
///
/// This function reads RLP-encoded blocks from a file in chunks and imports them
/// using the pipeline infrastructure. It's designed to be used both from the CLI
/// and from test code.
///
/// Returns an [`ImportResult`] with decoded vs. imported block/transaction
/// counts; the import may be partial if interrupted with ctrl-c.
pub async fn import_blocks_from_file<N>(
    path: &Path,
    import_config: ImportConfig,
    provider_factory: ProviderFactory<N>,
    config: &Config,
    executor: impl ConfigureEvm<Primitives = N::Primitives> + 'static,
    consensus: Arc<
        impl FullConsensus<N::Primitives, Error = reth_consensus::ConsensusError> + 'static,
    >,
) -> eyre::Result<ImportResult>
where
    N: ProviderNodeTypes,
{
    if import_config.no_state {
        info!(target: "reth::import", "Disabled stages requiring state");
    }
    debug!(target: "reth::import",
        chunk_byte_len=import_config.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
        "Chunking chain import"
    );
    info!(target: "reth::import", "Consensus engine initialized");
    // open file
    let mut reader = ChunkedFileReader::new(path, import_config.chunk_len).await?;
    // Snapshot current table entry counts so imported totals can be computed
    // as a delta at the end.
    let provider = provider_factory.provider()?;
    let init_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
    let init_txns = provider.tx_ref().entries::<tables::TransactionHashNumbers>()?;
    drop(provider);
    let mut total_decoded_blocks = 0;
    let mut total_decoded_txns = 0;
    // Each chunk must attach to the last header already in the database.
    let mut sealed_header = provider_factory
        .sealed_header(provider_factory.last_block_number()?)?
        .expect("should have genesis");
    while let Some(file_client) =
        reader.next_chunk::<BlockTy<N>>(consensus.clone(), Some(sealed_header)).await?
    {
        // create a new FileClient from chunk read from file
        info!(target: "reth::import",
            "Importing chain file chunk"
        );
        let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?;
        info!(target: "reth::import", "Chain file chunk read");
        total_decoded_blocks += file_client.headers_len();
        total_decoded_txns += file_client.total_transactions();
        // A fresh pipeline per chunk, fed by the in-memory file client.
        let (mut pipeline, events) = build_import_pipeline_impl(
            config,
            provider_factory.clone(),
            &consensus,
            Arc::new(file_client),
            StaticFileProducer::new(provider_factory.clone(), PruneModes::default()),
            import_config.no_state,
            executor.clone(),
        )?;
        // override the tip
        pipeline.set_tip(tip);
        debug!(target: "reth::import", ?tip, "Tip manually set");
        let latest_block_number =
            provider_factory.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number);
        tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events));
        // Run pipeline
        info!(target: "reth::import", "Starting sync pipeline");
        // ctrl-c aborts between/within chunks; the result is then reported as
        // a partial import below.
        tokio::select! {
            res = pipeline.run() => res?,
            _ = tokio::signal::ctrl_c() => {
                info!(target: "reth::import", "Import interrupted by user");
                break;
            },
        }
        // Re-read the new tip so the next chunk validates against it.
        sealed_header = provider_factory
            .sealed_header(provider_factory.last_block_number()?)?
            .expect("should have genesis");
    }
    // Imported counts = table growth since the snapshot taken above.
    let provider = provider_factory.provider()?;
    let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()? - init_blocks;
    let total_imported_txns =
        provider.tx_ref().entries::<tables::TransactionHashNumbers>()? - init_txns;
    let result = ImportResult {
        total_decoded_blocks,
        total_decoded_txns,
        total_imported_blocks,
        total_imported_txns,
    };
    if !result.is_complete() {
        error!(target: "reth::import",
            total_decoded_blocks,
            total_imported_blocks,
            total_decoded_txns,
            total_imported_txns,
            "Chain was partially imported"
        );
    } else {
        info!(target: "reth::import",
            total_imported_blocks,
            total_imported_txns,
            "Chain was fully imported"
        );
    }
    Ok(result)
}
/// Builds import pipeline.
///
/// If configured to execute, all stages will run. Otherwise, only stages that don't require state
/// will run.
///
/// Returns the pipeline together with a stream of its events for progress
/// reporting.
pub fn build_import_pipeline_impl<N, C, E>(
    config: &Config,
    provider_factory: ProviderFactory<N>,
    consensus: &Arc<C>,
    file_client: Arc<FileClient<BlockTy<N>>>,
    static_file_producer: StaticFileProducer<ProviderFactory<N>>,
    disable_exec: bool,
    evm_config: E,
) -> eyre::Result<(Pipeline<N>, impl futures::Stream<Item = NodeEvent<N::Primitives>>)>
where
    N: ProviderNodeTypes,
    C: FullConsensus<N::Primitives, Error = reth_consensus::ConsensusError> + 'static,
    E: ConfigureEvm<Primitives = N::Primitives> + 'static,
{
    if !file_client.has_canonical_blocks() {
        eyre::bail!("unable to import non canonical blocks");
    }
    // Retrieve latest header found in the database.
    let last_block_number = provider_factory.last_block_number()?;
    let local_head = provider_factory
        .sealed_header(last_block_number)?
        .ok_or_else(|| ProviderError::HeaderNotFound(last_block_number.into()))?;
    // Downloaders are backed by the file client instead of the network.
    let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers)
        .build(file_client.clone(), consensus.clone())
        .into_task();
    // TODO: The pipeline should correctly configure the downloader on its own.
    // Find the possibility to remove unnecessary pre-configuration.
    header_downloader.update_local_head(local_head);
    header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap()));
    let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
        .build(file_client.clone(), consensus.clone(), provider_factory.clone())
        .into_task();
    // TODO: The pipeline should correctly configure the downloader on its own.
    // Find the possibility to remove unnecessary pre-configuration.
    body_downloader
        .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap())
        .expect("failed to set download range");
    let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
    let max_block = file_client.max_block().unwrap_or(0);
    let pipeline = Pipeline::builder()
        .with_tip_sender(tip_tx)
        // we want to sync all blocks the file client provides or 0 if empty
        .with_max_block(max_block)
        .with_fail_on_unwind(true)
        .add_stages(
            DefaultStages::new(
                provider_factory.clone(),
                tip_rx,
                consensus.clone(),
                header_downloader,
                body_downloader,
                evm_config,
                config.stages.clone(),
                PruneModes::default(),
                None,
            )
            .builder()
            // Skip state-dependent stages (execution etc.) when requested.
            .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec),
        )
        .build(provider_factory, static_file_producer);
    let events = pipeline.events().map(Into::into);
    Ok((pipeline, events))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/common.rs | crates/cli/commands/src/common.rs | //! Contains common `reth` arguments
use alloy_primitives::B256;
use clap::Parser;
use reth_chainspec::EthChainSpec;
use reth_cli::chainspec::ChainSpecParser;
use reth_config::{config::EtlConfig, Config};
use reth_consensus::{noop::NoopConsensus, ConsensusError, FullConsensus};
use reth_db::{init_db, open_db_read_only, DatabaseEnv};
use reth_db_common::init::init_genesis;
use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
use reth_eth_wire::NetPrimitivesFor;
use reth_evm::{noop::NoopEvmConfig, ConfigureEvm};
use reth_network::NetworkEventListenerProvider;
use reth_node_api::FullNodeTypesAdapter;
use reth_node_builder::{
Node, NodeComponents, NodeComponentsBuilder, NodeTypes, NodeTypesWithDBAdapter,
};
use reth_node_core::{
args::{DatabaseArgs, DatadirArgs},
dirs::{ChainPath, DataDirPath},
};
use reth_provider::{
providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider},
ProviderFactory, StaticFileProviderFactory,
};
use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget};
use reth_static_file::StaticFileProducer;
use std::{path::PathBuf, sync::Arc};
use tokio::sync::watch;
use tracing::{debug, info, warn};
/// Struct to hold config and datadir paths
// Flattened into most CLI commands; field-level `///` docs double as clap
// help text and stay unchanged.
#[derive(Debug, Parser)]
pub struct EnvironmentArgs<C: ChainSpecParser> {
    /// Parameters for datadir configuration
    #[command(flatten)]
    pub datadir: DatadirArgs,
    /// The path to the configuration file to use.
    #[arg(long, value_name = "FILE")]
    pub config: Option<PathBuf>,
    /// The chain this node is running.
    ///
    /// Possible values are either a built-in chain or the path to a chain specification file.
    #[arg(
        long,
        value_name = "CHAIN_OR_PATH",
        long_help = C::help_message(),
        default_value = C::SUPPORTED_CHAINS[0],
        value_parser = C::parser(),
        global = true
    )]
    pub chain: Arc<C::ChainSpec>,
    /// All database related arguments
    #[command(flatten)]
    pub db: DatabaseArgs,
}
impl<C: ChainSpecParser> EnvironmentArgs<C> {
    /// Initializes environment according to [`AccessRights`] and returns an instance of
    /// [`Environment`].
    ///
    /// Steps: resolve the datadir, load (or default) the config, open the
    /// database and static-file provider with the requested access rights,
    /// and — for read-write access — initialize genesis.
    pub fn init<N: CliNodeTypes>(&self, access: AccessRights) -> eyre::Result<Environment<N>>
    where
        C: ChainSpecParser<ChainSpec = N::ChainSpec>,
    {
        let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain());
        let db_path = data_dir.db();
        let sf_path = data_dir.static_files();
        if access.is_read_write() {
            reth_fs_util::create_dir_all(&db_path)?;
            reth_fs_util::create_dir_all(&sf_path)?;
        }
        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
        // A missing/unreadable config file is not fatal: warn and fall back
        // to the default configuration.
        let mut config = Config::from_path(config_path)
            .inspect_err(
                |err| warn!(target: "reth::cli", %err, "Failed to load config file, using default"),
            )
            .unwrap_or_default();
        // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
        if config.stages.etl.dir.is_none() {
            config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir()));
        }
        if config.stages.era.folder.is_none() {
            config.stages.era = config.stages.era.with_datadir(data_dir.data_dir());
        }
        info!(target: "reth::cli", ?db_path, ?sf_path, "Opening storage");
        let (db, sfp) = match access {
            AccessRights::RW => (
                Arc::new(init_db(db_path, self.db.database_args())?),
                StaticFileProvider::read_write(sf_path)?,
            ),
            AccessRights::RO => (
                Arc::new(open_db_read_only(&db_path, self.db.database_args())?),
                StaticFileProvider::read_only(sf_path, false)?,
            ),
        };
        let provider_factory = self.create_provider_factory(&config, db, sfp)?;
        if access.is_read_write() {
            debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis");
            init_genesis(&provider_factory)?;
        }
        Ok(Environment { config, provider_factory, data_dir })
    }

    /// Returns a [`ProviderFactory`] after executing consistency checks.
    ///
    /// If it's a read-write environment and an issue is found, it will attempt to heal (including a
    /// pipeline unwind). Otherwise, it will print out a warning, advising the user to restart the
    /// node to heal.
    fn create_provider_factory<N: CliNodeTypes>(
        &self,
        config: &Config,
        db: Arc<DatabaseEnv>,
        static_file_provider: StaticFileProvider<N::Primitives>,
    ) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>>
    where
        C: ChainSpecParser<ChainSpec = N::ChainSpec>,
    {
        let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning());
        let prune_modes =
            config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default();
        let factory = ProviderFactory::<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>::new(
            db,
            self.chain.clone(),
            static_file_provider,
        )
        .with_prune_modes(prune_modes.clone());
        // Check for consistency between database and static files.
        if let Some(unwind_target) = factory
            .static_file_provider()
            .check_consistency(&factory.provider()?, has_receipt_pruning)?
        {
            // Read-only handles cannot heal; defer to the next RW open.
            if factory.db_ref().is_read_only()? {
                warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal.");
                return Ok(factory)
            }
            // Highly unlikely to happen, and given its destructive nature, it's better to panic
            // instead.
            assert_ne!(
                unwind_target,
                PipelineTarget::Unwind(0),
                "A static file <> database inconsistency was found that would trigger an unwind to block 0"
            );
            info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check.");
            let (_tip_tx, tip_rx) = watch::channel(B256::ZERO);
            // Builds and executes an unwind-only pipeline
            let mut pipeline = Pipeline::<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>::builder()
                .add_stages(DefaultStages::new(
                    factory.clone(),
                    tip_rx,
                    Arc::new(NoopConsensus::default()),
                    NoopHeaderDownloader::default(),
                    NoopBodiesDownloader::default(),
                    NoopEvmConfig::<N::Evm>::default(),
                    config.stages.clone(),
                    prune_modes.clone(),
                    None,
                ))
                .build(factory.clone(), StaticFileProducer::new(factory.clone(), prune_modes));
            // Move all applicable data from database to static files.
            pipeline.move_to_static_files()?;
            pipeline.unwind(unwind_target.unwind_target().expect("should exist"), None)?;
        }
        Ok(factory)
    }
}
/// Environment built from [`EnvironmentArgs`].
///
/// Bundles what a CLI command needs to operate on an initialized datadir:
/// the loaded config, an open provider factory, and the resolved datadir.
#[derive(Debug)]
pub struct Environment<N: NodeTypes> {
    /// Configuration for reth node
    pub config: Config,
    /// Provider factory.
    pub provider_factory: ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
    /// Datadir path.
    pub data_dir: ChainPath<DataDirPath>,
}
/// Access level with which the storage environment is opened.
#[derive(Debug, Copy, Clone)]
pub enum AccessRights {
    /// Read-write access
    RW,
    /// Read-only access
    RO,
}

impl AccessRights {
    /// Returns `true` if it requires read-write access to the environment.
    pub const fn is_read_write(&self) -> bool {
        match self {
            Self::RW => true,
            Self::RO => false,
        }
    }
}
/// Helper alias to satisfy `FullNodeTypes` bound on [`Node`] trait generic.
///
/// Pairs the node types `T` with the concrete database env and the
/// [`BlockchainProvider`] used by CLI commands.
type FullTypesAdapter<T> = FullNodeTypesAdapter<
    T,
    Arc<DatabaseEnv>,
    BlockchainProvider<NodeTypesWithDBAdapter<T, Arc<DatabaseEnv>>>,
>;
/// Trait for block headers that can be modified through CLI operations.
pub trait CliHeader {
    /// Sets the block number on the header.
    fn set_number(&mut self, number: u64);
}

impl CliHeader for alloy_consensus::Header {
    fn set_number(&mut self, number: u64) {
        // Direct field write; `alloy_consensus::Header` exposes `number`.
        self.number = number;
    }
}
/// Helper trait with a common set of requirements for the
/// [`NodeTypes`] in CLI.
pub trait CliNodeTypes: NodeTypesForProvider {
    /// EVM configuration type used when executing blocks for this node.
    type Evm: ConfigureEvm<Primitives = Self::Primitives>;
    /// Network primitive types compatible with this node's primitives.
    type NetworkPrimitives: NetPrimitivesFor<Self::Primitives>;
}

// Blanket impl: any `Node` buildable over the CLI's `FullTypesAdapter`
// automatically satisfies `CliNodeTypes`, projecting its EVM and network
// primitive types out of the node's components builder.
impl<N> CliNodeTypes for N
where
    N: Node<FullTypesAdapter<Self>> + NodeTypesForProvider,
{
    type Evm = <<N::ComponentsBuilder as NodeComponentsBuilder<FullTypesAdapter<Self>>>::Components as NodeComponents<FullTypesAdapter<Self>>>::Evm;
    type NetworkPrimitives = <<<N::ComponentsBuilder as NodeComponentsBuilder<FullTypesAdapter<Self>>>::Components as NodeComponents<FullTypesAdapter<Self>>>::Network as NetworkEventListenerProvider>::Primitives;
}
/// Helper trait aggregating components required for the CLI.
///
/// Implemented for `(evm, consensus)` tuples, so commands can accept any
/// compatible pair without a dedicated wrapper type.
pub trait CliNodeComponents<N: CliNodeTypes>: Send + Sync + 'static {
    /// Evm to use.
    type Evm: ConfigureEvm<Primitives = N::Primitives> + 'static;
    /// Consensus implementation.
    type Consensus: FullConsensus<N::Primitives, Error = ConsensusError> + Clone + 'static;

    /// Returns the configured EVM.
    fn evm_config(&self) -> &Self::Evm;
    /// Returns the consensus implementation.
    fn consensus(&self) -> &Self::Consensus;
}
// Blanket impl: any (EVM config, consensus) pair forms a valid component
// bundle for the CLI.
impl<N: CliNodeTypes, E, C> CliNodeComponents<N> for (E, C)
where
    E: ConfigureEvm<Primitives = N::Primitives> + 'static,
    C: FullConsensus<N::Primitives, Error = ConsensusError> + Clone + 'static,
{
    type Evm = E;
    type Consensus = C;

    fn evm_config(&self) -> &Self::Evm {
        // EVM config is the first tuple element.
        &self.0
    }

    fn consensus(&self) -> &Self::Consensus {
        // Consensus is the second tuple element.
        &self.1
    }
}
/// Helper trait alias for an [`FnOnce`] producing [`CliNodeComponents`].
pub trait CliComponentsBuilder<N: CliNodeTypes>:
    FnOnce(Arc<N::ChainSpec>) -> Self::Components + Send + Sync + 'static
{
    /// The component bundle produced by this builder closure.
    type Components: CliNodeComponents<N>;
}
// Blanket impl: any compatible closure is automatically a components builder.
impl<N: CliNodeTypes, F, Comp> CliComponentsBuilder<N> for F
where
    F: FnOnce(Arc<N::ChainSpec>) -> Comp + Send + Sync + 'static,
    Comp: CliNodeComponents<N>,
{
    type Components = Comp;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/launcher.rs | crates/cli/commands/src/launcher.rs | use futures::Future;
use reth_cli::chainspec::ChainSpecParser;
use reth_db::DatabaseEnv;
use reth_node_builder::{NodeBuilder, WithLaunchContext};
use std::{fmt, sync::Arc};
/// A trait for launching a reth node with custom configuration strategies.
///
/// This trait allows defining node configuration through various object types rather than just
/// functions. By implementing this trait on your own structures, you can:
///
/// - Create flexible configurations that connect necessary components without creating separate
///   closures
/// - Take advantage of decomposition to break complex configurations into a series of methods
/// - Encapsulate configuration logic in dedicated types with their own state and behavior
/// - Reuse configuration patterns across different parts of your application
pub trait Launcher<C, Ext>
where
    C: ChainSpecParser,
    Ext: clap::Args + fmt::Debug,
{
    /// Entry point for launching a node with custom configuration.
    ///
    /// Consumes `self` to use pre-configured state, takes a builder and arguments,
    /// and returns an async future.
    ///
    /// # Arguments
    ///
    /// * `builder` - Node builder with launch context
    /// * `builder_args` - Extension arguments for configuration
    ///
    /// # Returns
    ///
    /// A future resolving to the launched node's `eyre::Result`.
    fn entrypoint(
        self,
        builder: WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>,
        builder_args: Ext,
    ) -> impl Future<Output = eyre::Result<()>>;
}
/// A function-based adapter implementation of the [`Launcher`] trait.
///
/// This struct adapts existing closures to work with the new [`Launcher`] trait,
/// maintaining backward compatibility with current node implementations while
/// enabling the transition to the more flexible trait-based approach.
pub struct FnLauncher<F> {
    /// The function to execute when launching the node
    func: F,
}
impl<F> FnLauncher<F> {
    /// Creates a new function launcher adapter.
    ///
    /// Type parameters `C` and `Ext` help the compiler infer correct types
    /// since they're not stored in the struct itself.
    ///
    /// # Arguments
    ///
    /// * `func` - Function that configures and launches a node
    pub fn new<C, Ext>(func: F) -> Self
    where
        C: ChainSpecParser,
        // Same bound as the `Launcher` impl below; checked here so type errors
        // surface at construction rather than at the `entrypoint` call site.
        F: AsyncFnOnce(
            WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>,
            Ext,
        ) -> eyre::Result<()>,
    {
        Self { func }
    }
}
impl<C, Ext, F> Launcher<C, Ext> for FnLauncher<F>
where
    C: ChainSpecParser,
    Ext: clap::Args + fmt::Debug,
    F: AsyncFnOnce(
        WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>,
        Ext,
    ) -> eyre::Result<()>,
{
    fn entrypoint(
        self,
        builder: WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>,
        builder_args: Ext,
    ) -> impl Future<Output = eyre::Result<()>> {
        // Delegate directly to the wrapped async closure; calling it produces
        // the future without polling it.
        (self.func)(builder, builder_args)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/import.rs | crates/cli/commands/src/import.rs | //! Command that initializes the node by importing a chain from a file.
use crate::{
common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs},
import_core::{import_blocks_from_file, ImportConfig},
};
use clap::Parser;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_node_core::version::version_metadata;
use std::{path::PathBuf, sync::Arc};
use tracing::info;
pub use crate::import_core::build_import_pipeline_impl as build_import_pipeline;
/// Syncs RLP encoded blocks from a file or files.
#[derive(Debug, Parser)]
pub struct ImportCommand<C: ChainSpecParser> {
    // Shared environment (datadir, chain, db) arguments.
    #[command(flatten)]
    env: EnvironmentArgs<C>,

    /// Disables stages that require state.
    #[arg(long, verbatim_doc_comment)]
    no_state: bool,

    /// Chunk byte length to read from file.
    #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
    chunk_len: Option<u64>,

    /// The path(s) to block file(s) for import.
    ///
    /// The online stages (headers and bodies) are replaced by a file import, after which the
    /// remaining stages are executed. Multiple files will be imported sequentially.
    #[arg(value_name = "IMPORT_PATH", required = true, num_args = 1.., verbatim_doc_comment)]
    paths: Vec<PathBuf>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> ImportCommand<C> {
    /// Execute `import` command
    ///
    /// Opens the environment read-write, then imports each file in `self.paths`
    /// sequentially, aborting with an error as soon as one file is only
    /// partially imported.
    pub async fn execute<N, Comp>(
        self,
        components: impl FnOnce(Arc<N::ChainSpec>) -> Comp,
    ) -> eyre::Result<()>
    where
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
        Comp: CliNodeComponents<N>,
    {
        info!(target: "reth::cli", "reth {} starting", version_metadata().short_version);

        let Environment { provider_factory, config, .. } = self.env.init::<N>(AccessRights::RW)?;
        let components = components(provider_factory.chain_spec());

        info!(target: "reth::cli", "Starting import of {} file(s)", self.paths.len());

        let import_config = ImportConfig { no_state: self.no_state, chunk_len: self.chunk_len };
        let executor = components.evm_config().clone();
        // Consensus is shared (via `Arc`) across all per-file imports.
        let consensus = Arc::new(components.consensus().clone());

        // Running totals across all files, reported once at the end.
        let mut total_imported_blocks = 0;
        let mut total_imported_txns = 0;
        let mut total_decoded_blocks = 0;
        let mut total_decoded_txns = 0;

        // Import each file sequentially
        for (index, path) in self.paths.iter().enumerate() {
            info!(target: "reth::cli", "Importing file {} of {}: {}", index + 1, self.paths.len(), path.display());

            let result = import_blocks_from_file(
                path,
                import_config.clone(),
                provider_factory.clone(),
                &config,
                executor.clone(),
                consensus.clone(),
            )
            .await?;

            total_imported_blocks += result.total_imported_blocks;
            total_imported_txns += result.total_imported_txns;
            total_decoded_blocks += result.total_decoded_blocks;
            total_decoded_txns += result.total_decoded_txns;

            // Abort on partial import; the error reports imported vs decoded
            // counts (completeness criterion is defined in `import_core`).
            if !result.is_complete() {
                return Err(eyre::eyre!(
                    "Chain was partially imported from file: {}. Imported {}/{} blocks, {}/{} transactions",
                    path.display(),
                    result.total_imported_blocks,
                    result.total_decoded_blocks,
                    result.total_imported_txns,
                    result.total_decoded_txns
                ));
            }

            info!(target: "reth::cli",
                "Successfully imported file {}: {} blocks, {} transactions",
                path.display(), result.total_imported_blocks, result.total_imported_txns);
        }

        info!(target: "reth::cli",
            "All files imported successfully. Total: {}/{} blocks, {}/{} transactions",
            total_imported_blocks, total_decoded_blocks, total_imported_txns, total_decoded_txns);

        Ok(())
    }
}
impl<C: ChainSpecParser> ImportCommand<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Always present: the chain spec is resolved by the env args parser.
        Some(&self.env.chain)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS};

    // `--chain` must parse for every supported chain name.
    #[test]
    fn parse_common_import_command_chain_args() {
        for chain in SUPPORTED_CHAINS {
            let args: ImportCommand<EthereumChainSpecParser> =
                ImportCommand::parse_from(["reth", "--chain", chain, "."]);
            assert_eq!(
                Ok(args.env.chain.chain),
                chain.parse::<reth_chainspec::Chain>(),
                "failed to parse chain {chain}"
            );
        }
    }

    // Multiple positional paths are collected in order.
    #[test]
    fn parse_import_command_with_multiple_paths() {
        let args: ImportCommand<EthereumChainSpecParser> =
            ImportCommand::parse_from(["reth", "file1.rlp", "file2.rlp", "file3.rlp"]);
        assert_eq!(args.paths.len(), 3);
        assert_eq!(args.paths[0], PathBuf::from("file1.rlp"));
        assert_eq!(args.paths[1], PathBuf::from("file2.rlp"));
        assert_eq!(args.paths[2], PathBuf::from("file3.rlp"));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/init_cmd.rs | crates/cli/commands/src/init_cmd.rs | //! Command that initializes the node from a genesis file.
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use clap::Parser;
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_provider::BlockHashReader;
use std::sync::Arc;
use tracing::info;
/// Initializes the database with the genesis block.
#[derive(Debug, Parser)]
pub struct InitCommand<C: ChainSpecParser> {
    // Shared environment (datadir, chain, db) arguments.
    #[command(flatten)]
    env: EnvironmentArgs<C>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitCommand<C> {
    /// Runs the `init` command: opens the environment read-write and reports
    /// the genesis block hash stored at height 0.
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(self) -> eyre::Result<()> {
        info!(target: "reth::cli", "reth init starting");

        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;

        // Height 0 must resolve to the genesis hash after initialization.
        let genesis_hash = match provider_factory.block_hash(0)? {
            Some(hash) => hash,
            None => return Err(eyre::eyre!("Genesis hash not found.")),
        };

        info!(target: "reth::cli", hash = ?genesis_hash, "Genesis block written");
        Ok(())
    }
}
impl<C: ChainSpecParser> InitCommand<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Always present: the chain spec is resolved by the env args parser.
        Some(&self.env.chain)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/re_execute.rs | crates/cli/commands/src/re_execute.rs | //! Re-execute blocks from database in parallel.
use crate::common::{
AccessRights, CliComponentsBuilder, CliNodeComponents, CliNodeTypes, Environment,
EnvironmentArgs,
};
use alloy_consensus::{BlockHeader, TxReceipt};
use clap::Parser;
use eyre::WrapErr;
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_consensus::FullConsensus;
use reth_evm::{execute::Executor, ConfigureEvm};
use reth_primitives_traits::{format_gas_throughput, BlockBody, GotExpected, SignedTransaction};
use reth_provider::{
BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, ReceiptProvider,
StaticFileProviderFactory, TransactionVariant,
};
use reth_revm::database::StateProviderDatabase;
use reth_stages::stages::calculate_gas_used_from_headers;
use std::{
sync::Arc,
time::{Duration, Instant},
};
use tokio::{sync::mpsc, task::JoinSet};
use tracing::*;
/// `reth re-execute` command
///
/// Re-execute blocks in parallel to verify historical sync correctness.
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // Shared environment (datadir, chain, db) arguments.
    #[command(flatten)]
    env: EnvironmentArgs<C>,

    /// The height to start at.
    #[arg(long, default_value = "1")]
    from: u64,

    /// The height to end at. Defaults to the latest block.
    #[arg(long)]
    to: Option<u64>,

    /// Number of tasks to run in parallel
    #[arg(long, default_value = "10")]
    num_tasks: u64,
}
impl<C: ChainSpecParser> Command<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Always present: the chain spec is resolved by the env args parser.
        Some(&self.env.chain)
    }
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>> Command<C> {
    /// Execute `re-execute` command
    ///
    /// Splits the block range into `num_tasks` contiguous chunks, re-executes each
    /// chunk on its own blocking task and validates the results against stored
    /// receipts, while this async task aggregates progress and logs throughput
    /// every 10 seconds.
    pub async fn execute<N>(self, components: impl CliComponentsBuilder<N>) -> eyre::Result<()>
    where
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
    {
        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?;
        let provider = provider_factory.database_provider_ro()?;
        let components = components(provider_factory.chain_spec());

        let min_block = self.from;
        let max_block = self.to.unwrap_or(provider.best_block_number()?);
        // NOTE(review): `total_blocks` is `max - min`, and the per-task loops
        // below are half-open (`start..end`), so `max_block` itself is never
        // executed — yet `total_gas` is summed over `min..=max` inclusive.
        // Confirm this off-by-one is intended (progress may not reach 100%).
        let total_blocks = max_block - min_block;
        let total_gas = calculate_gas_used_from_headers(
            &provider_factory.static_file_provider(),
            min_block..=max_block,
        )?;
        let blocks_per_task = total_blocks / self.num_tasks;

        // Closure producing a state database view at a given historical block.
        let db_at = {
            let provider_factory = provider_factory.clone();
            move |block_number: u64| {
                StateProviderDatabase(
                    provider_factory.history_by_block_number(block_number).unwrap(),
                )
            }
        };

        let (stats_tx, mut stats_rx) = mpsc::unbounded_channel();
        let mut tasks = JoinSet::new();
        for i in 0..self.num_tasks {
            let start_block = min_block + i * blocks_per_task;
            // The last task absorbs the remainder of the integer division.
            let end_block =
                if i == self.num_tasks - 1 { max_block } else { start_block + blocks_per_task };

            // Spawn thread executing blocks
            let provider_factory = provider_factory.clone();
            let evm_config = components.evm_config().clone();
            let consensus = components.consensus().clone();
            let db_at = db_at.clone();
            let stats_tx = stats_tx.clone();
            tasks.spawn_blocking(move || {
                // Execute on top of the state at the parent of the first block.
                let mut executor = evm_config.batch_executor(db_at(start_block - 1));
                for block in start_block..end_block {
                    let block = provider_factory
                        .recovered_block(block.into(), TransactionVariant::NoHash)?
                        .unwrap();
                    let result = executor.execute_one(&block)?;
                    if let Err(err) = consensus
                        .validate_block_post_execution(&block, &result)
                        .wrap_err_with(|| format!("Failed to validate block {}", block.number()))
                    {
                        // Validation failed: diff produced receipts against the
                        // stored ones to pinpoint the offending transaction.
                        let correct_receipts =
                            provider_factory.receipts_by_block(block.number().into())?.unwrap();

                        for (i, (receipt, correct_receipt)) in
                            result.receipts.iter().zip(correct_receipts.iter()).enumerate()
                        {
                            if receipt != correct_receipt {
                                let tx_hash = block.body().transactions()[i].tx_hash();

                                error!(
                                    ?receipt,
                                    ?correct_receipt,
                                    index = i,
                                    ?tx_hash,
                                    "Invalid receipt"
                                );

                                // Per-tx gas is the delta between consecutive
                                // cumulative gas values (0 baseline for tx 0).
                                let expected_gas_used = correct_receipt.cumulative_gas_used() -
                                    if i == 0 {
                                        0
                                    } else {
                                        correct_receipts[i - 1].cumulative_gas_used()
                                    };
                                let got_gas_used = receipt.cumulative_gas_used() -
                                    if i == 0 {
                                        0
                                    } else {
                                        result.receipts[i - 1].cumulative_gas_used()
                                    };
                                if got_gas_used != expected_gas_used {
                                    let mismatch = GotExpected {
                                        expected: expected_gas_used,
                                        got: got_gas_used,
                                    };

                                    error!(number=?block.number(), ?mismatch, "Gas usage mismatch");

                                    return Err(err);
                                }
                            } else {
                                continue;
                            }
                        }

                        return Err(err);
                    }

                    // Report this block's gas to the progress aggregator;
                    // send failure (receiver gone) is deliberately ignored.
                    let _ = stats_tx.send(block.gas_used());

                    // Reset DB once in a while to avoid OOM
                    if executor.size_hint() > 1_000_000 {
                        executor = evm_config.batch_executor(db_at(block.number()));
                    }
                }
                eyre::Ok(())
            });
        }

        let instant = Instant::now();
        let mut total_executed_blocks = 0;
        let mut total_executed_gas = 0;

        // Snapshots of the totals at the last periodic log line.
        let mut last_logged_gas = 0;
        let mut last_logged_blocks = 0;
        let mut last_logged_time = Instant::now();

        let mut interval = tokio::time::interval(Duration::from_secs(10));

        loop {
            tokio::select! {
                Some(gas_used) = stats_rx.recv() => {
                    total_executed_blocks += 1;
                    total_executed_gas += gas_used;
                }
                result = tasks.join_next() => {
                    if let Some(result) = result {
                        // Fail fast on either a panicked/cancelled task (outer
                        // Err) or an execution error (inner Err).
                        if matches!(result, Err(_) | Ok(Err(_))) {
                            error!(?result);
                            return Err(eyre::eyre!("Re-execution failed: {result:?}"));
                        }
                    } else {
                        // All tasks finished successfully.
                        break;
                    }
                }
                _ = interval.tick() => {
                    let blocks_executed = total_executed_blocks - last_logged_blocks;
                    let gas_executed = total_executed_gas - last_logged_gas;
                    if blocks_executed > 0 {
                        let progress = 100.0 * total_executed_gas as f64 / total_gas as f64;
                        info!(
                            throughput=?format_gas_throughput(gas_executed, last_logged_time.elapsed()),
                            progress=format!("{progress:.2}%"),
                            "Executed {blocks_executed} blocks"
                        );
                    }
                    last_logged_blocks = total_executed_blocks;
                    last_logged_gas = total_executed_gas;
                    last_logged_time = Instant::now();
                }
            }
        }

        info!(
            start_block = min_block,
            end_block = max_block,
            throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()),
            "Re-executed successfully"
        );

        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/recover/mod.rs | crates/cli/commands/src/recover/mod.rs | //! `reth recover` command.
use crate::common::CliNodeTypes;
use clap::{Parser, Subcommand};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_runner::CliContext;
use std::sync::Arc;
mod storage_tries;
/// `reth recover` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // The selected recovery subcommand.
    #[command(subcommand)]
    command: Subcommands<C>,
}
/// `reth recover` subcommands
#[derive(Subcommand, Debug)]
pub enum Subcommands<C: ChainSpecParser> {
    /// Recover the node by deleting dangling storage tries.
    StorageTries(storage_tries::Command<C>),
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> {
    /// Execute `recover` command
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(
        self,
        ctx: CliContext,
    ) -> eyre::Result<()> {
        // Dispatch to the selected recovery subcommand.
        match self.command {
            Subcommands::StorageTries(command) => command.execute::<N>(ctx).await,
        }
    }
}
impl<C: ChainSpecParser> Command<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Delegate to the active subcommand's chain spec.
        match &self.command {
            Subcommands::StorageTries(command) => command.chain_spec(),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/recover/storage_tries.rs | crates/cli/commands/src/recover/storage_tries.rs | use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use alloy_consensus::BlockHeader;
use clap::Parser;
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_runner::CliContext;
use reth_db_api::{
cursor::{DbCursorRO, DbDupCursorRW},
tables,
transaction::DbTx,
};
use reth_provider::{BlockNumReader, HeaderProvider, ProviderError};
use reth_trie::StateRoot;
use reth_trie_db::DatabaseStateRoot;
use std::sync::Arc;
use tracing::*;
/// `reth recover storage-tries` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // Shared environment (datadir, chain, db) arguments.
    #[command(flatten)]
    env: EnvironmentArgs<C>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> {
    /// Execute `storage-tries` recovery command
    ///
    /// Walks the `StoragesTrie` table and deletes every storage trie whose
    /// hashed account no longer exists, then verifies the recomputed state root
    /// against the best header before committing.
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(
        self,
        _ctx: CliContext,
    ) -> eyre::Result<()> {
        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;

        let mut provider = provider_factory.provider_rw()?;
        let best_block = provider.best_block_number()?;
        let best_header = provider
            .sealed_header(best_block)?
            .ok_or_else(|| ProviderError::HeaderNotFound(best_block.into()))?;

        let mut deleted_tries = 0;
        let tx_mut = provider.tx_mut();
        let mut hashed_account_cursor = tx_mut.cursor_read::<tables::HashedAccounts>()?;
        let mut storage_trie_cursor = tx_mut.cursor_dup_read::<tables::StoragesTrie>()?;
        let mut entry = storage_trie_cursor.first()?;

        info!(target: "reth::cli", "Starting pruning of storage tries");
        while let Some((hashed_address, _)) = entry {
            // A storage trie with no matching hashed account is dangling —
            // delete all duplicate entries for that address.
            if hashed_account_cursor.seek_exact(hashed_address)?.is_none() {
                deleted_tries += 1;
                storage_trie_cursor.delete_current_duplicates()?;
            }

            entry = storage_trie_cursor.next()?;
        }

        // Recompute the state root and require it to match the best header
        // before committing; otherwise bail without persisting the deletions.
        let state_root = StateRoot::from_tx(tx_mut).root()?;
        if state_root != best_header.state_root() {
            eyre::bail!(
                "Recovery failed. Incorrect state root. Expected: {:?}. Received: {:?}",
                best_header.state_root(),
                state_root
            );
        }

        provider.commit()?;
        info!(target: "reth::cli", deleted = deleted_tries, "Finished recovery");

        Ok(())
    }
}
impl<C: ChainSpecParser> Command<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Always present: the chain spec is resolved by the env args parser.
        Some(&self.env.chain)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/stats.rs | crates/cli/commands/src/db/stats.rs | use crate::{common::CliNodeTypes, db::checksum::ChecksumViewer};
use clap::Parser;
use comfy_table::{Cell, Row, Table as ComfyTable};
use eyre::WrapErr;
use human_bytes::human_bytes;
use itertools::Itertools;
use reth_chainspec::EthereumHardforks;
use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv};
use reth_db_api::{database::Database, TableViewer, Tables};
use reth_db_common::DbTool;
use reth_fs_util as fs;
use reth_node_builder::{NodePrimitives, NodeTypesWithDB, NodeTypesWithDBAdapter};
use reth_node_core::dirs::{ChainPath, DataDirPath};
use reth_provider::providers::{ProviderNodeTypes, StaticFileProvider};
use reth_static_file_types::SegmentRangeInclusive;
use std::{sync::Arc, time::Duration};
#[derive(Parser, Debug)]
/// The arguments for the `reth db stats` command
pub struct Command {
    /// Show a detailed size breakdown (data, index, offsets and config files)
    /// for static files instead of only the combined total size.
    //
    // Fix: the previous help text ("Show only the total size for static files")
    // described the *default* behavior, not the effect of passing this flag —
    // when set, extra size columns are added to the static files table.
    #[arg(long, default_value_t = false)]
    detailed_sizes: bool,

    /// Show detailed information per static file segment.
    #[arg(long, default_value_t = false)]
    detailed_segments: bool,

    /// Show a checksum of each table in the database.
    ///
    /// WARNING: this option will take a long time to run, as it needs to traverse and hash the
    /// entire database.
    ///
    /// For individual table checksums, use the `reth db checksum` command.
    #[arg(long, default_value_t = false)]
    checksum: bool,
}
impl Command {
/// Execute `db stats` command
///
/// Prints, in order: the optional checksum report, the static files stats
/// table and the MDBX per-table stats table.
pub fn execute<N: CliNodeTypes<ChainSpec: EthereumHardforks>>(
    self,
    data_dir: ChainPath<DataDirPath>,
    tool: &DbTool<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
) -> eyre::Result<()> {
    // Checksums are opt-in: computing them traverses every table.
    if self.checksum {
        let checksum_report = self.checksum_report(tool)?;
        println!("{checksum_report}");
        println!("\n");
    }

    let static_files_stats_table = self.static_files_stats_table::<N::Primitives>(data_dir)?;
    println!("{static_files_stats_table}");
    println!("\n");

    let db_stats_table = self.db_stats_table(tool)?;
    println!("{db_stats_table}");

    Ok(())
}
/// Builds the MDBX per-table stats table: entry counts, page counts and sizes,
/// plus aggregate and freelist rows.
fn db_stats_table<N: NodeTypesWithDB<DB = Arc<DatabaseEnv>>>(
    &self,
    tool: &DbTool<N>,
) -> eyre::Result<ComfyTable> {
    let mut table = ComfyTable::new();
    table.load_preset(comfy_table::presets::ASCII_MARKDOWN);
    table.set_header([
        "Table Name",
        "# Entries",
        "Branch Pages",
        "Leaf Pages",
        "Overflow Pages",
        "Total Size",
    ]);

    tool.provider_factory.db_ref().view(|tx| {
        // Alphabetical ordering for a stable, readable report.
        let mut db_tables = Tables::ALL.iter().map(|table| table.name()).collect::<Vec<_>>();
        db_tables.sort();
        let mut total_size = 0;
        for db_table in db_tables {
            let table_db = tx.inner.open_db(Some(db_table)).wrap_err("Could not open db.")?;

            let stats = tx
                .inner
                .db_stat(&table_db)
                .wrap_err(format!("Could not find table: {db_table}"))?;

            // Defaults to 16KB right now but we should
            // re-evaluate depending on the DB we end up using
            // (e.g. REDB does not have these options as configurable intentionally)
            let page_size = stats.page_size() as usize;
            let leaf_pages = stats.leaf_pages();
            let branch_pages = stats.branch_pages();
            let overflow_pages = stats.overflow_pages();
            let num_pages = leaf_pages + branch_pages + overflow_pages;
            let table_size = page_size * num_pages;

            total_size += table_size;
            let mut row = Row::new();
            row.add_cell(Cell::new(db_table))
                .add_cell(Cell::new(stats.entries()))
                .add_cell(Cell::new(branch_pages))
                .add_cell(Cell::new(leaf_pages))
                .add_cell(Cell::new(overflow_pages))
                .add_cell(Cell::new(human_bytes(table_size as f64)));
            table.add_row(row);
        }

        // Separator row sized to the widest content in each column.
        let max_widths = table.column_max_content_widths();
        let mut separator = Row::new();
        for width in max_widths {
            separator.add_cell(Cell::new("-".repeat(width as usize)));
        }
        table.add_row(separator);

        // Aggregate row: sum of all named tables.
        let mut row = Row::new();
        row.add_cell(Cell::new("Tables"))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(human_bytes(total_size as f64)));
        table.add_row(row);

        // Freelist pages are tracked separately from the named tables.
        let freelist = tx.inner.env().freelist()?;
        let pagesize = tx.inner.db_stat(&mdbx::Database::freelist_db())?.page_size() as usize;
        let freelist_size = freelist * pagesize;

        let mut row = Row::new();
        row.add_cell(Cell::new("Freelist"))
            .add_cell(Cell::new(freelist))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(""))
            .add_cell(Cell::new(human_bytes(freelist_size as f64)));
        table.add_row(row);

        Ok::<(), eyre::Report>(())
    })??;

    Ok(table)
}
/// Builds the static files stats table, one row per segment (or per file when
/// `detailed_segments` is set), with per-file-type size columns when
/// `detailed_sizes` is set.
fn static_files_stats_table<N: NodePrimitives>(
    &self,
    data_dir: ChainPath<DataDirPath>,
) -> eyre::Result<ComfyTable> {
    let mut table = ComfyTable::new();
    table.load_preset(comfy_table::presets::ASCII_MARKDOWN);

    // Header depends on whether a per-file-type size breakdown is requested.
    if self.detailed_sizes {
        table.set_header([
            "Segment",
            "Block Range",
            "Transaction Range",
            "Shape (columns x rows)",
            "Data Size",
            "Index Size",
            "Offsets Size",
            "Config Size",
            "Total Size",
        ]);
    } else {
        table.set_header([
            "Segment",
            "Block Range",
            "Transaction Range",
            "Shape (columns x rows)",
            "Size",
        ]);
    }

    let static_files = iter_static_files(&data_dir.static_files())?;
    let static_file_provider =
        StaticFileProvider::<N>::read_only(data_dir.static_files(), false)?;

    let mut total_data_size = 0;
    let mut total_index_size = 0;
    let mut total_offsets_size = 0;
    let mut total_config_size = 0;

    for (segment, ranges) in static_files.into_iter().sorted_by_key(|(segment, _)| *segment) {
        // Per-segment accumulators, used when not printing one row per file.
        let (
            mut segment_columns,
            mut segment_rows,
            mut segment_data_size,
            mut segment_index_size,
            mut segment_offsets_size,
            mut segment_config_size,
        ) = (0, 0, 0, 0, 0, 0);

        for (block_range, tx_range) in &ranges {
            let fixed_block_range = static_file_provider.find_fixed_range(block_range.start());
            let jar_provider = static_file_provider
                .get_segment_provider(segment, || Some(fixed_block_range), None)?
                .ok_or_else(|| {
                    eyre::eyre!("Failed to get segment provider for segment: {}", segment)
                })?;

            let columns = jar_provider.columns();
            let rows = jar_provider.rows();

            // Missing/unreadable files count as zero-sized rather than erroring.
            let data_size = fs::metadata(jar_provider.data_path())
                .map(|metadata| metadata.len())
                .unwrap_or_default();
            let index_size = fs::metadata(jar_provider.index_path())
                .map(|metadata| metadata.len())
                .unwrap_or_default();
            let offsets_size = fs::metadata(jar_provider.offsets_path())
                .map(|metadata| metadata.len())
                .unwrap_or_default();
            let config_size = fs::metadata(jar_provider.config_path())
                .map(|metadata| metadata.len())
                .unwrap_or_default();

            if self.detailed_segments {
                // One row per static file.
                let mut row = Row::new();
                row.add_cell(Cell::new(segment))
                    .add_cell(Cell::new(format!("{block_range}")))
                    .add_cell(Cell::new(
                        tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")),
                    ))
                    .add_cell(Cell::new(format!("{columns} x {rows}")));
                if self.detailed_sizes {
                    row.add_cell(Cell::new(human_bytes(data_size as f64)))
                        .add_cell(Cell::new(human_bytes(index_size as f64)))
                        .add_cell(Cell::new(human_bytes(offsets_size as f64)))
                        .add_cell(Cell::new(human_bytes(config_size as f64)));
                }
                row.add_cell(Cell::new(human_bytes(
                    (data_size + index_size + offsets_size + config_size) as f64,
                )));
                table.add_row(row);
            } else {
                if segment_columns > 0 {
                    // Column count must be constant across files of a segment.
                    assert_eq!(segment_columns, columns);
                } else {
                    segment_columns = columns;
                }
                segment_rows += rows;
                segment_data_size += data_size;
                segment_index_size += index_size;
                segment_offsets_size += offsets_size;
                segment_config_size += config_size;
            }

            total_data_size += data_size;
            total_index_size += index_size;
            total_offsets_size += offsets_size;
            total_config_size += config_size;

            // Manually drop provider, otherwise removal from cache will deadlock.
            drop(jar_provider);
            // Removes from cache, since if we have many files, it may hit ulimit limits
            static_file_provider.remove_cached_provider(segment, fixed_block_range.end());
        }

        if !self.detailed_segments {
            // Single aggregated row for the whole segment.
            let first_ranges = ranges.first().expect("not empty list of ranges");
            let last_ranges = ranges.last().expect("not empty list of ranges");
            let block_range =
                SegmentRangeInclusive::new(first_ranges.0.start(), last_ranges.0.end());
            // Transaction ranges can be empty, so we need to find the first and last which are
            // not.
            let tx_range = {
                let start = ranges
                    .iter()
                    .find_map(|(_, tx_range)| tx_range.map(|r| r.start()))
                    .unwrap_or_default();
                let end =
                    ranges.iter().rev().find_map(|(_, tx_range)| tx_range.map(|r| r.end()));
                end.map(|end| SegmentRangeInclusive::new(start, end))
            };
            let mut row = Row::new();
            row.add_cell(Cell::new(segment))
                .add_cell(Cell::new(format!("{block_range}")))
                .add_cell(Cell::new(
                    tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")),
                ))
                .add_cell(Cell::new(format!("{segment_columns} x {segment_rows}")));
            if self.detailed_sizes {
                row.add_cell(Cell::new(human_bytes(segment_data_size as f64)))
                    .add_cell(Cell::new(human_bytes(segment_index_size as f64)))
                    .add_cell(Cell::new(human_bytes(segment_offsets_size as f64)))
                    .add_cell(Cell::new(human_bytes(segment_config_size as f64)));
            }
            row.add_cell(Cell::new(human_bytes(
                (segment_data_size +
                    segment_index_size +
                    segment_offsets_size +
                    segment_config_size) as f64,
            )));
            table.add_row(row);
        }
    }

    // Separator row sized to the widest content in each column.
    let max_widths = table.column_max_content_widths();
    let mut separator = Row::new();
    for width in max_widths {
        separator.add_cell(Cell::new("-".repeat(width as usize)));
    }
    table.add_row(separator);

    // Grand-total row across all segments.
    let mut row = Row::new();
    row.add_cell(Cell::new("Total"))
        .add_cell(Cell::new(""))
        .add_cell(Cell::new(""))
        .add_cell(Cell::new(""));
    if self.detailed_sizes {
        row.add_cell(Cell::new(human_bytes(total_data_size as f64)))
            .add_cell(Cell::new(human_bytes(total_index_size as f64)))
            .add_cell(Cell::new(human_bytes(total_offsets_size as f64)))
            .add_cell(Cell::new(human_bytes(total_config_size as f64)));
    }
    row.add_cell(Cell::new(human_bytes(
        (total_data_size + total_index_size + total_offsets_size + total_config_size) as f64,
    )));
    table.add_row(row);

    Ok(table)
}
/// Builds a table of per-table checksums by hashing every MDBX table.
fn checksum_report<N: ProviderNodeTypes>(&self, tool: &DbTool<N>) -> eyre::Result<ComfyTable> {
    let mut table = ComfyTable::new();
    table.load_preset(comfy_table::presets::ASCII_MARKDOWN);
    table.set_header(vec![Cell::new("Table"), Cell::new("Checksum"), Cell::new("Elapsed")]);

    let db_tables = Tables::ALL;
    let mut total_elapsed = Duration::default();

    for &db_table in db_tables {
        // NOTE(review): a `view_rt` failure panics here; consider propagating
        // the error instead — confirm this is acceptable for a CLI report.
        let (checksum, elapsed) = ChecksumViewer::new(tool).view_rt(db_table).unwrap();

        // increment duration for final report
        total_elapsed += elapsed;

        // add rows containing checksums to the table
        let mut row = Row::new();
        row.add_cell(Cell::new(db_table));
        row.add_cell(Cell::new(format!("{checksum:x}")));
        row.add_cell(Cell::new(format!("{elapsed:?}")));
        table.add_row(row);
    }

    // add a separator for the final report
    let max_widths = table.column_max_content_widths();
    let mut separator = Row::new();
    for width in max_widths {
        separator.add_cell(Cell::new("-".repeat(width as usize)));
    }
    table.add_row(separator);

    // add the final report
    let mut row = Row::new();
    row.add_cell(Cell::new("Total elapsed"));
    row.add_cell(Cell::new(""));
    row.add_cell(Cell::new(format!("{total_elapsed:?}")));
    table.add_row(row);

    Ok(table)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/diff.rs | crates/cli/commands/src/db/diff.rs | use clap::Parser;
use reth_db::{open_db_read_only, tables_to_generic, DatabaseEnv};
use reth_db_api::{
cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx, Tables,
};
use reth_db_common::DbTool;
use reth_node_builder::{NodeTypes, NodeTypesWithDBAdapter};
use reth_node_core::{
args::DatabaseArgs,
dirs::{DataDirPath, PlatformPath},
};
use std::{
collections::BTreeMap,
fmt::Debug,
fs::{self, File},
hash::Hash,
io::Write,
path::{Path, PathBuf},
sync::Arc,
};
use tracing::{info, warn};
#[derive(Parser, Debug)]
/// The arguments for the `reth db diff` command
// NOTE: the `///` doc comments on the fields below double as the clap `--help`
// text, so they are user-facing strings.
pub struct Command {
    /// The path to the data dir for all reth files and subdirectories.
    #[arg(long, verbatim_doc_comment)]
    secondary_datadir: PlatformPath<DataDirPath>,
    /// Arguments for the second database
    #[command(flatten)]
    second_db: DatabaseArgs,
    /// The table name to diff. If not specified, all tables are diffed.
    #[arg(long, verbatim_doc_comment)]
    table: Option<Tables>,
    /// The output directory for the diff report.
    #[arg(long, verbatim_doc_comment)]
    output: PlatformPath<PathBuf>,
}
impl Command {
    /// Execute the `db diff` command.
    ///
    /// Opens the `db/` folder of the secondary datadir read-only, then for each
    /// selected table compares every key-value pair between the primary and
    /// secondary databases. Differing values are recorded as discrepancies and
    /// one-sided keys as "extra elements"; a per-table report is written to the
    /// output directory.
    pub fn execute<T: NodeTypes>(
        self,
        tool: &DbTool<NodeTypesWithDBAdapter<T, Arc<DatabaseEnv>>>,
    ) -> eyre::Result<()> {
        warn!("Make sure the node is not running when running `reth db diff`!");
        // Open the second database read-only from the secondary datadir.
        let secondary_db_path: PathBuf = self.secondary_datadir.join("db").into();
        let secondary_db = open_db_read_only(&secondary_db_path, self.second_db.database_args())?;
        // A single `--table` restricts the diff; otherwise every table is diffed.
        let selected_tables = if let Some(table) = &self.table {
            std::slice::from_ref(table)
        } else {
            Tables::ALL
        };
        for table in selected_tables {
            let mut first_tx = tool.provider_factory.db_ref().tx()?;
            let mut second_tx = secondary_db.tx()?;
            // This runs for a long time with the node stopped, so the long-read
            // transaction safety check would only get in the way.
            first_tx.disable_long_read_transaction_safety();
            second_tx.disable_long_read_transaction_safety();
            let report_dir = self.output.clone();
            tables_to_generic!(table, |Table| find_diffs::<Table>(
                first_tx,
                second_tx,
                report_dir
            ))?;
        }
        Ok(())
    }
}
/// Runs the diff for one table and reports the outcome.
///
/// Logs a short summary, and — when anything differs — writes a `{table}.txt`
/// report into `output_dir` containing the same summary plus every discrepancy
/// and extra element.
fn find_diffs<T: Table>(
    primary_tx: impl DbTx,
    secondary_tx: impl DbTx,
    output_dir: impl AsRef<Path>,
) -> eyre::Result<()>
where
    T::Key: Hash,
    T::Value: PartialEq,
{
    let table = T::NAME;
    info!("Analyzing table {table}...");
    let diff = find_diffs_advanced::<T>(&primary_tx, &secondary_tx)?;
    info!("Done analyzing table {table}!");
    // Blank line plus header keeps the per-table summary readable in the logs.
    info!("");
    info!("Diff results for {table}:");
    let discrepancies = diff.discrepancies.len();
    let extra_elements = diff.extra_elements.len();
    // Nothing differs: log it and skip writing a report file entirely.
    if (discrepancies, extra_elements) == (0, 0) {
        info!("No discrepancies or extra elements found in table {table}");
        return Ok(())
    }
    fs::create_dir_all(output_dir.as_ref())?;
    let file_name = format!("{table}.txt");
    let mut file = File::create(output_dir.as_ref().join(file_name.clone()))?;
    writeln!(file, "Diff results for {table}")?;
    // Each summary line goes to both the report file and the log.
    let discrepancy_summary = if discrepancies > 0 {
        format!("Found {discrepancies} discrepancies in table {table}")
    } else {
        format!("No discrepancies found in table {table}")
    };
    writeln!(file, "{discrepancy_summary}")?;
    info!("{discrepancy_summary}");
    let extra_summary = if extra_elements > 0 {
        format!("Found {extra_elements} extra elements in table {table}")
    } else {
        format!("No extra elements found in table {table}")
    };
    writeln!(file, "{extra_summary}")?;
    info!("{extra_summary}");
    info!("Writing diff results for {table} to {file_name}...");
    if discrepancies > 0 {
        writeln!(file, "Discrepancies:")?;
    }
    for entry in diff.discrepancies.values() {
        writeln!(file, "{entry:#?}")?;
    }
    if extra_elements > 0 {
        writeln!(file, "Extra elements:")?;
    }
    for entry in diff.extra_elements.values() {
        writeln!(file, "{entry:#?}")?;
    }
    let full_file_name = output_dir.as_ref().join(file_name);
    info!("Done writing diff results for {table} to {}", full_file_name.display());
    Ok(())
}
/// This diff algorithm is slightly different, it will walk _each_ table, cross-checking for the
/// element in the other table.
///
/// NOTE(review): the zipped walker below uses [`Iterator::zip`], which stops as soon as the
/// shorter walker is exhausted — trailing entries that only exist at the end of the longer
/// table are therefore never visited. Confirm whether that is acceptable for this tool.
fn find_diffs_advanced<T: Table>(
    primary_tx: &impl DbTx,
    secondary_tx: &impl DbTx,
) -> eyre::Result<TableDiffResult<T>>
where
    T::Value: PartialEq,
    T::Key: Hash,
{
    // initialize the zipped walker
    let mut primary_zip_cursor =
        primary_tx.cursor_read::<T>().expect("Was not able to obtain a cursor.");
    let primary_walker = primary_zip_cursor.walk(None)?;
    let mut secondary_zip_cursor =
        secondary_tx.cursor_read::<T>().expect("Was not able to obtain a cursor.");
    let secondary_walker = secondary_zip_cursor.walk(None)?;
    let zipped_cursor = primary_walker.zip(secondary_walker);
    // initialize the cursors for seeking when we are cross checking elements
    let mut primary_cursor =
        primary_tx.cursor_read::<T>().expect("Was not able to obtain a cursor.");
    let mut secondary_cursor =
        secondary_tx.cursor_read::<T>().expect("Was not able to obtain a cursor.");
    let mut result = TableDiffResult::<T>::default();
    // this loop will walk both tables, cross-checking for the element in the other table.
    // it basically just loops through both tables at the same time. if the keys are different, it
    // will check each key in the other table. if the keys are the same, it will compare the
    // values
    for (primary_entry, secondary_entry) in zipped_cursor {
        let (primary_key, primary_value) = primary_entry?;
        let (secondary_key, secondary_value) = secondary_entry?;
        if primary_key != secondary_key {
            // if the keys are different, we need to check if the key is in the other table
            let crossed_secondary =
                secondary_cursor.seek_exact(primary_key.clone())?.map(|(_, value)| value);
            result.try_push_discrepancy(
                primary_key.clone(),
                Some(primary_value),
                crossed_secondary,
            );
            // now do the same for the primary table
            let crossed_primary =
                primary_cursor.seek_exact(secondary_key.clone())?.map(|(_, value)| value);
            result.try_push_discrepancy(
                secondary_key.clone(),
                crossed_primary,
                Some(secondary_value),
            );
        } else {
            // the keys are the same, so we need to compare the values
            result.try_push_discrepancy(primary_key, Some(primary_value), Some(secondary_value));
        }
    }
    Ok(result)
}
/// Includes a table element between two databases with the same key, but different values
#[derive(Debug)]
struct TableDiffElement<T: Table> {
    /// The key for the element
    key: T::Key,
    /// The element from the first table
    // Only read through `Debug` formatting when the report is written.
    #[expect(dead_code)]
    first: T::Value,
    /// The element from the second table
    // Only read through `Debug` formatting when the report is written.
    #[expect(dead_code)]
    second: T::Value,
}
/// The diff result for an entire table. If the tables had the same number of elements, there will
/// be no extra elements.
struct TableDiffResult<T: Table>
where
    T::Key: Hash,
{
    /// All elements of the database that are different
    // BTreeMap keeps report output in key order.
    discrepancies: BTreeMap<T::Key, TableDiffElement<T>>,
    /// Any extra elements, and the table they are in
    extra_elements: BTreeMap<T::Key, ExtraTableElement<T>>,
}
impl<T> Default for TableDiffResult<T>
where
    T: Table,
    T::Key: Hash,
{
    /// An empty diff result: no discrepancies, no extra elements.
    fn default() -> Self {
        let discrepancies = BTreeMap::default();
        let extra_elements = BTreeMap::default();
        Self { discrepancies, extra_elements }
    }
}
impl<T: Table> TableDiffResult<T>
where
    T::Key: Hash,
{
    /// Record a keyed value mismatch between the two databases.
    fn push_discrepancy(&mut self, discrepancy: TableDiffElement<T>) {
        let key = discrepancy.key.clone();
        self.discrepancies.insert(key, discrepancy);
    }

    /// Record an element that exists in only one of the two databases.
    fn push_extra_element(&mut self, element: ExtraTableElement<T>) {
        let key = element.key().clone();
        self.extra_elements.insert(key, element);
    }
}
impl<T> TableDiffResult<T>
where
    T: Table,
    T::Key: Hash,
    T::Value: PartialEq,
{
    /// Classifies a candidate diff for `key`.
    ///
    /// Keys already recorded (as a discrepancy or extra element) are skipped.
    /// Otherwise: two differing values become a discrepancy, a one-sided value
    /// becomes an extra element, and equal values (or two `None`s) are ignored.
    fn try_push_discrepancy(
        &mut self,
        key: T::Key,
        first: Option<T::Value>,
        second: Option<T::Value>,
    ) {
        // Skip keys that were already classified in either set.
        if self.discrepancies.contains_key(&key) || self.extra_elements.contains_key(&key) {
            return
        }
        match (first, second) {
            (Some(first), Some(second)) if first != second => {
                self.push_discrepancy(TableDiffElement { key, first, second })
            }
            (Some(value), None) => {
                self.push_extra_element(ExtraTableElement::First { key, value })
            }
            (None, Some(value)) => {
                self.push_extra_element(ExtraTableElement::Second { key, value })
            }
            // Equal values or no value on either side: nothing to record.
            _ => {}
        }
    }
}
/// A single extra element from a table
#[derive(Debug)]
enum ExtraTableElement<T: Table> {
    /// The extra element that is in the first table
    // Payload only read via `Debug` formatting when the report is written.
    #[expect(dead_code)]
    First { key: T::Key, value: T::Value },
    /// The extra element that is in the second table
    #[expect(dead_code)]
    Second { key: T::Key, value: T::Value },
}
impl<T: Table> ExtraTableElement<T> {
    /// Return the key for the extra element
    // Used to index the element in `TableDiffResult::extra_elements`.
    const fn key(&self) -> &T::Key {
        match self {
            Self::First { key, .. } | Self::Second { key, .. } => key,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/list.rs | crates/cli/commands/src/db/list.rs | use super::tui::DbListTUI;
use alloy_primitives::hex;
use clap::Parser;
use eyre::WrapErr;
use reth_chainspec::EthereumHardforks;
use reth_db::DatabaseEnv;
use reth_db_api::{database::Database, table::Table, RawValue, TableViewer, Tables};
use reth_db_common::{DbTool, ListFilter};
use reth_node_builder::{NodeTypes, NodeTypesWithDBAdapter};
use std::{cell::RefCell, sync::Arc};
use tracing::error;
#[derive(Parser, Debug)]
/// The arguments for the `reth db list` command
// NOTE: the `///` doc comments on the fields below double as the clap `--help`
// text, so they are user-facing strings.
pub struct Command {
    /// The table name
    table: Tables,
    /// Skip first N entries
    #[arg(long, short, default_value_t = 0)]
    skip: usize,
    /// Reverse the order of the entries. If enabled last table entries are read.
    #[arg(long, short, default_value_t = false)]
    reverse: bool,
    /// How many items to take from the walker
    #[arg(long, short, default_value_t = 5)]
    len: usize,
    /// Search parameter for both keys and values. Prefix it with `0x` to search for binary data,
    /// and text otherwise.
    ///
    /// ATTENTION! For compressed tables (`Transactions` and `Receipts`), there might be
    /// missing results since the search uses the raw uncompressed value from the database.
    #[arg(long)]
    search: Option<String>,
    /// Minimum size of row in bytes
    #[arg(long, default_value_t = 0)]
    min_row_size: usize,
    /// Minimum size of key in bytes
    #[arg(long, default_value_t = 0)]
    min_key_size: usize,
    /// Minimum size of value in bytes
    #[arg(long, default_value_t = 0)]
    min_value_size: usize,
    /// Returns the number of rows found.
    #[arg(long, short)]
    count: bool,
    /// Dump as JSON instead of using TUI.
    #[arg(long, short)]
    json: bool,
    /// Output bytes instead of human-readable decoded value
    #[arg(long)]
    raw: bool,
}
impl Command {
    /// Execute `db list` command
    pub fn execute<N: NodeTypes<ChainSpec: EthereumHardforks>>(
        self,
        tool: &DbTool<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
    ) -> eyre::Result<()> {
        // The viewer dispatches on the chosen table and does the listing.
        let viewer = ListTableViewer { tool, args: &self };
        self.table.view(&viewer)
    }

    /// Generate [`ListFilter`] from command.
    pub fn list_filter(&self) -> ListFilter {
        // `0x`-prefixed search strings are decoded as hex bytes; anything else
        // is matched against raw UTF-8 bytes.
        let search = match self.search.as_deref() {
            Some(raw) => match raw.strip_prefix("0x") {
                Some(hex_str) => hex::decode(hex_str).unwrap(),
                None => raw.as_bytes().to_vec(),
            },
            None => Vec::new(),
        };
        ListFilter {
            skip: self.skip,
            len: self.len,
            search,
            min_row_size: self.min_row_size,
            min_key_size: self.min_key_size,
            min_value_size: self.min_value_size,
            reverse: self.reverse,
            only_count: self.count,
        }
    }
}
/// [`TableViewer`] implementation backing `reth db list`.
struct ListTableViewer<'a, N: NodeTypes> {
    /// Read access to the node database.
    tool: &'a DbTool<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
    /// The parsed `reth db list` CLI arguments.
    args: &'a Command,
}
impl<N: NodeTypes> TableViewer<()> for ListTableViewer<'_, N> {
    type Error = eyre::Report;

    /// Lists entries of table `T`.
    ///
    /// Depending on the CLI flags this prints a JSON dump (`--json`), just the
    /// matching row count (`--count`), or launches the interactive
    /// [`DbListTUI`].
    fn view<T: Table>(&self) -> Result<(), Self::Error> {
        self.tool.provider_factory.db_ref().view(|tx| {
            let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?;
            // Fix: this error message previously interpolated `stringify!($table)`,
            // which renders the literal text `$table` (a leftover from converting
            // macro-generated code), instead of the actual table name.
            let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?;
            let total_entries = stats.entries();
            let final_entry_idx = total_entries.saturating_sub(1);
            if self.args.skip > final_entry_idx {
                error!(
                    target: "reth::cli",
                    "Start index {start} is greater than the final entry index ({final_entry_idx}) in the table {table}",
                    start = self.args.skip,
                    final_entry_idx = final_entry_idx,
                    table = self.args.table.name()
                );
                return Ok(())
            }
            let list_filter = self.args.list_filter();
            if self.args.json || self.args.count {
                let (list, count) = self.tool.list::<T>(&list_filter)?;
                if self.args.count {
                    println!("{count} entries found.")
                } else if self.args.raw {
                    // Re-wrap values as raw bytes so the JSON shows the undecoded representation.
                    let list = list.into_iter().map(|row| (row.0, RawValue::new(row.1).into_value())).collect::<Vec<_>>();
                    println!("{}", serde_json::to_string_pretty(&list)?);
                } else {
                    println!("{}", serde_json::to_string_pretty(&list)?);
                }
                Ok(())
            } else {
                // The TUI pulls pages lazily; the filter is updated in place per page request.
                let list_filter = RefCell::new(list_filter);
                DbListTUI::<_, T>::new(|skip, len| {
                    list_filter.borrow_mut().update_page(skip, len);
                    self.tool.list::<T>(&list_filter.borrow()).unwrap().0
                }, self.args.skip, self.args.len, total_entries, self.args.raw).run()
            }
        })??;
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/clear.rs | crates/cli/commands/src/db/clear.rs | use clap::{Parser, Subcommand};
use reth_db::static_file::iter_static_files;
use reth_db_api::{
database::Database,
table::Table,
transaction::{DbTx, DbTxMut},
TableViewer, Tables,
};
use reth_node_builder::NodeTypesWithDB;
use reth_provider::{ProviderFactory, StaticFileProviderFactory};
use reth_static_file_types::StaticFileSegment;
/// The arguments for the `reth db clear` command
#[derive(Parser, Debug)]
pub struct Command {
    // Which storage to clear: an mdbx table or a static-file segment.
    #[command(subcommand)]
    subcommand: Subcommands,
}
impl Command {
    /// Execute `db clear` command
    ///
    /// Clears either a whole mdbx table or every jar of a static-file segment.
    pub fn execute<N: NodeTypesWithDB>(
        self,
        provider_factory: ProviderFactory<N>,
    ) -> eyre::Result<()> {
        match self.subcommand {
            Subcommands::Mdbx { table } => {
                table.view(&ClearViewer { db: provider_factory.db_ref() })?
            }
            Subcommands::StaticFile { segment } => {
                let static_file_provider = provider_factory.static_file_provider();
                let all_static_files = iter_static_files(static_file_provider.directory())?;
                // Delete every jar that belongs to the requested segment; a
                // segment with no files is a no-op.
                for (block_range, _) in all_static_files.get(&segment).into_iter().flatten() {
                    static_file_provider.delete_jar(segment, block_range.start())?;
                }
            }
        }
        Ok(())
    }
}
#[derive(Subcommand, Debug)]
// The `///` doc comments on the variants below are also the clap `--help` text.
enum Subcommands {
    /// Deletes all database table entries
    Mdbx { table: Tables },
    /// Deletes all static file segment entries
    StaticFile { segment: StaticFileSegment },
}
/// [`TableViewer`] that clears the selected table on a read-write database.
struct ClearViewer<'a, DB: Database> {
    /// Database whose table will be cleared.
    db: &'a DB,
}
impl<DB: Database> TableViewer<()> for ClearViewer<'_, DB> {
    type Error = eyre::Report;

    /// Empties table `T` inside a single read-write transaction and commits.
    fn view<T: Table>(&self) -> Result<(), Self::Error> {
        let txn = self.db.tx_mut()?;
        txn.clear::<T>()?;
        txn.commit()?;
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/checksum.rs | crates/cli/commands/src/db/checksum.rs | use crate::{
common::CliNodeTypes,
db::get::{maybe_json_value_parser, table_key},
};
use alloy_primitives::map::foldhash::fast::FixedState;
use clap::Parser;
use reth_chainspec::EthereumHardforks;
use reth_db::DatabaseEnv;
use reth_db_api::{
cursor::DbCursorRO, table::Table, transaction::DbTx, RawKey, RawTable, RawValue, TableViewer,
Tables,
};
use reth_db_common::DbTool;
use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter};
use reth_provider::{providers::ProviderNodeTypes, DBProvider};
use std::{
hash::{BuildHasher, Hasher},
sync::Arc,
time::{Duration, Instant},
};
use tracing::{info, warn};
#[derive(Parser, Debug)]
/// The arguments for the `reth db checksum` command
// NOTE: the `///` doc comments on the fields below double as the clap `--help`
// text. Start/end keys are parsed by `maybe_json_value_parser`.
pub struct Command {
    /// The table name
    table: Tables,
    /// The start of the range to checksum.
    #[arg(long, value_parser = maybe_json_value_parser)]
    start_key: Option<String>,
    /// The end of the range to checksum.
    #[arg(long, value_parser = maybe_json_value_parser)]
    end_key: Option<String>,
    /// The maximum number of records that are queried and used to compute the
    /// checksum.
    #[arg(long)]
    limit: Option<usize>,
}
impl Command {
    /// Execute `db checksum` command
    ///
    /// Builds a [`ChecksumViewer`] from the parsed arguments and dispatches it
    /// on the selected table.
    pub fn execute<N: CliNodeTypes<ChainSpec: EthereumHardforks>>(
        self,
        tool: &DbTool<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
    ) -> eyre::Result<()> {
        warn!("This command should be run without the node running!");
        let Self { table, start_key, end_key, limit } = self;
        let viewer = ChecksumViewer { tool, start_key, end_key, limit };
        table.view(&viewer)?;
        Ok(())
    }
}
/// [`TableViewer`] that hashes the raw key/value bytes of a (sub)range of a table.
pub(crate) struct ChecksumViewer<'a, N: NodeTypesWithDB> {
    /// Read access to the node database.
    tool: &'a DbTool<N>,
    /// Optional JSON-encoded start key bounding the hashed range.
    start_key: Option<String>,
    /// Optional JSON-encoded end key bounding the hashed range.
    end_key: Option<String>,
    /// Optional cap on the number of entries hashed.
    limit: Option<usize>,
}
impl<N: NodeTypesWithDB> ChecksumViewer<'_, N> {
    /// Creates a viewer that hashes the entire table (no bounds, no limit).
    pub(crate) const fn new(tool: &'_ DbTool<N>) -> ChecksumViewer<'_, N> {
        ChecksumViewer { tool, start_key: None, end_key: None, limit: None }
    }
}
impl<N: ProviderNodeTypes> TableViewer<(u64, Duration)> for ChecksumViewer<'_, N> {
    type Error = eyre::Report;

    /// Hashes the configured key range of table `T` and returns the checksum
    /// together with the elapsed time.
    ///
    /// The hash covers the raw (undecoded) key and value bytes in database
    /// iteration order, so the exact order of `write` calls determines the
    /// result. A fixed seed keeps checksums comparable across runs.
    fn view<T: Table>(&self) -> Result<(u64, Duration), Self::Error> {
        let provider =
            self.tool.provider_factory.provider()?.disable_long_read_transaction_safety();
        let tx = provider.tx_ref();
        info!(
            "Start computing checksum, start={:?}, end={:?}, limit={:?}",
            self.start_key, self.end_key, self.limit
        );
        let mut cursor = tx.cursor_read::<RawTable<T>>()?;
        // Decode the optional JSON-encoded bounds into raw keys and build the
        // matching range walker.
        let walker = match (self.start_key.as_deref(), self.end_key.as_deref()) {
            (Some(start), Some(end)) => {
                let start_key = table_key::<T>(start).map(RawKey::new)?;
                let end_key = table_key::<T>(end).map(RawKey::new)?;
                cursor.walk_range(start_key..=end_key)?
            }
            (None, Some(end)) => {
                let end_key = table_key::<T>(end).map(RawKey::new)?;
                cursor.walk_range(..=end_key)?
            }
            (Some(start), None) => {
                let start_key = table_key::<T>(start).map(RawKey::new)?;
                cursor.walk_range(start_key..)?
            }
            (None, None) => cursor.walk_range(..)?,
        };
        let start_time = Instant::now();
        // Fixed seed ("RETHRETH" as big-endian u64) for reproducible checksums.
        let mut hasher = FixedState::with_seed(u64::from_be_bytes(*b"RETHRETH")).build_hasher();
        let mut total = 0;
        let limit = self.limit.unwrap_or(usize::MAX);
        // First and last key actually hashed, logged afterwards so the run can
        // be reproduced with explicit bounds.
        let mut enumerate_start_key = None;
        let mut enumerate_end_key = None;
        for (index, entry) in walker.enumerate() {
            let (k, v): (RawKey<T::Key>, RawValue<T::Value>) = entry?;
            // Periodic progress log every 100k entries.
            if index.is_multiple_of(100_000) {
                info!("Hashed {index} entries.");
            }
            hasher.write(k.raw_key());
            hasher.write(v.raw_value());
            if enumerate_start_key.is_none() {
                enumerate_start_key = Some(k.clone());
            }
            enumerate_end_key = Some(k);
            total = index + 1;
            if total >= limit {
                break
            }
        }
        info!("Hashed {total} entries.");
        if let (Some(s), Some(e)) = (enumerate_start_key, enumerate_end_key) {
            info!("start-key: {}", serde_json::to_string(&s.key()?).unwrap_or_default());
            info!("end-key: {}", serde_json::to_string(&e.key()?).unwrap_or_default());
        }
        let checksum = hasher.finish();
        let elapsed = start_time.elapsed();
        info!("Checksum for table `{}`: {:#x} (elapsed: {:?})", T::NAME, checksum, elapsed);
        Ok((checksum, elapsed))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/mod.rs | crates/cli/commands/src/db/mod.rs | use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use clap::{Parser, Subcommand};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION};
use reth_db_common::DbTool;
use std::{
io::{self, Write},
sync::Arc,
};
mod checksum;
mod clear;
mod diff;
mod get;
mod list;
mod repair_trie;
mod stats;
/// DB List TUI
mod tui;
/// `reth db` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // Shared environment arguments (datadir, chain, database settings).
    #[command(flatten)]
    env: EnvironmentArgs<C>,
    // The selected `reth db` subcommand.
    #[command(subcommand)]
    command: Subcommands,
}
#[derive(Subcommand, Debug)]
/// `reth db` subcommands
// The `///` doc comments on the variants below are also the clap `--help` text.
pub enum Subcommands {
    /// Lists all the tables, their entry count and their size
    Stats(stats::Command),
    /// Lists the contents of a table
    List(list::Command),
    /// Calculates the content checksum of a table
    Checksum(checksum::Command),
    /// Create a diff between two database tables or two entire databases.
    Diff(diff::Command),
    /// Gets the content of a table for the given key
    Get(get::Command),
    /// Deletes all database entries
    Drop {
        /// Bypasses the interactive confirmation and drops the database directly
        #[arg(short, long)]
        force: bool,
    },
    /// Deletes all table entries
    Clear(clear::Command),
    /// Verifies trie consistency and outputs any inconsistencies
    RepairTrie(repair_trie::Command),
    /// Lists current and local database versions
    Version,
    /// Returns the full database path
    Path,
}
/// `db_ro_exec` opens a database in read-only mode, and then execute with the provided command
// Expands to: initialize the environment with read-only access, bind a
// `DbTool` to `$tool`, then run `$command` with it in scope.
macro_rules! db_ro_exec {
    ($env:expr, $tool:ident, $N:ident, $command:block) => {
        let Environment { provider_factory, .. } = $env.init::<$N>(AccessRights::RO)?;
        let $tool = DbTool::new(provider_factory.clone())?;
        $command;
    };
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> {
    /// Execute `db` command
    ///
    /// Resolves and validates the datadir, then dispatches to the selected
    /// subcommand. Inspection subcommands open the environment read-only via
    /// `db_ro_exec!`; `drop`, `clear`, and `repair-trie` (without `--dry-run`)
    /// open it read-write.
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(self) -> eyre::Result<()> {
        let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain());
        let db_path = data_dir.db();
        let static_files_path = data_dir.static_files();
        let exex_wal_path = data_dir.exex_wal();
        // ensure the provided datadir exist
        eyre::ensure!(
            data_dir.data_dir().is_dir(),
            "Datadir does not exist: {:?}",
            data_dir.data_dir()
        );
        // ensure the provided database exist
        eyre::ensure!(db_path.is_dir(), "Database does not exist: {:?}", db_path);
        match self.command {
            // TODO: We'll need to add this on the DB trait.
            Subcommands::Stats(command) => {
                db_ro_exec!(self.env, tool, N, {
                    command.execute(data_dir, &tool)?;
                });
            }
            Subcommands::List(command) => {
                db_ro_exec!(self.env, tool, N, {
                    command.execute(&tool)?;
                });
            }
            Subcommands::Checksum(command) => {
                db_ro_exec!(self.env, tool, N, {
                    command.execute(&tool)?;
                });
            }
            Subcommands::Diff(command) => {
                db_ro_exec!(self.env, tool, N, {
                    command.execute(&tool)?;
                });
            }
            Subcommands::Get(command) => {
                db_ro_exec!(self.env, tool, N, {
                    command.execute(&tool)?;
                });
            }
            Subcommands::Drop { force } => {
                if !force {
                    // Ask for confirmation
                    print!(
                        "Are you sure you want to drop the database at {data_dir}? This cannot be undone. (y/N): "
                    );
                    // Flush the buffer to ensure the message is printed immediately
                    io::stdout().flush().unwrap();
                    let mut input = String::new();
                    io::stdin().read_line(&mut input).expect("Failed to read line");
                    // Anything other than an explicit `y`/`Y` aborts the drop.
                    if !input.trim().eq_ignore_ascii_case("y") {
                        println!("Database drop aborted!");
                        return Ok(())
                    }
                }
                let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
                let tool = DbTool::new(provider_factory)?;
                tool.drop(db_path, static_files_path, exex_wal_path)?;
            }
            Subcommands::Clear(command) => {
                let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
                command.execute(provider_factory)?;
            }
            Subcommands::RepairTrie(command) => {
                // A dry run never writes, so read-only access suffices.
                let access_rights =
                    if command.dry_run { AccessRights::RO } else { AccessRights::RW };
                let Environment { provider_factory, .. } = self.env.init::<N>(access_rights)?;
                command.execute(provider_factory)?;
            }
            Subcommands::Version => {
                // A missing version file means the database was never initialized.
                let local_db_version = match get_db_version(&db_path) {
                    Ok(version) => Some(version),
                    Err(DatabaseVersionError::MissingFile) => None,
                    Err(err) => return Err(err.into()),
                };
                println!("Current database version: {DB_VERSION}");
                if let Some(version) = local_db_version {
                    println!("Local database version: {version}");
                } else {
                    println!("Local database is uninitialized");
                }
            }
            Subcommands::Path => {
                println!("{}", db_path.display());
            }
        }
        Ok(())
    }
}
impl<C: ChainSpecParser> Command<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // The chain spec always exists on the environment arguments, hence `Some`.
        Some(&self.env.chain)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS};
    use std::path::Path;

    /// Global flags such as `--datadir` must parse when placed before the
    /// `stats` subcommand and resolve to the given path.
    #[test]
    fn parse_stats_globals() {
        let datadir = format!("../{}", SUPPORTED_CHAINS[0]);
        let argv = ["reth", "--datadir", &datadir, "stats"];
        let cmd = Command::<EthereumChainSpecParser>::try_parse_from(argv).unwrap();
        let resolved = cmd.env.datadir.resolve_datadir(cmd.env.chain.chain);
        assert_eq!(resolved.as_ref(), Path::new(&datadir));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/repair_trie.rs | crates/cli/commands/src/db/repair_trie.rs | use clap::Parser;
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
database::Database,
tables,
transaction::{DbTx, DbTxMut},
};
use reth_node_builder::NodeTypesWithDB;
use reth_provider::ProviderFactory;
use reth_trie::{
verify::{Output, Verifier},
Nibbles,
};
use reth_trie_common::{StorageTrieEntry, StoredNibbles, StoredNibblesSubKey};
use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
use std::time::{Duration, Instant};
use tracing::{info, warn};
// Minimum interval between progress log lines while verifying/repairing.
const PROGRESS_PERIOD: Duration = Duration::from_secs(5);
/// The arguments for the `reth db repair-trie` command
#[derive(Parser, Debug)]
pub struct Command {
    /// Only show inconsistencies without making any repairs
    #[arg(long)]
    pub(crate) dry_run: bool,
}
impl Command {
    /// Execute `db repair-trie` command
    ///
    /// With `--dry-run` inconsistencies are only reported; otherwise they are
    /// repaired in place.
    pub fn execute<N: NodeTypesWithDB>(
        self,
        provider_factory: ProviderFactory<N>,
    ) -> eyre::Result<()> {
        match self.dry_run {
            true => verify_only(provider_factory),
            false => verify_and_repair(provider_factory),
        }
    }
}
/// Walks the trie verifier in read-only mode and logs every inconsistency
/// without modifying the database.
fn verify_only<N: NodeTypesWithDB>(provider_factory: ProviderFactory<N>) -> eyre::Result<()> {
    let db = provider_factory.db_ref();
    let mut tx = db.tx()?;
    // Verification can outlast the default read-transaction safety window and
    // the node is expected to be stopped, so disable the check.
    tx.disable_long_read_transaction_safety();
    // Build the verifier over database-backed trie and hashed-state cursors.
    let trie_cursor_factory = DatabaseTrieCursorFactory::new(&tx);
    let hashed_cursor_factory = DatabaseHashedCursorFactory::new(&tx);
    let verifier = Verifier::new(trie_cursor_factory, hashed_cursor_factory)?;
    let mut inconsistent_nodes = 0;
    let started = Instant::now();
    let mut last_report = Instant::now();
    for output_result in verifier {
        match output_result? {
            Output::Progress(path) => {
                // Rate-limit progress reporting.
                if last_report.elapsed() > PROGRESS_PERIOD {
                    output_progress(path, started, inconsistent_nodes);
                    last_report = Instant::now();
                }
            }
            other => {
                warn!("Inconsistency found: {other:?}");
                inconsistent_nodes += 1;
            }
        }
    }
    info!("Found {} inconsistencies (dry run - no changes made)", inconsistent_nodes);
    Ok(())
}
/// Walks the trie verifier and repairs every inconsistency it reports,
/// committing the transaction only when at least one repair was made.
fn verify_and_repair<N: NodeTypesWithDB>(provider_factory: ProviderFactory<N>) -> eyre::Result<()> {
    // Get a database transaction directly from the database
    let db = provider_factory.db_ref();
    let mut tx = db.tx_mut()?;
    // Repairs can run for a long time; the node is expected to be stopped.
    tx.disable_long_read_transaction_safety();
    // Create the hashed cursor factory
    let hashed_cursor_factory = DatabaseHashedCursorFactory::new(&tx);
    // Create the trie cursor factory
    let trie_cursor_factory = DatabaseTrieCursorFactory::new(&tx);
    // Create the verifier
    let verifier = Verifier::new(trie_cursor_factory, hashed_cursor_factory)?;
    // Write cursors used to apply the repairs while iterating.
    let mut account_trie_cursor = tx.cursor_write::<tables::AccountsTrie>()?;
    let mut storage_trie_cursor = tx.cursor_dup_write::<tables::StoragesTrie>()?;
    let mut inconsistent_nodes = 0;
    let start_time = Instant::now();
    let mut last_progress_time = Instant::now();
    // Iterate over the verifier and repair inconsistencies
    for output_result in verifier {
        let output = output_result?;
        if !matches!(output, Output::Progress(_)) {
            warn!("Inconsistency found, will repair: {output:?}");
            inconsistent_nodes += 1;
        }
        match output {
            Output::AccountExtra(path, _node) => {
                // Extra account node in trie, remove it
                let nibbles = StoredNibbles(path);
                if account_trie_cursor.seek_exact(nibbles)?.is_some() {
                    account_trie_cursor.delete_current()?;
                }
            }
            Output::StorageExtra(account, path, _node) => {
                // Extra storage node in trie, remove it
                let nibbles = StoredNibblesSubKey(path);
                // The `filter` guards against deleting an entry whose nibbles
                // don't match the sought subkey exactly.
                if storage_trie_cursor
                    .seek_by_key_subkey(account, nibbles.clone())?
                    .filter(|e| e.nibbles == nibbles)
                    .is_some()
                {
                    storage_trie_cursor.delete_current()?;
                }
            }
            Output::AccountWrong { path, expected: node, .. } |
            Output::AccountMissing(path, node) => {
                // Wrong/missing account node value, upsert it
                let nibbles = StoredNibbles(path);
                account_trie_cursor.upsert(nibbles, &node)?;
            }
            Output::StorageWrong { account, path, expected: node, .. } |
            Output::StorageMissing(account, path, node) => {
                // Wrong/missing storage node value, upsert it
                let nibbles = StoredNibblesSubKey(path);
                let entry = StorageTrieEntry { nibbles, node };
                storage_trie_cursor.upsert(account, &entry)?;
            }
            Output::Progress(path) => {
                // Rate-limited progress logging.
                if last_progress_time.elapsed() > PROGRESS_PERIOD {
                    output_progress(path, start_time, inconsistent_nodes);
                    last_progress_time = Instant::now();
                }
            }
        }
    }
    if inconsistent_nodes == 0 {
        info!("No inconsistencies found");
    } else {
        info!("Repaired {} inconsistencies", inconsistent_nodes);
        // Commit only when something was actually repaired.
        tx.commit()?;
        info!("Changes committed to database");
    }
    Ok(())
}
/// Output progress information based on the last seen account path.
///
/// The first 16 nibbles of the path are folded into a `u64` position in the
/// key space, from which a completion percentage and an ETA are derived.
fn output_progress(last_account: Nibbles, start_time: Instant, inconsistent_nodes: u64) {
    // Fold up to 16 leading nibbles into a u64; left-align short paths so they
    // still map onto the full 64-bit key space.
    let used_nibbles = last_account.len().min(16);
    let mut position: u64 = 0;
    for idx in 0..used_nibbles {
        position = (position << 4) | u64::from(last_account.get(idx).unwrap_or(0));
    }
    if used_nibbles < 16 {
        position <<= (16 - used_nibbles) * 4;
    }
    let progress_percent = position as f64 / u64::MAX as f64 * 100.0;
    let progress_percent_str = format!("{progress_percent:.2}");
    // ETA assumes progress through the key space is roughly linear in time.
    let elapsed_secs = start_time.elapsed().as_secs_f64();
    let estimated_total_time =
        if progress_percent > 0.0 { elapsed_secs / (progress_percent / 100.0) } else { 0.0 };
    // A negative remainder saturates to zero through the float-to-int cast.
    let eta_duration = Duration::from_secs((estimated_total_time - elapsed_secs) as u64);
    info!(
        progress_percent = progress_percent_str,
        eta = %humantime::format_duration(eta_duration),
        inconsistent_nodes,
        "Repairing trie tables",
    );
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/tui.rs | crates/cli/commands/src/db/tui.rs | use crossterm::{
event::{self, Event, KeyCode, MouseEventKind},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{
backend::{Backend, CrosstermBackend},
layout::{Alignment, Constraint, Direction, Layout},
style::{Color, Modifier, Style},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
Frame, Terminal,
};
use reth_db_api::{
table::{Table, TableRow},
RawValue,
};
use std::{
io,
time::{Duration, Instant},
};
use tracing::error;
/// Available keybindings for the [`DbListTUI`], rendered in the footer as
/// `[key] description` pairs.
static CMDS: [(&str, &str); 6] = [
    ("q", "Quit"),
    ("↑", "Entry above"),
    ("↓", "Entry below"),
    ("←", "Previous page"),
    ("→", "Next page"),
    ("G", "Go to a specific page"),
];
/// Input mode of the TUI: either browsing the key list, or typing a page
/// number into the "go to page" prompt.
#[derive(Default, Eq, PartialEq)]
pub(crate) enum ViewMode {
    /// Normal list view mode
    #[default]
    Normal,
    /// Currently wanting to go to a page
    GoToPage,
}
/// Storage for one page of table rows, kept either raw (undecoded) or decoded.
enum Entries<T: Table> {
    /// Pairs of [`Table::Key`] and [`RawValue<Table::Value>`]
    RawValues(Vec<(T::Key, RawValue<T::Value>)>),
    /// Pairs of [`Table::Key`] and [`Table::Value`]
    Values(Vec<TableRow<T>>),
}
impl<T: Table> Entries<T> {
    /// Constructs an empty container: [`Entries::RawValues`] when `raw_values`
    /// is `true`, [`Entries::Values`] otherwise.
    const fn new_with_raw_values(raw_values: bool) -> Self {
        if raw_values {
            Self::RawValues(Vec::new())
        } else {
            Self::Values(Vec::new())
        }
    }

    /// Replaces the stored entries, converting each [`Table::Value`] into a
    /// [`RawValue<Table::Value>`] when operating in raw mode.
    fn set(&mut self, new_entries: Vec<TableRow<T>>) {
        match self {
            Self::RawValues(slot) => {
                *slot = new_entries.into_iter().map(|(k, v)| (k, v.into())).collect();
            }
            Self::Values(slot) => {
                *slot = new_entries;
            }
        }
    }

    /// Number of entries currently held.
    fn len(&self) -> usize {
        match self {
            Self::RawValues(rows) => rows.len(),
            Self::Values(rows) => rows.len(),
        }
    }

    /// Iterator over the keys of the stored entries; yields [`Table::Key`]
    /// for both variants.
    const fn iter_keys(&self) -> EntriesKeyIter<'_, T> {
        EntriesKeyIter { entries: self, index: 0 }
    }
}
/// Key iterator over [`Entries`]; produced by `Entries::iter_keys`.
struct EntriesKeyIter<'a, T: Table> {
    // Borrowed entries being iterated.
    entries: &'a Entries<T>,
    // Index of the next key to yield.
    index: usize,
}
impl<'a, T: Table> Iterator for EntriesKeyIter<'a, T> {
    type Item = &'a T::Key;

    fn next(&mut self) -> Option<Self::Item> {
        let current = self.index;
        // Advance the cursor unconditionally; `get` handles the end bound.
        self.index += 1;
        match self.entries {
            Entries::RawValues(rows) => rows.get(current).map(|(key, _)| key),
            Entries::Values(rows) => rows.get(current).map(|(key, _)| key),
        }
    }
}
/// Terminal UI state for paging through the entries of one database table.
pub(crate) struct DbListTUI<F, T: Table>
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    /// Fetcher for the next page of items.
    ///
    /// The fetcher is passed the index of the first item to fetch, and the number of items to
    /// fetch from that item.
    fetch: F,
    /// Skip N indices of the key list in the DB.
    skip: usize,
    /// The amount of entries to show per page
    count: usize,
    /// The total number of entries in the database
    total_entries: usize,
    /// The current view mode
    mode: ViewMode,
    /// The current state of the input buffer
    input: String,
    /// The state of the key list.
    list_state: ListState,
    /// Entries to show in the TUI.
    entries: Entries<T>,
}
impl<F, T: Table> DbListTUI<F, T>
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    /// Create a new database list TUI
    pub(crate) fn new(
        fetch: F,
        skip: usize,
        count: usize,
        total_entries: usize,
        raw: bool,
    ) -> Self {
        Self {
            fetch,
            skip,
            count,
            total_entries,
            mode: ViewMode::Normal,
            input: String::new(),
            list_state: ListState::default(),
            entries: Entries::new_with_raw_values(raw),
        }
    }

    /// Move to the next list selection, wrapping to the top at the last entry.
    fn next(&mut self) {
        // An empty page has no meaningful selection; guard the `len() - 1`
        // underflow the unguarded arithmetic would hit for an empty table.
        if self.entries.len() == 0 {
            self.list_state.select(Some(0));
            return
        }
        self.list_state.select(Some(
            self.list_state
                .selected()
                .map(|i| if i >= self.entries.len() - 1 { 0 } else { i + 1 })
                .unwrap_or(0),
        ));
    }

    /// Move to the previous list selection, wrapping to the bottom at the first entry.
    fn previous(&mut self) {
        // See `next`: guard the `len() - 1` underflow on an empty page.
        if self.entries.len() == 0 {
            self.list_state.select(Some(0));
            return
        }
        self.list_state.select(Some(
            self.list_state
                .selected()
                .map(|i| if i == 0 { self.entries.len() - 1 } else { i - 1 })
                .unwrap_or(0),
        ));
    }

    /// Reset the selection to the first entry of the current page.
    fn reset(&mut self) {
        self.list_state.select(Some(0));
    }

    /// Fetch the next page of items, if any entries remain past the current page.
    fn next_page(&mut self) {
        if self.skip + self.count < self.total_entries {
            self.skip += self.count;
            self.fetch_page();
        }
    }

    /// Fetch the previous page of items, if not already on the first page.
    fn previous_page(&mut self) {
        if self.skip > 0 {
            self.skip = self.skip.saturating_sub(self.count);
            self.fetch_page();
        }
    }

    /// Go to a specific page, clamping to the last page.
    fn go_to_page(&mut self, page: usize) {
        // `saturating_sub` keeps the clamp from underflowing when the table
        // holds fewer entries than a single page.
        self.skip = (self.count * page).min(self.total_entries.saturating_sub(self.count));
        self.fetch_page();
    }

    /// Fetch the current page via the `fetch` closure and reset the selection.
    fn fetch_page(&mut self) {
        self.entries.set((self.fetch)(self.skip, self.count));
        self.reset();
    }

    /// Show the [`DbListTUI`] in the terminal.
    ///
    /// Sets up the terminal (raw mode + alternate screen), runs the event loop
    /// until quit, and restores the terminal even when the loop errors.
    pub(crate) fn run(mut self) -> eyre::Result<()> {
        // Setup backend
        enable_raw_mode()?;
        let mut stdout = io::stdout();
        execute!(stdout, EnterAlternateScreen)?;
        let backend = CrosstermBackend::new(stdout);
        let mut terminal = Terminal::new(backend)?;

        // Load initial page
        self.fetch_page();

        // Run event loop
        let tick_rate = Duration::from_millis(250);
        let res = event_loop(&mut terminal, &mut self, tick_rate);

        // Restore terminal before surfacing any event-loop error.
        disable_raw_mode()?;
        execute!(terminal.backend_mut(), LeaveAlternateScreen)?;
        terminal.show_cursor()?;

        // Log rather than propagate, so terminal restoration is never skipped.
        if let Err(err) = res {
            error!("{:?}", err)
        }

        Ok(())
    }
}
/// Run the event loop
///
/// Redraws the UI, then polls for input until the next tick; exits when an
/// event handler reports that the app should quit.
fn event_loop<B: Backend, F, T: Table>(
    terminal: &mut Terminal<B>,
    app: &mut DbListTUI<F, T>,
    tick_rate: Duration,
) -> io::Result<()>
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    let mut last_tick = Instant::now();
    loop {
        // Render the current state.
        terminal.draw(|frame| ui(frame, app))?;

        // Wait for input at most until the next tick.
        let timeout = tick_rate.checked_sub(last_tick.elapsed()).unwrap_or(Duration::ZERO);
        if crossterm::event::poll(timeout)? && handle_event(app, event::read()?)? {
            break
        }

        if last_tick.elapsed() >= tick_rate {
            last_tick = Instant::now();
        }
    }
    Ok(())
}
/// Handle incoming events
///
/// Returns `Ok(true)` when the application should exit.
fn handle_event<F, T: Table>(app: &mut DbListTUI<F, T>, event: Event) -> io::Result<bool>
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    // While the "go to page" prompt is open, all input edits the prompt.
    if app.mode == ViewMode::GoToPage {
        if let Event::Key(key) = event {
            match key.code {
                KeyCode::Enter => {
                    // Parse and jump; invalid input is silently discarded.
                    let input = std::mem::take(&mut app.input);
                    if let Ok(page) = input.parse() {
                        app.go_to_page(page);
                    }
                    app.mode = ViewMode::Normal;
                }
                KeyCode::Char(c) => app.input.push(c),
                KeyCode::Backspace => {
                    app.input.pop();
                }
                KeyCode::Esc => app.mode = ViewMode::Normal,
                _ => {}
            }
        }
        return Ok(false)
    }

    match event {
        // Only act on key presses (not releases/repeats reported separately).
        Event::Key(key) if key.kind == event::KeyEventKind::Press => match key.code {
            KeyCode::Char('q' | 'Q') => return Ok(true),
            KeyCode::Down => app.next(),
            KeyCode::Up => app.previous(),
            KeyCode::Right => app.next_page(),
            KeyCode::Left => app.previous_page(),
            KeyCode::Char('G') => app.mode = ViewMode::GoToPage,
            _ => {}
        },
        Event::Mouse(mouse) => match mouse.kind {
            MouseEventKind::ScrollDown => app.next(),
            MouseEventKind::ScrollUp => app.previous(),
            // TODO: This click event can be triggered outside of the list widget.
            MouseEventKind::Down(_) => {
                let new_idx = (mouse.row as usize + app.list_state.offset()).saturating_sub(1);
                if new_idx < app.entries.len() {
                    app.list_state.select(Some(new_idx));
                }
            }
            _ => {}
        },
        _ => {}
    }

    Ok(false)
}
/// Render the UI
///
/// Layout: a key list (left half) and the selected entry's serialized value
/// (right half), with a one-line footer showing keybindings or the
/// go-to-page prompt depending on the current [`ViewMode`].
fn ui<F, T: Table>(f: &mut Frame<'_>, app: &mut DbListTUI<F, T>)
where
    F: FnMut(usize, usize) -> Vec<TableRow<T>>,
{
    let outer_chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Percentage(95), Constraint::Percentage(5)].as_ref())
        .split(f.area());
    // Columns
    {
        let inner_chunks = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([Constraint::Percentage(50), Constraint::Percentage(50)])
            .split(outer_chunks[0]);
        // Zero-pad entry indices to the width of the largest index on the page.
        let key_length = format!("{}", (app.skip + app.count).saturating_sub(1)).len();
        let formatted_keys = app
            .entries
            .iter_keys()
            .enumerate()
            .map(|(i, k)| {
                ListItem::new(format!("[{:0>width$}]: {k:?}", i + app.skip, width = key_length))
            })
            .collect::<Vec<_>>();
        let key_list = List::new(formatted_keys)
            .block(Block::default().borders(Borders::ALL).title(format!(
                "Keys (Showing entries {}-{} out of {} entries)",
                app.skip,
                (app.skip + app.entries.len()).saturating_sub(1),
                app.total_entries
            )))
            .style(Style::default().fg(Color::White))
            .highlight_style(Style::default().fg(Color::Cyan).add_modifier(Modifier::ITALIC))
            .highlight_symbol("➜ ");
        f.render_stateful_widget(key_list, inner_chunks[0], &mut app.list_state);
        // Raw entries serialize the undecoded value bytes; decoded entries are
        // pretty-printed JSON. Serialization errors are shown inline.
        let value_display = Paragraph::new(
            app.list_state
                .selected()
                .and_then(|selected| {
                    let maybe_serialized = match &app.entries {
                        Entries::RawValues(entries) => {
                            entries.get(selected).map(|(_, v)| serde_json::to_string(v.raw_value()))
                        }
                        Entries::Values(entries) => {
                            entries.get(selected).map(|(_, v)| serde_json::to_string_pretty(v))
                        }
                    };
                    maybe_serialized.map(|ser| {
                        ser.unwrap_or_else(|error| format!("Error serializing value: {error}"))
                    })
                })
                .unwrap_or_else(|| "No value selected".to_string()),
        )
        .block(Block::default().borders(Borders::ALL).title("Value (JSON)"))
        .wrap(Wrap { trim: false })
        .alignment(Alignment::Left);
        f.render_widget(value_display, inner_chunks[1]);
    }
    // Footer
    let footer = match app.mode {
        ViewMode::Normal => Paragraph::new(
            CMDS.iter().map(|(k, v)| format!("[{k}] {v}")).collect::<Vec<_>>().join(" | "),
        ),
        ViewMode::GoToPage => Paragraph::new(format!(
            "Go to page (max {}): {}",
            app.total_entries / app.count,
            app.input
        )),
    }
    .block(Block::default().borders(Borders::ALL))
    .alignment(match app.mode {
        ViewMode::Normal => Alignment::Center,
        ViewMode::GoToPage => Alignment::Left,
    })
    .style(Style::default().fg(Color::Cyan).add_modifier(Modifier::BOLD));
    f.render_widget(footer, outer_chunks[1]);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/db/get.rs | crates/cli/commands/src/db/get.rs | use alloy_consensus::Header;
use alloy_primitives::{hex, BlockHash};
use clap::Parser;
use reth_db::{
static_file::{
ColumnSelectorOne, ColumnSelectorTwo, HeaderWithHashMask, ReceiptMask, TransactionMask,
},
RawDupSort,
};
use reth_db_api::{
table::{Decompress, DupSort, Table},
tables, RawKey, RawTable, Receipts, TableViewer, Transactions,
};
use reth_db_common::DbTool;
use reth_node_api::{ReceiptTy, TxTy};
use reth_node_builder::NodeTypesWithDB;
use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory};
use reth_static_file_types::StaticFileSegment;
use tracing::error;
/// The arguments for the `reth db get` command
#[derive(Parser, Debug)]
pub struct Command {
    // Which data source to query; see `Subcommand`. (Kept as a `//` comment:
    // `///` docs here would alter the generated clap help output.)
    #[command(subcommand)]
    subcommand: Subcommand,
}
// CLI data sources: either an MDBX database table or a static file segment.
// (Variant `///` docs are clap help text and are intentionally untouched.)
#[derive(clap::Subcommand, Debug)]
enum Subcommand {
    /// Gets the content of a database table for the given key
    Mdbx {
        table: tables::Tables,
        /// The key to get content for
        #[arg(value_parser = maybe_json_value_parser)]
        key: String,
        /// The subkey to get content for
        #[arg(value_parser = maybe_json_value_parser)]
        subkey: Option<String>,
        /// Output bytes instead of human-readable decoded value
        #[arg(long)]
        raw: bool,
    },
    /// Gets the content of a static file segment for the given key
    StaticFile {
        segment: StaticFileSegment,
        /// The key to get content for
        #[arg(value_parser = maybe_json_value_parser)]
        key: String,
        /// Output bytes instead of human-readable decoded value
        #[arg(long)]
        raw: bool,
    },
}
impl Command {
    /// Execute `db get` command
    pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
        match self.subcommand {
            Subcommand::Mdbx { table, key, subkey, raw } => {
                // Dispatch through the TableViewer so the concrete table type
                // is resolved at runtime from the CLI argument.
                table.view(&GetValueViewer { tool, key, subkey, raw })?
            }
            Subcommand::StaticFile { segment, key, raw } => {
                // Each segment is keyed by a u64 and reads a fixed column mask.
                let (key, mask): (u64, _) = match segment {
                    StaticFileSegment::Headers => {
                        (table_key::<tables::Headers>(&key)?, <HeaderWithHashMask<Header>>::MASK)
                    }
                    StaticFileSegment::Transactions => {
                        (table_key::<tables::Transactions>(&key)?, <TransactionMask<TxTy<N>>>::MASK)
                    }
                    StaticFileSegment::Receipts => {
                        (table_key::<tables::Receipts>(&key)?, <ReceiptMask<ReceiptTy<N>>>::MASK)
                    }
                };
                // Search across static files for the key; the closure copies the
                // matched column slices out so the provider borrow can end.
                let content = tool.provider_factory.static_file_provider().find_static_file(
                    segment,
                    |provider| {
                        let mut cursor = provider.cursor()?;
                        cursor.get(key.into(), mask).map(|result| {
                            result.map(|vec| {
                                vec.iter().map(|slice| slice.to_vec()).collect::<Vec<_>>()
                            })
                        })
                    },
                )?;
                match content {
                    Some(content) => {
                        if raw {
                            println!("{}", hex::encode_prefixed(&content[0]));
                        } else {
                            match segment {
                                StaticFileSegment::Headers => {
                                    // Headers segment returns two columns here:
                                    // the header and its hash.
                                    let header = Header::decompress(content[0].as_slice())?;
                                    let block_hash = BlockHash::decompress(content[1].as_slice())?;
                                    println!(
                                        "Header\n{}\n\nBlockHash\n{}",
                                        serde_json::to_string_pretty(&header)?,
                                        serde_json::to_string_pretty(&block_hash)?
                                    );
                                }
                                StaticFileSegment::Transactions => {
                                    let transaction = <<Transactions as Table>::Value>::decompress(
                                        content[0].as_slice(),
                                    )?;
                                    println!("{}", serde_json::to_string_pretty(&transaction)?);
                                }
                                StaticFileSegment::Receipts => {
                                    let receipt = <<Receipts as Table>::Value>::decompress(
                                        content[0].as_slice(),
                                    )?;
                                    println!("{}", serde_json::to_string_pretty(&receipt)?);
                                }
                            }
                        }
                    }
                    None => {
                        error!(target: "reth::cli", "No content for the given table key.");
                    }
                };
            }
        }
        Ok(())
    }
}
/// Get an instance of key for given table
///
/// The key is expected as JSON: bare numbers for numeric keys, quoted strings
/// for string/hash keys, and JSON objects for composite keys.
pub(crate) fn table_key<T: Table>(key: &str) -> Result<T::Key, eyre::Error> {
    serde_json::from_str(key).map_err(|e| eyre::eyre!(e))
}
/// Get an instance of subkey for given dupsort table
///
/// A missing subkey is treated as the empty string, which fails JSON parsing
/// and therefore surfaces as an error.
fn table_subkey<T: DupSort>(subkey: Option<&str>) -> Result<T::SubKey, eyre::Error> {
    serde_json::from_str(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e))
}
/// [`TableViewer`] implementation that fetches and prints a single value.
struct GetValueViewer<'a, N: NodeTypesWithDB> {
    // Database access helper.
    tool: &'a DbTool<N>,
    // JSON-encoded table key.
    key: String,
    // JSON-encoded subkey for dupsort tables.
    subkey: Option<String>,
    // Print raw hex bytes instead of decoded JSON.
    raw: bool,
}
impl<N: ProviderNodeTypes> TableViewer<()> for GetValueViewer<'_, N> {
    type Error = eyre::Report;

    /// Look up and print the value stored under `self.key` in table `T`.
    fn view<T: Table>(&self) -> Result<(), Self::Error> {
        let key = table_key::<T>(&self.key)?;
        let content = if self.raw {
            // Fetch via the raw table wrapper so the value bytes stay undecoded.
            self.tool
                .get::<RawTable<T>>(RawKey::from(key))?
                .map(|content| hex::encode_prefixed(content.raw_value()))
        } else {
            self.tool.get::<T>(key)?.as_ref().map(serde_json::to_string_pretty).transpose()?
        };
        match content {
            Some(content) => {
                println!("{content}");
            }
            None => {
                error!(target: "reth::cli", "No content for the given table key.");
            }
        };
        Ok(())
    }

    /// Look up and print the value stored under `(self.key, self.subkey)` in
    /// dupsort table `T`.
    fn view_dupsort<T: DupSort>(&self) -> Result<(), Self::Error> {
        // get a key for given table
        let key = table_key::<T>(&self.key)?;
        // process dupsort table
        let subkey = table_subkey::<T>(self.subkey.as_deref())?;
        let content = if self.raw {
            self.tool
                .get_dup::<RawDupSort<T>>(RawKey::from(key), RawKey::from(subkey))?
                .map(|content| hex::encode_prefixed(content.raw_value()))
        } else {
            self.tool
                .get_dup::<T>(key, subkey)?
                .as_ref()
                .map(serde_json::to_string_pretty)
                .transpose()?
        };
        match content {
            Some(content) => {
                println!("{content}");
            }
            None => {
                error!(target: "reth::cli", "No content for the given table subkey.");
            }
        };
        Ok(())
    }
}
/// Map the user input value to json
///
/// Input that already parses as JSON is passed through unchanged; anything
/// else is wrapped as a JSON string literal.
pub(crate) fn maybe_json_value_parser(value: &str) -> Result<String, eyre::Error> {
    match serde_json::from_str::<serde::de::IgnoredAny>(value) {
        Ok(_) => Ok(value.to_string()),
        Err(_) => serde_json::to_string(&value).map_err(|e| eyre::eyre!(e)),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::{address, B256};
    use clap::{Args, Parser};
    use reth_db_api::{
        models::{storage_sharded_key::StorageShardedKey, ShardedKey},
        AccountsHistory, HashedAccounts, Headers, StageCheckpoints, StoragesHistory,
    };
    use std::str::FromStr;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    // Keys are parsed as JSON: bare numbers, quoted strings/hashes, and JSON
    // objects for composite (sharded) keys.
    #[test]
    fn parse_numeric_key_args() {
        assert_eq!(table_key::<Headers>("123").unwrap(), 123);
        assert_eq!(
            table_key::<HashedAccounts>(
                "\"0x0ac361fe774b78f8fc4e86c1916930d150865c3fc2e21dca2e58833557608bac\""
            )
            .unwrap(),
            B256::from_str("0x0ac361fe774b78f8fc4e86c1916930d150865c3fc2e21dca2e58833557608bac")
                .unwrap()
        );
    }

    #[test]
    fn parse_string_key_args() {
        assert_eq!(
            table_key::<StageCheckpoints>("\"MerkleExecution\"").unwrap(),
            "MerkleExecution"
        );
    }

    #[test]
    fn parse_json_key_args() {
        assert_eq!(
            table_key::<StoragesHistory>(r#"{ "address": "0x01957911244e546ce519fbac6f798958fafadb41", "sharded_key": { "key": "0x0000000000000000000000000000000000000000000000000000000000000003", "highest_block_number": 18446744073709551615 } }"#).unwrap(),
            StorageShardedKey::new(
                address!("0x01957911244e546ce519fbac6f798958fafadb41"),
                B256::from_str(
                    "0x0000000000000000000000000000000000000000000000000000000000000003"
                )
                .unwrap(),
                18446744073709551615
            )
        );
    }

    #[test]
    fn parse_json_key_for_account_history() {
        assert_eq!(
            table_key::<AccountsHistory>(r#"{ "key": "0x4448e1273fd5a8bfdb9ed111e96889c960eee145", "highest_block_number": 18446744073709551615 }"#).unwrap(),
            ShardedKey::new(
                address!("0x4448e1273fd5a8bfdb9ed111e96889c960eee145"),
                18446744073709551615
            )
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/init_state/without_evm.rs | crates/cli/commands/src/init_state/without_evm.rs | use alloy_consensus::BlockHeader;
use alloy_primitives::{BlockNumber, B256, U256};
use alloy_rlp::Decodable;
use reth_codecs::Compact;
use reth_node_builder::NodePrimitives;
use reth_primitives_traits::{SealedBlock, SealedHeader, SealedHeaderFor};
use reth_provider::{
providers::StaticFileProvider, BlockWriter, ProviderResult, StageCheckpointWriter,
StaticFileProviderFactory, StaticFileWriter, StorageLocation,
};
use reth_stages::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use std::{fs::File, io::Read, path::PathBuf};
use tracing::info;
/// Reads the header RLP from a file and returns the Header.
///
/// The whole file is buffered into memory and decoded as a single
/// RLP-encoded value of type `H`.
pub(crate) fn read_header_from_file<H>(path: PathBuf) -> Result<H, eyre::Error>
where
    H: Decodable,
{
    let mut file = File::open(path)?;
    let mut buf = Vec::new();
    file.read_to_end(&mut buf)?;
    let header = H::decode(&mut &buf[..])?;
    Ok(header)
}
/// Creates a dummy chain (with no transactions) up to the last EVM block and appends the
/// first valid block.
///
/// `header` is the first real block; dummy headers produced by `header_factory` fill
/// blocks `1..header.number()`. All stage checkpoints are then set to `header.number()`.
pub fn setup_without_evm<Provider, F>(
    provider_rw: &Provider,
    header: SealedHeader<<Provider::Primitives as NodePrimitives>::BlockHeader>,
    total_difficulty: U256,
    header_factory: F,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory
        + StageCheckpointWriter
        + BlockWriter<Block = <Provider::Primitives as NodePrimitives>::Block>,
    F: Fn(BlockNumber) -> <Provider::Primitives as NodePrimitives>::BlockHeader
        + Send
        + Sync
        + 'static,
{
    info!(target: "reth::cli", new_tip = ?header.num_hash(), "Setting up dummy EVM chain before importing state.");
    let static_file_provider = provider_rw.static_file_provider();
    // Write EVM dummy data up to `header - 1` block
    append_dummy_chain(&static_file_provider, header.number() - 1, header_factory)?;
    info!(target: "reth::cli", "Appending first valid block.");
    append_first_block(provider_rw, &header, total_difficulty)?;
    // Mark every stage as done up to the appended tip so pipeline sync resumes
    // from here rather than from genesis.
    for stage in StageId::ALL {
        provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number()))?;
    }
    info!(target: "reth::cli", "Set up finished.");
    Ok(())
}
/// Appends the first block.
///
/// By appending it, static file writer also verifies that all segments are at the same
/// height.
fn append_first_block<Provider>(
    provider_rw: &Provider,
    header: &SealedHeaderFor<Provider::Primitives>,
    total_difficulty: U256,
) -> ProviderResult<()>
where
    Provider: BlockWriter<Block = <Provider::Primitives as NodePrimitives>::Block>
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>,
{
    // Write the block (header only, empty body) to the database tables.
    provider_rw.insert_block(
        SealedBlock::<<Provider::Primitives as NodePrimitives>::Block>::from_sealed_parts(
            header.clone(),
            Default::default(),
        )
        .try_recover()
        .expect("no senders or txes"),
        StorageLocation::Database,
    )?;
    let sf_provider = provider_rw.static_file_provider();
    // Mirror the header into static files and advance the (empty) receipts and
    // transactions segments to the same height so all segments agree.
    sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header(
        header,
        total_difficulty,
        &header.hash(),
    )?;
    sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number())?;
    sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number())?;
    Ok(())
}
/// Creates a dummy chain with no transactions/receipts up to `target_height` block inclusive.
///
/// * Headers: It will push an empty block.
/// * Transactions: It will not push any tx, only increments the end block range.
/// * Receipts: It will not push any receipt, only increments the end block range.
fn append_dummy_chain<N, F>(
    sf_provider: &StaticFileProvider<N>,
    target_height: BlockNumber,
    header_factory: F,
) -> ProviderResult<()>
where
    N: NodePrimitives,
    F: Fn(BlockNumber) -> N::BlockHeader + Send + Sync + 'static,
{
    let (tx, rx) = std::sync::mpsc::channel();
    // Spawn jobs for incrementing the block end range of transactions and receipts
    for segment in [StaticFileSegment::Transactions, StaticFileSegment::Receipts] {
        let tx_clone = tx.clone();
        let provider = sf_provider.clone();
        std::thread::spawn(move || {
            let result = provider.latest_writer(segment).and_then(|mut writer| {
                for block_num in 1..=target_height {
                    writer.increment_block(block_num)?;
                }
                Ok(())
            });
            tx_clone.send(result).unwrap();
        });
    }
    // Spawn job for appending empty headers.
    // The original `tx` is moved into this closure, so once all three worker
    // threads finish, all senders drop, the channel closes, and the receive
    // loop below terminates.
    let provider = sf_provider.clone();
    std::thread::spawn(move || {
        let result = provider.latest_writer(StaticFileSegment::Headers).and_then(|mut writer| {
            for block_num in 1..=target_height {
                // TODO: should we fill with real parent_hash?
                let header = header_factory(block_num);
                writer.append_header(&header, U256::ZERO, &B256::ZERO)?;
            }
            Ok(())
        });
        tx.send(result).unwrap();
    });
    // Catches any StaticFileWriter error.
    while let Ok(append_result) = rx.recv() {
        if let Err(err) = append_result {
            tracing::error!(target: "reth::cli", "Error appending dummy chain: {err}");
            return Err(err)
        }
    }
    // Sanity check: if a worker thread died without reporting an error, this
    // verifies that all segments still advanced to the same target_height.
    for segment in
        [StaticFileSegment::Headers, StaticFileSegment::Receipts, StaticFileSegment::Transactions]
    {
        assert_eq!(
            sf_provider.latest_writer(segment)?.user_header().block_end(),
            Some(target_height),
            "Static file segment {segment} was unsuccessful advancing its block height."
        );
    }
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/init_state/mod.rs | crates/cli/commands/src/init_state/mod.rs | //! Command that initializes the node from a genesis file.
use crate::common::{AccessRights, CliHeader, CliNodeTypes, Environment, EnvironmentArgs};
use alloy_consensus::BlockHeader as AlloyBlockHeader;
use alloy_primitives::{B256, U256};
use clap::Parser;
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_db_common::init::init_from_state_dump;
use reth_node_api::NodePrimitives;
use reth_primitives_traits::{BlockHeader, SealedHeader};
use reth_provider::{
BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter,
};
use std::{io::BufReader, path::PathBuf, str::FromStr, sync::Arc};
use tracing::info;
pub mod without_evm;
/// Initializes the database with the genesis block.
#[derive(Debug, Parser)]
pub struct InitStateCommand<C: ChainSpecParser> {
    // Shared datadir/chain environment arguments. (`//` comments are used in
    // this struct because `///` docs feed the generated clap help output.)
    #[command(flatten)]
    pub env: EnvironmentArgs<C>,
    /// JSONL file with state dump.
    ///
    /// Must contain accounts in following format, additional account fields are ignored. Must
    /// also contain { "root": \<state-root\> } as first line.
    /// {
    /// "balance": "\<balance\>",
    /// "nonce": \<nonce\>,
    /// "code": "\<bytecode\>",
    /// "storage": {
    /// "\<key\>": "\<value\>",
    /// ..
    /// },
    /// "address": "\<address\>",
    /// }
    ///
    /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until
    /// and including the non-genesis block to init chain at. See 'import' command.
    #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)]
    pub state: PathBuf,
    /// Specifies whether to initialize the state without relying on EVM historical data.
    ///
    /// When enabled, and before inserting the state, it creates a dummy chain up to the last EVM
    /// block specified. It then, appends the first block provided block.
    ///
    /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be
    ///   ignored.
    #[arg(long, default_value = "false")]
    pub without_evm: bool,
    // The following three options are required together with `--without-evm`;
    // `execute` errors if any is missing in that mode.
    /// Header file containing the header in an RLP encoded format.
    #[arg(long, value_name = "HEADER_FILE", verbatim_doc_comment)]
    pub header: Option<PathBuf>,
    /// Total difficulty of the header.
    #[arg(long, value_name = "TOTAL_DIFFICULTY", verbatim_doc_comment)]
    pub total_difficulty: Option<String>,
    /// Hash of the header.
    #[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)]
    pub header_hash: Option<String>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitStateCommand<C> {
    /// Execute the `init` command
    pub async fn execute<N>(self) -> eyre::Result<()>
    where
        N: CliNodeTypes<
            ChainSpec = C::ChainSpec,
            Primitives: NodePrimitives<BlockHeader: BlockHeader + CliHeader>,
        >,
    {
        info!(target: "reth::cli", "Reth init-state starting");
        let Environment { config, provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
        let static_file_provider = provider_factory.static_file_provider();
        let provider_rw = provider_factory.database_provider_rw()?;
        if self.without_evm {
            // ensure header, total difficulty and header hash are provided
            let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?;
            let header = without_evm::read_header_from_file::<
                <N::Primitives as NodePrimitives>::BlockHeader,
            >(header)?;
            let header_hash =
                self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?;
            let header_hash = B256::from_str(&header_hash)?;
            let total_difficulty = self
                .total_difficulty
                .ok_or_else(|| eyre::eyre!("Total difficulty must be provided"))?;
            let total_difficulty = U256::from_str(&total_difficulty)?;
            let last_block_number = provider_rw.last_block_number()?;
            if last_block_number == 0 {
                // Fresh datadir: build the dummy pre-state chain using
                // default headers with only the block number set.
                without_evm::setup_without_evm(
                    &provider_rw,
                    SealedHeader::new(header, header_hash),
                    total_difficulty,
                    |number| {
                        let mut header =
                            <<N::Primitives as NodePrimitives>::BlockHeader>::default();
                        header.set_number(number);
                        header
                    },
                )?;
                // SAFETY: it's safe to commit static files, since in the event of a crash, they
                // will be unwound according to database checkpoints.
                //
                // Necessary to commit, so the header is accessible to provider_rw and
                // init_state_dump
                static_file_provider.commit()?;
            } else if last_block_number > 0 && last_block_number < header.number() {
                return Err(eyre::eyre!(
                    "Data directory should be empty when calling init-state with --without-evm-history."
                ));
            }
        }
        info!(target: "reth::cli", "Initiating state dump");
        let reader = BufReader::new(reth_fs_util::open(self.state)?);
        let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?;
        provider_rw.commit()?;
        info!(target: "reth::cli", hash = ?hash, "Genesis block written");
        Ok(())
    }
}
impl<C: ChainSpecParser> InitStateCommand<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Always `Some`: the chain spec is part of the environment args.
        Some(&self.env.chain)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/mod.rs | crates/cli/commands/src/stage/mod.rs | //! `reth stage` command
use std::sync::Arc;
use crate::common::{CliNodeComponents, CliNodeTypes};
use clap::{Parser, Subcommand};
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_runner::CliContext;
pub mod drop;
pub mod dump;
pub mod run;
pub mod unwind;
/// `reth stage` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // Selected `reth stage` subcommand (run/drop/dump/unwind). (`//` comment:
    // a `///` doc here would alter the generated clap help output.)
    #[command(subcommand)]
    command: Subcommands<C>,
}
/// `reth stage` subcommands
#[derive(Subcommand, Debug)]
pub enum Subcommands<C: ChainSpecParser> {
    /// Run a single stage.
    ///
    /// Note that this won't use the Pipeline and as a result runs stages
    /// assuming that all the data can be held in memory. It is not recommended
    /// to run a stage for really large block ranges if your computer does not have
    /// a lot of memory to store all the data.
    // Boxed — presumably to keep the enum variant small (`run::Command` looks
    // like the largest payload); confirm against clippy::large_enum_variant.
    Run(Box<run::Command<C>>),
    /// Drop a stage's tables from the database.
    Drop(drop::Command<C>),
    /// Dumps a stage from a range into a new database.
    Dump(dump::Command<C>),
    /// Unwinds a certain block range, deleting it from the database.
    Unwind(unwind::Command<C>),
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>> Command<C> {
    /// Execute `stage` command
    ///
    /// Dispatches to the selected subcommand, threading the `components`
    /// builder through so each subcommand can construct the node components
    /// it needs from the chain spec.
    pub async fn execute<N, Comp>(
        self,
        ctx: CliContext,
        components: impl FnOnce(Arc<C::ChainSpec>) -> Comp,
    ) -> eyre::Result<()>
    where
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
        Comp: CliNodeComponents<N>,
    {
        match self.command {
            Subcommands::Run(command) => command.execute::<N, _, _>(ctx, components).await,
            Subcommands::Drop(command) => command.execute::<N>().await,
            Subcommands::Dump(command) => command.execute::<N, _, _>(components).await,
            Subcommands::Unwind(command) => command.execute::<N, _, _>(components).await,
        }
    }
}
impl<C: ChainSpecParser> Command<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Delegate to whichever subcommand was selected.
        match &self.command {
            Subcommands::Run(command) => command.chain_spec(),
            Subcommands::Drop(command) => command.chain_spec(),
            Subcommands::Dump(command) => command.chain_spec(),
            Subcommands::Unwind(command) => command.chain_spec(),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/drop.rs | crates/cli/commands/src/stage/drop.rs | //! Database debugging tool
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use clap::Parser;
use itertools::Itertools;
use reth_chainspec::EthChainSpec;
use reth_cli::chainspec::ChainSpecParser;
use reth_db::{mdbx::tx::Tx, static_file::iter_static_files, DatabaseError};
use reth_db_api::{
tables,
transaction::{DbTx, DbTxMut},
};
use reth_db_common::{
init::{insert_genesis_header, insert_genesis_history, insert_genesis_state},
DbTool,
};
use reth_node_api::{HeaderTy, ReceiptTy, TxTy};
use reth_node_core::args::StageEnum;
use reth_provider::{
writer::UnifiedStorageWriter, DatabaseProviderFactory, StaticFileProviderFactory,
};
use reth_prune::PruneSegment;
use reth_stages::StageId;
use reth_static_file_types::StaticFileSegment;
use std::sync::Arc;
/// `reth drop-stage` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // Shared datadir/chain environment arguments.
    #[command(flatten)]
    env: EnvironmentArgs<C>,
    // Which stage's tables (and static file segment, if any) to drop.
    stage: StageEnum,
}
impl<C: ChainSpecParser> Command<C> {
    /// Execute `db` command
    ///
    /// Drops all data associated with the selected stage: clears the stage's
    /// tables (and, where applicable, the matching static-file segment),
    /// resets its stage/prune checkpoints, and re-inserts the genesis data
    /// the stage needs as a starting point. WARNING: destructive.
    pub async fn execute<N: CliNodeTypes>(self) -> eyre::Result<()>
    where
        C: ChainSpecParser<ChainSpec = N::ChainSpec>,
    {
        // Needs read-write access since we clear tables and rewrite checkpoints.
        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;

        let tool = DbTool::new(provider_factory)?;

        // Some stages keep (part of) their data in static files rather than MDBX;
        // map those to the segment that has to be wiped as well.
        let static_file_segment = match self.stage {
            StageEnum::Headers => Some(StaticFileSegment::Headers),
            StageEnum::Bodies => Some(StaticFileSegment::Transactions),
            StageEnum::Execution => Some(StaticFileSegment::Receipts),
            _ => None,
        };

        // Delete static file segment data before inserting the genesis header below
        if let Some(static_file_segment) = static_file_segment {
            let static_file_provider = tool.provider_factory.static_file_provider();
            let static_files = iter_static_files(static_file_provider.directory())?;
            if let Some(segment_static_files) = static_files.get(&static_file_segment) {
                // Delete static files from the highest to the lowest block range
                for (block_range, _) in segment_static_files
                    .iter()
                    .sorted_by_key(|(block_range, _)| block_range.start())
                    .rev()
                {
                    static_file_provider.delete_jar(static_file_segment, block_range.start())?;
                }
            }
        }

        // Single RW transaction: all table clears + checkpoint resets below are
        // committed atomically at the end via `commit_unwind`.
        let provider_rw = tool.provider_factory.database_provider_rw()?;
        let tx = provider_rw.tx_ref();

        match self.stage {
            StageEnum::Headers => {
                tx.clear::<tables::CanonicalHeaders>()?;
                tx.clear::<tables::Headers<HeaderTy<N>>>()?;
                tx.clear::<tables::HeaderTerminalDifficulties>()?;
                tx.clear::<tables::HeaderNumbers>()?;
                reset_stage_checkpoint(tx, StageId::Headers)?;

                // Header tables must not be empty: re-seed with the genesis header.
                insert_genesis_header(&provider_rw, &self.env.chain)?;
            }
            StageEnum::Bodies => {
                tx.clear::<tables::BlockBodyIndices>()?;
                tx.clear::<tables::Transactions<TxTy<N>>>()?;
                reset_prune_checkpoint(tx, PruneSegment::Transactions)?;

                tx.clear::<tables::TransactionBlocks>()?;
                tx.clear::<tables::BlockOmmers<HeaderTy<N>>>()?;
                tx.clear::<tables::BlockWithdrawals>()?;
                reset_stage_checkpoint(tx, StageId::Bodies)?;

                insert_genesis_header(&provider_rw, &self.env.chain)?;
            }
            StageEnum::Senders => {
                tx.clear::<tables::TransactionSenders>()?;
                // Reset pruned numbers to not count them in the next rerun's stage progress
                reset_prune_checkpoint(tx, PruneSegment::SenderRecovery)?;
                reset_stage_checkpoint(tx, StageId::SenderRecovery)?;
            }
            StageEnum::Execution => {
                // Execution output spans state, changesets, bytecode and receipts.
                tx.clear::<tables::PlainAccountState>()?;
                tx.clear::<tables::PlainStorageState>()?;
                tx.clear::<tables::AccountChangeSets>()?;
                tx.clear::<tables::StorageChangeSets>()?;
                tx.clear::<tables::Bytecodes>()?;
                tx.clear::<tables::Receipts<ReceiptTy<N>>>()?;

                reset_prune_checkpoint(tx, PruneSegment::Receipts)?;
                reset_prune_checkpoint(tx, PruneSegment::ContractLogs)?;
                reset_stage_checkpoint(tx, StageId::Execution)?;

                // Re-seed plain state from the genesis allocation.
                let alloc = &self.env.chain.genesis().alloc;
                insert_genesis_state(&provider_rw, alloc.iter())?;
            }
            StageEnum::AccountHashing => {
                tx.clear::<tables::HashedAccounts>()?;
                reset_stage_checkpoint(tx, StageId::AccountHashing)?;
            }
            StageEnum::StorageHashing => {
                tx.clear::<tables::HashedStorages>()?;
                reset_stage_checkpoint(tx, StageId::StorageHashing)?;
            }
            StageEnum::Hashing => {
                // Clear hashed accounts
                tx.clear::<tables::HashedAccounts>()?;
                reset_stage_checkpoint(tx, StageId::AccountHashing)?;

                // Clear hashed storages
                tx.clear::<tables::HashedStorages>()?;
                reset_stage_checkpoint(tx, StageId::StorageHashing)?;
            }
            StageEnum::Merkle => {
                tx.clear::<tables::AccountsTrie>()?;
                tx.clear::<tables::StoragesTrie>()?;
                reset_stage_checkpoint(tx, StageId::MerkleExecute)?;
                reset_stage_checkpoint(tx, StageId::MerkleUnwind)?;
                // Also drop the intermediate progress entry kept by the merkle stage.
                tx.delete::<tables::StageCheckpointProgresses>(
                    StageId::MerkleExecute.to_string(),
                    None,
                )?;
            }
            StageEnum::AccountHistory | StageEnum::StorageHistory => {
                // Both history indices are cleared together regardless of which
                // of the two variants was selected.
                tx.clear::<tables::AccountsHistory>()?;
                tx.clear::<tables::StoragesHistory>()?;
                reset_stage_checkpoint(tx, StageId::IndexAccountHistory)?;
                reset_stage_checkpoint(tx, StageId::IndexStorageHistory)?;
                insert_genesis_history(&provider_rw, self.env.chain.genesis().alloc.iter())?;
            }
            StageEnum::TxLookup => {
                tx.clear::<tables::TransactionHashNumbers>()?;
                reset_prune_checkpoint(tx, PruneSegment::TransactionLookup)?;
                reset_stage_checkpoint(tx, StageId::TransactionLookup)?;
                insert_genesis_header(&provider_rw, &self.env.chain)?;
            }
        }

        // Zero the Finish checkpoint so the pipeline knows a full re-run is needed.
        tx.put::<tables::StageCheckpoints>(StageId::Finish.to_string(), Default::default())?;

        UnifiedStorageWriter::commit_unwind(provider_rw)?;

        Ok(())
    }

    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        Some(&self.env.chain)
    }
}
/// Clears the recorded block/tx progress for `prune_segment`.
///
/// If a checkpoint entry exists its `block_number` and `tx_number` are reset to
/// `None` and the entry is written back; if no entry exists this is a no-op.
fn reset_prune_checkpoint(
    tx: &Tx<reth_db::mdbx::RW>,
    prune_segment: PruneSegment,
) -> Result<(), DatabaseError> {
    let existing = tx.get::<tables::PruneCheckpoints>(prune_segment)?;
    let Some(mut checkpoint) = existing else { return Ok(()) };

    // Wipe only the progress fields, keeping the rest of the entry intact.
    checkpoint.block_number = None;
    checkpoint.tx_number = None;
    tx.put::<tables::PruneCheckpoints>(prune_segment, checkpoint)?;
    Ok(())
}
/// Overwrites the stored checkpoint for `stage_id` with the default (zeroed)
/// checkpoint, so the stage reruns from scratch.
fn reset_stage_checkpoint(
    tx: &Tx<reth_db::mdbx::RW>,
    stage_id: StageId,
) -> Result<(), DatabaseError> {
    // Checkpoints are keyed by the stage's string name.
    let key = stage_id.to_string();
    tx.put::<tables::StageCheckpoints>(key, Default::default())?;
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/unwind.rs | crates/cli/commands/src/stage/unwind.rs | //! Unwinding a certain block range
use crate::{
common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs},
stage::CliNodeComponents,
};
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::B256;
use clap::{Parser, Subcommand};
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_config::Config;
use reth_consensus::noop::NoopConsensus;
use reth_db::DatabaseEnv;
use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
use reth_evm::ConfigureEvm;
use reth_exex::ExExManagerHandle;
use reth_provider::{
providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainStateBlockReader,
ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, StorageLocation,
};
use reth_stages::{
sets::{DefaultStages, OfflineStages},
stages::ExecutionStage,
ExecutionStageThresholds, Pipeline, StageSet,
};
use reth_static_file::StaticFileProducer;
use std::sync::Arc;
use tokio::sync::watch;
use tracing::info;
/// `reth stage unwind` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // Shared CLI environment (datadir, chain spec, database args).
    #[command(flatten)]
    env: EnvironmentArgs<C>,

    // How the unwind target is selected: `to-block <target>` or
    // `num-blocks <amount>`.
    #[command(subcommand)]
    command: Subcommands,

    /// If this is enabled, then all stages except headers, bodies, and sender recovery will be
    /// unwound.
    #[arg(long)]
    offline: bool,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> {
    /// Execute `db stage unwind` command
    ///
    /// Resolves the unwind target, then performs either a pipeline unwind
    /// (when static files overlap the target, or `--offline` is set) or a
    /// plain database unwind that removes blocks/execution data above target.
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>, F, Comp>(
        self,
        components: F,
    ) -> eyre::Result<()>
    where
        Comp: CliNodeComponents<N>,
        F: FnOnce(Arc<C::ChainSpec>) -> Comp,
    {
        let Environment { provider_factory, config, .. } = self.env.init::<N>(AccessRights::RW)?;

        // Block number that will remain the new tip after unwinding.
        let target = self.command.unwind_target(provider_factory.clone())?;

        let components = components(provider_factory.chain_spec());

        // Highest block covered by static files, but only if it is *above* the
        // target (i.e. static-file data would need to be unwound too).
        let highest_static_file_block = provider_factory
            .static_file_provider()
            .get_highest_static_files()
            .max_block_num()
            .filter(|highest_static_file_block| *highest_static_file_block > target);

        // Execute a pipeline unwind if the start of the range overlaps the existing static
        // files. If that's the case, then copy all available data from MDBX to static files, and
        // only then, proceed with the unwind.
        //
        // We also execute a pipeline unwind if `offline` is specified, because we need to only
        // unwind the data associated with offline stages.
        if highest_static_file_block.is_some() || self.offline {
            if self.offline {
                info!(target: "reth::cli", "Performing an unwind for offline-only data!");
            }

            if let Some(highest_static_file_block) = highest_static_file_block {
                info!(target: "reth::cli", ?target, ?highest_static_file_block, "Executing a pipeline unwind.");
            } else {
                info!(target: "reth::cli", ?target, "Executing a pipeline unwind.");
            }
            info!(target: "reth::cli", prune_config=?config.prune, "Using prune settings");

            // This will build an offline-only pipeline if the `offline` flag is enabled
            let mut pipeline =
                self.build_pipeline(config, provider_factory, components.evm_config().clone())?;

            // Move all applicable data from database to static files.
            pipeline.move_to_static_files()?;

            pipeline.unwind(target, None)?;
        } else {
            info!(target: "reth::cli", ?target, "Executing a database unwind.");
            let provider = provider_factory.provider_rw()?;

            provider
                .remove_block_and_execution_above(target, StorageLocation::Both)
                .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?;

            // update finalized block if needed
            let last_saved_finalized_block_number = provider.last_finalized_block_number()?;
            if last_saved_finalized_block_number.is_none_or(|f| f > target) {
                provider.save_finalized_block_number(target)?;
            }

            provider.commit()?;
        }

        info!(target: "reth::cli", ?target, "Unwound blocks");

        Ok(())
    }

    /// Builds the (offline or default) pipeline used to perform the unwind.
    ///
    /// The offline variant runs only offline stages (minus sender recovery);
    /// the default variant uses no-op downloaders/consensus since no syncing
    /// will actually happen — only unwinding.
    fn build_pipeline<N: ProviderNodeTypes<ChainSpec = C::ChainSpec>>(
        self,
        config: Config,
        provider_factory: ProviderFactory<N>,
        evm_config: impl ConfigureEvm<Primitives = N::Primitives> + 'static,
    ) -> Result<Pipeline<N>, eyre::Error> {
        let stage_conf = &config.stages;
        let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default();

        // The tip channel is only needed to satisfy the stage-set API here.
        let (tip_tx, tip_rx) = watch::channel(B256::ZERO);

        let builder = if self.offline {
            Pipeline::<N>::builder().add_stages(
                OfflineStages::new(
                    evm_config,
                    NoopConsensus::arc(),
                    config.stages,
                    prune_modes.clone(),
                )
                .builder()
                .disable(reth_stages::StageId::SenderRecovery),
            )
        } else {
            Pipeline::<N>::builder().with_tip_sender(tip_tx).add_stages(
                DefaultStages::new(
                    provider_factory.clone(),
                    tip_rx,
                    Arc::new(NoopConsensus::default()),
                    NoopHeaderDownloader::default(),
                    NoopBodiesDownloader::default(),
                    evm_config.clone(),
                    stage_conf.clone(),
                    prune_modes.clone(),
                    None,
                )
                .set(ExecutionStage::new(
                    evm_config,
                    Arc::new(NoopConsensus::default()),
                    // No execution will run, so no thresholds are needed.
                    ExecutionStageThresholds {
                        max_blocks: None,
                        max_changes: None,
                        max_cumulative_gas: None,
                        max_duration: None,
                    },
                    stage_conf.execution_external_clean_threshold(),
                    ExExManagerHandle::empty(),
                )),
            )
        };

        let pipeline = builder.build(
            provider_factory.clone(),
            StaticFileProducer::new(provider_factory, prune_modes),
        );
        Ok(pipeline)
    }
}
impl<C: ChainSpecParser> Command<C> {
    /// Return the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // The chain spec always comes from the shared environment args.
        let spec = &self.env.chain;
        Some(spec)
    }
}
/// `reth stage unwind` subcommand
#[derive(Subcommand, Debug, Eq, PartialEq)]
enum Subcommands {
    /// Unwinds the database from the latest block, until the given block number or hash has been
    /// reached, that block is not included.
    // Absolute target, e.g. `reth stage unwind to-block 100`.
    #[command(name = "to-block")]
    ToBlock { target: BlockHashOrNumber },
    /// Unwinds the database from the latest block, until the given number of blocks have been
    /// reached.
    // Relative target, e.g. `reth stage unwind num-blocks 100` unwinds 100 blocks from the tip.
    #[command(name = "num-blocks")]
    NumBlocks { amount: u64 },
}
impl Subcommands {
    /// Returns the block to unwind to. The returned block will stay in database.
    ///
    /// Errors if a hash target is unknown to the database or if the resolved
    /// target lies above the current tip.
    fn unwind_target<N: ProviderNodeTypes<DB = Arc<DatabaseEnv>>>(
        &self,
        factory: ProviderFactory<N>,
    ) -> eyre::Result<u64> {
        let provider = factory.provider()?;
        let last = provider.last_block_number()?;

        // Resolve the concrete target block number from the subcommand.
        let target = match self {
            Self::ToBlock { target: BlockHashOrNumber::Hash(hash) } => provider
                .block_number(*hash)?
                .ok_or_else(|| eyre::eyre!("Block hash not found in database: {hash:?}"))?,
            Self::ToBlock { target: BlockHashOrNumber::Number(num) } => *num,
            Self::NumBlocks { amount } => last.saturating_sub(*amount),
        };

        if target > last {
            eyre::bail!(
                "Target block number {target} is higher than the latest block number {last}"
            )
        }

        Ok(target)
    }
}
#[cfg(test)]
mod tests {
    use reth_ethereum_cli::chainspec::EthereumChainSpecParser;

    use super::*;

    // Verifies that both subcommand spellings parse into the expected variants.
    #[test]
    fn parse_unwind() {
        // `to-block` takes an absolute block number (or hash).
        let cmd = Command::<EthereumChainSpecParser>::parse_from([
            "reth",
            "--datadir",
            "dir",
            "to-block",
            "100",
        ]);
        assert_eq!(cmd.command, Subcommands::ToBlock { target: BlockHashOrNumber::Number(100) });

        // `num-blocks` takes a relative amount counted back from the tip.
        let cmd = Command::<EthereumChainSpecParser>::parse_from([
            "reth",
            "--datadir",
            "dir",
            "num-blocks",
            "100",
        ]);
        assert_eq!(cmd.command, Subcommands::NumBlocks { amount: 100 });
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/run.rs | crates/cli/commands/src/stage/run.rs | //! Main `stage` command
//!
//! Stage debugging tool
use crate::common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs};
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::Sealable;
use clap::Parser;
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_runner::CliContext;
use reth_cli_util::get_secret_key;
use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig};
use reth_db_api::database_metrics::DatabaseMetrics;
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder,
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
};
use reth_exex::ExExManagerHandle;
use reth_network::BlockDownloaderProvider;
use reth_network_p2p::HeadersClient;
use reth_node_core::{
args::{NetworkArgs, StageEnum},
version::version_metadata,
};
use reth_node_metrics::{
chain::ChainSpecInfo,
hooks::Hooks,
server::{MetricServer, MetricServerConfig},
version::VersionInfo,
};
use reth_provider::{
writer::UnifiedStorageWriter, ChainSpecProvider, DatabaseProviderFactory,
StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory,
};
use reth_stages::{
stages::{
AccountHashingStage, BodyStage, ExecutionStage, HeaderStage, IndexAccountHistoryStage,
IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage,
TransactionLookupStage,
},
ExecInput, ExecOutput, ExecutionStageThresholds, Stage, StageExt, UnwindInput, UnwindOutput,
};
use std::{any::Any, net::SocketAddr, sync::Arc, time::Instant};
use tokio::sync::watch;
use tracing::*;
/// `reth stage` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // Shared CLI environment (datadir, chain spec, database args).
    #[command(flatten)]
    env: EnvironmentArgs<C>,

    /// Enable Prometheus metrics.
    ///
    /// The metrics will be served at the given interface and port.
    #[arg(long, value_name = "SOCKET")]
    metrics: Option<SocketAddr>,

    /// The name of the stage to run
    #[arg(value_enum)]
    stage: StageEnum,

    /// The height to start at
    #[arg(long)]
    from: u64,

    /// The end of the stage
    #[arg(long, short)]
    to: u64,

    /// Batch size for stage execution and unwind
    // Defaults (in `execute`) to the full `from..=to` range when omitted.
    #[arg(long)]
    batch_size: Option<u64>,

    /// Normally, running the stage requires unwinding for stages that already
    /// have been run, in order to not rewrite to the same database slots.
    ///
    /// You can optionally skip the unwinding phase if you're syncing a block
    /// range that has not been synced before.
    #[arg(long, short)]
    skip_unwind: bool,

    /// Commits the changes in the database. WARNING: potentially destructive.
    ///
    /// Useful when you want to run diagnostics on the database.
    // TODO: We should consider allowing to run hooks at the end of the stage run,
    // e.g. query the DB size, or any table data.
    #[arg(long, short)]
    commit: bool,

    /// Save stage checkpoints
    #[arg(long)]
    checkpoints: bool,

    // Networking configuration; used only by stages that need to download
    // data (headers, bodies).
    #[command(flatten)]
    network: NetworkArgs,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>> Command<C> {
    /// Execute `stage` command
    ///
    /// Runs a single pipeline stage over the `from..=to` block range:
    /// optionally serves metrics, builds the stage (plus a dedicated unwind
    /// stage for Merkle), unwinds previously-synced data unless
    /// `--skip-unwind`, then executes the stage in batches, committing only
    /// when `--commit` is set.
    pub async fn execute<N, Comp, F>(self, ctx: CliContext, components: F) -> eyre::Result<()>
    where
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
        Comp: CliNodeComponents<N>,
        F: FnOnce(Arc<C::ChainSpec>) -> Comp,
    {
        // Raise the fd limit of the process.
        // Does not do anything on windows.
        let _ = fdlimit::raise_fd_limit();

        let Environment { provider_factory, config, data_dir } =
            self.env.init::<N>(AccessRights::RW)?;

        let mut provider_rw = provider_factory.database_provider_rw()?;

        let components = components(provider_factory.chain_spec());

        // Optionally spin up the Prometheus metrics endpoint with db /
        // static-file reporting hooks.
        if let Some(listen_addr) = self.metrics {
            info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr);
            let config = MetricServerConfig::new(
                listen_addr,
                VersionInfo {
                    version: version_metadata().cargo_pkg_version.as_ref(),
                    build_timestamp: version_metadata().vergen_build_timestamp.as_ref(),
                    cargo_features: version_metadata().vergen_cargo_features.as_ref(),
                    git_sha: version_metadata().vergen_git_sha.as_ref(),
                    target_triple: version_metadata().vergen_cargo_target_triple.as_ref(),
                    build_profile: version_metadata().build_profile_name.as_ref(),
                },
                ChainSpecInfo { name: provider_factory.chain_spec().chain().to_string() },
                ctx.task_executor,
                Hooks::builder()
                    .with_hook({
                        let db = provider_factory.db_ref().clone();
                        move || db.report_metrics()
                    })
                    .with_hook({
                        let sfp = provider_factory.static_file_provider();
                        move || {
                            if let Err(error) = sfp.report_metrics() {
                                error!(%error, "Failed to report metrics from static file provider");
                            }
                        }
                    })
                    .build(),
            );

            MetricServer::new(config).serve().await?;
        }

        // Default batch size: the whole requested range in one go.
        let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1);

        let etl_config = config.stages.etl.clone();
        let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default();

        // Build the stage to execute; Merkle additionally gets a dedicated
        // unwind stage, all others unwind via the execution stage itself.
        let (mut exec_stage, mut unwind_stage): (Box<dyn Stage<_>>, Option<Box<dyn Stage<_>>>) =
            match self.stage {
                StageEnum::Headers => {
                    // Headers need a live network connection to fetch from peers.
                    let consensus = Arc::new(components.consensus().clone());

                    let network_secret_path = self
                        .network
                        .p2p_secret_key
                        .clone()
                        .unwrap_or_else(|| data_dir.p2p_secret());
                    let p2p_secret_key = get_secret_key(&network_secret_path)?;

                    let default_peers_path = data_dir.known_peers();

                    let network = self
                        .network
                        .network_config::<N::NetworkPrimitives>(
                            &config,
                            provider_factory.chain_spec(),
                            p2p_secret_key,
                            default_peers_path,
                        )
                        .build(provider_factory.clone())
                        .start_network()
                        .await?;
                    let fetch_client = Arc::new(network.fetch_client().await?);

                    // Use `to` as the tip for the stage
                    // Retries on transient network errors until the header arrives.
                    let tip = loop {
                        match fetch_client.get_header(BlockHashOrNumber::Number(self.to)).await {
                            Ok(header) => {
                                if let Some(header) = header.into_data() {
                                    break header
                                }
                            }
                            Err(error) if error.is_retryable() => {
                                warn!(target: "reth::cli", "Error requesting header: {error}. Retrying...")
                            }
                            Err(error) => return Err(error.into()),
                        }
                    };

                    // Only the receiver matters; the tip is fixed up front.
                    let (_, rx) = watch::channel(tip.hash_slow());
                    (
                        Box::new(HeaderStage::new(
                            provider_factory.clone(),
                            ReverseHeadersDownloaderBuilder::new(config.stages.headers)
                                .build(fetch_client, consensus.clone()),
                            rx,
                            etl_config,
                        )),
                        None,
                    )
                }
                StageEnum::Bodies => {
                    // Bodies also need networking; honor trusted-peer flags.
                    let consensus = Arc::new(components.consensus().clone());

                    let mut config = config;
                    config.peers.trusted_nodes_only = self.network.trusted_only;

                    config.peers.trusted_nodes.extend(self.network.trusted_peers.clone());

                    let network_secret_path = self
                        .network
                        .p2p_secret_key
                        .clone()
                        .unwrap_or_else(|| data_dir.p2p_secret());
                    let p2p_secret_key = get_secret_key(&network_secret_path)?;

                    let default_peers_path = data_dir.known_peers();

                    let network = self
                        .network
                        .network_config::<N::NetworkPrimitives>(
                            &config,
                            provider_factory.chain_spec(),
                            p2p_secret_key,
                            default_peers_path,
                        )
                        .build(provider_factory.clone())
                        .start_network()
                        .await?;
                    let fetch_client = Arc::new(network.fetch_client().await?);

                    let stage = BodyStage::new(
                        BodiesDownloaderBuilder::default()
                            .with_stream_batch_size(batch_size as usize)
                            .with_request_limit(config.stages.bodies.downloader_request_limit)
                            .with_max_buffered_blocks_size_bytes(
                                config.stages.bodies.downloader_max_buffered_blocks_size_bytes,
                            )
                            .with_concurrent_requests_range(
                                config.stages.bodies.downloader_min_concurrent_requests..=
                                    config.stages.bodies.downloader_max_concurrent_requests,
                            )
                            .build(fetch_client, consensus.clone(), provider_factory.clone()),
                    );
                    (Box::new(stage), None)
                }
                StageEnum::Senders => (
                    Box::new(SenderRecoveryStage::new(SenderRecoveryConfig {
                        commit_threshold: batch_size,
                    })),
                    None,
                ),
                StageEnum::Execution => (
                    Box::new(ExecutionStage::new(
                        components.evm_config().clone(),
                        Arc::new(components.consensus().clone()),
                        ExecutionStageThresholds {
                            max_blocks: Some(batch_size),
                            max_changes: None,
                            max_cumulative_gas: None,
                            max_duration: None,
                        },
                        config.stages.merkle.incremental_threshold,
                        ExExManagerHandle::empty(),
                    )),
                    None,
                ),
                StageEnum::TxLookup => (
                    Box::new(TransactionLookupStage::new(
                        TransactionLookupConfig { chunk_size: batch_size },
                        etl_config,
                        prune_modes.transaction_lookup,
                    )),
                    None,
                ),
                StageEnum::AccountHashing => (
                    Box::new(AccountHashingStage::new(
                        HashingConfig { clean_threshold: 1, commit_threshold: batch_size },
                        etl_config,
                    )),
                    None,
                ),
                StageEnum::StorageHashing => (
                    Box::new(StorageHashingStage::new(
                        HashingConfig { clean_threshold: 1, commit_threshold: batch_size },
                        etl_config,
                    )),
                    None,
                ),
                StageEnum::Merkle => (
                    Box::new(MerkleStage::new_execution(
                        config.stages.merkle.rebuild_threshold,
                        config.stages.merkle.incremental_threshold,
                    )),
                    // Merkle is the one stage with a distinct unwind variant.
                    Some(Box::new(MerkleStage::default_unwind())),
                ),
                StageEnum::AccountHistory => (
                    Box::new(IndexAccountHistoryStage::new(
                        config.stages.index_account_history,
                        etl_config,
                        prune_modes.account_history,
                    )),
                    None,
                ),
                StageEnum::StorageHistory => (
                    Box::new(IndexStorageHistoryStage::new(
                        config.stages.index_storage_history,
                        etl_config,
                        prune_modes.storage_history,
                    )),
                    None,
                ),
                // Remaining stage variants are not runnable individually here.
                _ => return Ok(()),
            };
        if let Some(unwind_stage) = &unwind_stage {
            // Sanity check: exec and unwind must be the same stage type.
            assert_eq!((*exec_stage).type_id(), (**unwind_stage).type_id());
        }

        let checkpoint = provider_rw.get_stage_checkpoint(exec_stage.id())?.unwrap_or_default();

        // Fall back to the exec stage for unwinding when no dedicated unwind
        // stage exists.
        let unwind_stage = unwind_stage.as_mut().unwrap_or(&mut exec_stage);

        let mut unwind = UnwindInput {
            checkpoint: checkpoint.with_block_number(self.to),
            unwind_to: self.from,
            bad_block: None,
        };

        if !self.skip_unwind {
            // Repeatedly unwind until the checkpoint reaches `from`; each
            // iteration may only make partial progress (batched unwind).
            while unwind.checkpoint.block_number > self.from {
                let UnwindOutput { checkpoint } = unwind_stage.unwind(&provider_rw, unwind)?;
                unwind.checkpoint = checkpoint;

                if self.checkpoints {
                    provider_rw.save_stage_checkpoint(unwind_stage.id(), checkpoint)?;
                }

                if self.commit {
                    // Commit and reopen a fresh RW provider for the next batch.
                    UnifiedStorageWriter::commit_unwind(provider_rw)?;
                    provider_rw = provider_factory.database_provider_rw()?;
                }
            }
        }

        let mut input = ExecInput {
            target: Some(self.to),
            checkpoint: Some(checkpoint.with_block_number(self.from)),
        };

        let start = Instant::now();
        info!(target: "reth::cli", stage = %self.stage, "Executing stage");
        loop {
            // Let the stage prepare (e.g. downloads) before executing a batch.
            exec_stage.execute_ready(input).await?;
            let ExecOutput { checkpoint, done } = exec_stage.execute(&provider_rw, input)?;

            input.checkpoint = Some(checkpoint);

            if self.checkpoints {
                provider_rw.save_stage_checkpoint(exec_stage.id(), checkpoint)?;
            }
            if self.commit {
                UnifiedStorageWriter::commit(provider_rw)?;
                provider_rw = provider_factory.database_provider_rw()?;
            }

            if done {
                break
            }
        }
        info!(target: "reth::cli", stage = %self.stage, time = ?start.elapsed(), "Finished stage");

        Ok(())
    }
}
impl<C: ChainSpecParser> Command<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Always available: the spec is part of the environment args.
        Some(&self.env.chain)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/dump/merkle.rs | crates/cli/commands/src/stage/dump/merkle.rs | use std::sync::Arc;
use super::setup;
use alloy_primitives::BlockNumber;
use eyre::Result;
use reth_config::config::EtlConfig;
use reth_consensus::{ConsensusError, FullConsensus};
use reth_db::DatabaseEnv;
use reth_db_api::{database::Database, table::TableImporter, tables};
use reth_db_common::DbTool;
use reth_evm::ConfigureEvm;
use reth_exex::ExExManagerHandle;
use reth_node_core::dirs::{ChainPath, DataDirPath};
use reth_provider::{
providers::{ProviderNodeTypes, StaticFileProvider},
DatabaseProviderFactory, ProviderFactory,
};
use reth_stages::{
stages::{
AccountHashingStage, ExecutionStage, MerkleStage, StorageHashingStage,
MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD,
},
ExecutionStageThresholds, Stage, StageCheckpoint, UnwindInput,
};
use tracing::info;
/// Dumps the data required to rerun the Merkle stage over `from..=to` into a
/// fresh database at `output_datadir`, optionally dry-running the stage on the
/// dumped data afterwards.
// NOTE(review): `async` but contains no awaits — presumably kept async for
// uniformity with the other `dump_*` entry points; confirm before changing.
pub(crate) async fn dump_merkle_stage<N>(
    db_tool: &DbTool<N>,
    from: BlockNumber,
    to: BlockNumber,
    output_datadir: ChainPath<DataDirPath>,
    should_run: bool,
    evm_config: impl ConfigureEvm<Primitives = N::Primitives>,
    consensus: impl FullConsensus<N::Primitives, Error = ConsensusError> + 'static,
) -> Result<()>
where
    N: ProviderNodeTypes<DB = Arc<DatabaseEnv>>,
{
    // Create the output db and resolve the source db's tip.
    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;

    // Copy the headers and account changesets for the requested range.
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::Headers, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from),
            to,
        )
    })??;

    output_db.update(|tx| {
        tx.import_table_with_range::<tables::AccountChangeSets, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from),
            to,
        )
    })??;

    // Bring the remaining tables into the state the stage expects at `from`.
    unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db, evm_config, consensus)?;

    if should_run {
        dry_run(
            ProviderFactory::<N>::new(
                Arc::new(output_db),
                db_tool.chain(),
                StaticFileProvider::read_write(output_datadir.static_files())?,
            ),
            to,
            from,
        )?;
    }

    Ok(())
}
/// Dry-run an unwind to FROM block and copy the necessary table data to the new database.
///
/// The source database is unwound inside a RW transaction that is *not*
/// committed — its uncommitted view is only used to copy the resulting table
/// contents into `output_db`.
fn unwind_and_copy<N: ProviderNodeTypes>(
    db_tool: &DbTool<N>,
    range: (u64, u64),
    tip_block_number: u64,
    output_db: &DatabaseEnv,
    evm_config: impl ConfigureEvm<Primitives = N::Primitives>,
    consensus: impl FullConsensus<N::Primitives, Error = ConsensusError> + 'static,
) -> eyre::Result<()> {
    let (from, to) = range;
    let provider = db_tool.provider_factory.database_provider_rw()?;

    let unwind = UnwindInput {
        unwind_to: from,
        checkpoint: StageCheckpoint::new(tip_block_number),
        bad_block: None,
    };
    let execute_input =
        reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) };

    // Unwind hashes all the way to FROM
    StorageHashingStage::default().unwind(&provider, unwind).unwrap();
    AccountHashingStage::default().unwind(&provider, unwind).unwrap();

    MerkleStage::default_unwind().unwind(&provider, unwind)?;

    // Bring Plainstate to TO (hashing stage execution requires it)
    let mut exec_stage = ExecutionStage::new(
        evm_config, // Not necessary for unwinding.
        Arc::new(consensus),
        ExecutionStageThresholds {
            max_blocks: Some(u64::MAX),
            max_changes: None,
            max_cumulative_gas: None,
            max_duration: None,
        },
        MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD,
        ExExManagerHandle::empty(),
    );

    exec_stage.unwind(
        &provider,
        UnwindInput {
            unwind_to: to,
            checkpoint: StageCheckpoint::new(tip_block_number),
            bad_block: None,
        },
    )?;

    // Bring hashes to TO
    // Thresholds set to MAX so the whole range is processed in one pass.
    AccountHashingStage {
        clean_threshold: u64::MAX,
        commit_threshold: u64::MAX,
        etl_config: EtlConfig::default(),
    }
    .execute(&provider, execute_input)
    .unwrap();
    StorageHashingStage {
        clean_threshold: u64::MAX,
        commit_threshold: u64::MAX,
        etl_config: EtlConfig::default(),
    }
    .execute(&provider, execute_input)
    .unwrap();

    // Keep only the raw transaction: we copy from its uncommitted view.
    let unwind_inner_tx = provider.into_tx();

    // TODO optimize we can actually just get the entries we need
    output_db
        .update(|tx| tx.import_dupsort::<tables::StorageChangeSets, _>(&unwind_inner_tx))??;

    output_db.update(|tx| tx.import_table::<tables::HashedAccounts, _>(&unwind_inner_tx))??;
    output_db.update(|tx| tx.import_dupsort::<tables::HashedStorages, _>(&unwind_inner_tx))??;
    output_db.update(|tx| tx.import_table::<tables::AccountsTrie, _>(&unwind_inner_tx))??;
    output_db.update(|tx| tx.import_dupsort::<tables::StoragesTrie, _>(&unwind_inner_tx))??;

    Ok(())
}
/// Try to re-execute the stage straight away
///
/// Runs the Merkle execution stage over `from..=to` against the freshly
/// dumped database, batching until the stage reports completion.
fn dry_run<N>(output_provider_factory: ProviderFactory<N>, to: u64, from: u64) -> eyre::Result<()>
where
    N: ProviderNodeTypes,
{
    info!(target: "reth::cli", "Executing stage.");
    let provider = output_provider_factory.database_provider_rw()?;

    let mut stage = MerkleStage::Execution {
        // Forces updating the root instead of calculating from scratch
        rebuild_threshold: u64::MAX,
        incremental_threshold: u64::MAX,
    };

    loop {
        let input = reth_stages::ExecInput {
            target: Some(to),
            checkpoint: Some(StageCheckpoint::new(from)),
        };
        if stage.execute(&provider, input)?.done {
            break
        }
    }

    info!(target: "reth::cli", "Success");

    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/dump/hashing_account.rs | crates/cli/commands/src/stage/dump/hashing_account.rs | use super::setup;
use alloy_primitives::BlockNumber;
use eyre::Result;
use reth_db::DatabaseEnv;
use reth_db_api::{database::Database, table::TableImporter, tables};
use reth_db_common::DbTool;
use reth_node_core::dirs::{ChainPath, DataDirPath};
use reth_provider::{
providers::{ProviderNodeTypes, StaticFileProvider},
DatabaseProviderFactory, ProviderFactory,
};
use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput};
use std::sync::Arc;
use tracing::info;
/// Dumps the data required to rerun the AccountHashing stage over `from..=to`
/// into a fresh database at `output_datadir`, optionally dry-running the stage
/// on the dumped data afterwards.
// NOTE(review): `async` but contains no awaits — presumably kept async for
// uniformity with the other `dump_*` entry points; confirm before changing.
pub(crate) async fn dump_hashing_account_stage<N: ProviderNodeTypes<DB = Arc<DatabaseEnv>>>(
    db_tool: &DbTool<N>,
    from: BlockNumber,
    to: BlockNumber,
    output_datadir: ChainPath<DataDirPath>,
    should_run: bool,
) -> Result<()> {
    // Create the output db and resolve the source db's tip.
    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;

    // Import relevant AccountChangeSets
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::AccountChangeSets, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from),
            to,
        )
    })??;

    // Bring plain account state into the shape the stage expects at `from`.
    unwind_and_copy(db_tool, from, tip_block_number, &output_db)?;

    if should_run {
        dry_run(
            ProviderFactory::<N>::new(
                Arc::new(output_db),
                db_tool.chain(),
                StaticFileProvider::read_write(output_datadir.static_files())?,
            ),
            to,
            from,
        )?;
    }

    Ok(())
}
/// Dry-run an unwind to FROM block and copy the necessary table data to the new database.
///
/// The unwind happens in an uncommitted RW transaction on the source db; only
/// the resulting `PlainAccountState` view is copied into `output_db`.
fn unwind_and_copy<N: ProviderNodeTypes>(
    db_tool: &DbTool<N>,
    from: u64,
    tip_block_number: u64,
    output_db: &DatabaseEnv,
) -> eyre::Result<()> {
    let provider = db_tool.provider_factory.database_provider_rw()?;
    let mut exec_stage = AccountHashingStage::default();

    // Roll hashed-account progress back from the tip to `from`.
    exec_stage.unwind(
        &provider,
        UnwindInput {
            unwind_to: from,
            checkpoint: StageCheckpoint::new(tip_block_number),
            bad_block: None,
        },
    )?;
    let unwind_inner_tx = provider.into_tx();

    output_db.update(|tx| tx.import_table::<tables::PlainAccountState, _>(&unwind_inner_tx))??;

    Ok(())
}
/// Try to re-execute the stage straight away
///
/// Runs the AccountHashing stage over `from..=to` against the freshly dumped
/// database, batching until the stage reports completion.
fn dry_run<N: ProviderNodeTypes>(
    output_provider_factory: ProviderFactory<N>,
    to: u64,
    from: u64,
) -> eyre::Result<()> {
    info!(target: "reth::cli", "Executing stage.");
    let provider = output_provider_factory.database_provider_rw()?;

    let mut stage = AccountHashingStage {
        clean_threshold: 1, // Forces hashing from scratch
        ..Default::default()
    };

    loop {
        let input = reth_stages::ExecInput {
            target: Some(to),
            checkpoint: Some(StageCheckpoint::new(from)),
        };
        if stage.execute(&provider, input)?.done {
            break
        }
    }

    info!(target: "reth::cli", "Success.");

    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/dump/mod.rs | crates/cli/commands/src/stage/dump/mod.rs | //! Database debugging tool
use crate::common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs};
use clap::Parser;
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv};
use reth_db_api::{
cursor::DbCursorRO, database::Database, models::ClientVersion, table::TableImporter, tables,
transaction::DbTx,
};
use reth_db_common::DbTool;
use reth_node_builder::NodeTypesWithDB;
use reth_node_core::{
args::DatadirArgs,
dirs::{DataDirPath, PlatformPath},
};
use std::{path::PathBuf, sync::Arc};
use tracing::info;
mod hashing_storage;
use hashing_storage::dump_hashing_storage_stage;
mod hashing_account;
use hashing_account::dump_hashing_account_stage;
mod execution;
use execution::dump_execution_stage;
mod merkle;
use merkle::dump_merkle_stage;
/// `reth dump-stage` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    // Shared environment arguments (datadir, chain spec, database settings). Regular `//`
    // comments are used on fields so the clap-generated help text stays unchanged.
    #[command(flatten)]
    env: EnvironmentArgs<C>,
    // Which stage to dump, together with its block range.
    #[command(subcommand)]
    command: Stages,
}
/// Supported stages to be dumped
#[derive(Debug, Clone, Parser)]
pub enum Stages {
    // Every variant carries the same `StageCommand` range arguments; the variant only selects
    // which stage's tables are copied into the output database.
    /// Execution stage.
    Execution(StageCommand),
    /// `StorageHashing` stage.
    StorageHashing(StageCommand),
    /// `AccountHashing` stage.
    AccountHashing(StageCommand),
    /// Merkle stage.
    Merkle(StageCommand),
}
/// Stage command that takes a range
#[derive(Debug, Clone, Parser)]
pub struct StageCommand {
    // NOTE: `from` must be strictly lower than `to`; this is validated in `setup`.
    /// The path to the new datadir folder.
    #[arg(long, value_name = "OUTPUT_PATH", verbatim_doc_comment)]
    output_datadir: PlatformPath<DataDirPath>,
    /// From which block.
    #[arg(long, short)]
    from: u64,
    /// To which block.
    #[arg(long, short)]
    to: u64,
    /// If passed, it will dry-run a stage execution from the newly created database right after
    /// dumping.
    #[arg(long, short, default_value = "false")]
    dry_run: bool,
}
// Dispatches a dump to `$stage_fn`: destructures the shared `StageCommand` arguments and
// resolves the chain-specific output datadir before invoking the stage function.
macro_rules! handle_stage {
    // Arm for stages that only need the db tool and block range (the hashing stages).
    ($stage_fn:ident, $tool:expr, $command:expr) => {{
        let StageCommand { output_datadir, from, to, dry_run, .. } = $command;
        let output_datadir =
            output_datadir.with_chain($tool.chain().chain(), DatadirArgs::default());
        $stage_fn($tool, *from, *to, output_datadir, *dry_run).await?
    }};
    // Arm for stages that additionally need an executor and consensus (execution, merkle).
    ($stage_fn:ident, $tool:expr, $command:expr, $executor:expr, $consensus:expr) => {{
        let StageCommand { output_datadir, from, to, dry_run, .. } = $command;
        let output_datadir =
            output_datadir.with_chain($tool.chain().chain(), DatadirArgs::default());
        $stage_fn($tool, *from, *to, output_datadir, *dry_run, $executor, $consensus).await?
    }};
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> {
    /// Execute `dump-stage` command
    pub async fn execute<N, Comp, F>(self, components: F) -> eyre::Result<()>
    where
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
        Comp: CliNodeComponents<N>,
        F: FnOnce(Arc<C::ChainSpec>) -> Comp,
    {
        // The source database is opened read-only; all writes go to the separate output datadir.
        let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?;
        let tool = DbTool::new(provider_factory)?;
        // Build node components (evm config + consensus) from the resolved chain spec.
        let components = components(tool.chain());
        let evm_config = components.evm_config().clone();
        let consensus = components.consensus().clone();
        // Execution and merkle dumps need the executor/consensus; the hashing dumps do not.
        match &self.command {
            Stages::Execution(cmd) => {
                handle_stage!(dump_execution_stage, &tool, cmd, evm_config, consensus)
            }
            Stages::StorageHashing(cmd) => handle_stage!(dump_hashing_storage_stage, &tool, cmd),
            Stages::AccountHashing(cmd) => handle_stage!(dump_hashing_account_stage, &tool, cmd),
            Stages::Merkle(cmd) => {
                handle_stage!(dump_merkle_stage, &tool, cmd, evm_config, consensus)
            }
        }
        Ok(())
    }
}
impl<C: ChainSpecParser> Command<C> {
    /// Returns the underlying chain being used to run this command
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        // Always `Some`: the chain spec is part of the required environment args.
        Some(&self.env.chain)
    }
}
/// Sets up the database and initial state on [`tables::BlockBodyIndices`]. Also returns the tip
/// block number.
///
/// Creates a fresh database at `output_db` and copies the block body indices for the
/// `from - 1 ..= to + 1` range into it, so that stages run against it have the surrounding
/// context they need.
///
/// # Errors
/// Returns an error if `from >= to`, if the output database cannot be created, or if the source
/// database contains no block body indices.
pub(crate) fn setup<N: NodeTypesWithDB>(
    from: u64,
    to: u64,
    output_db: &PathBuf,
    db_tool: &DbTool<N>,
) -> eyre::Result<(DatabaseEnv, u64)> {
    // Return a proper error instead of panicking on invalid CLI input.
    eyre::ensure!(from < to, "FROM block ({from}) should be lower than TO block ({to}).");
    info!(target: "reth::cli", ?output_db, "Creating separate db");
    let output_datadir = init_db(output_db, DatabaseArguments::new(ClientVersion::default()))?;
    // Import one extra block on each side of the range; `saturating_sub` guards against
    // underflow when `from == 0`.
    output_datadir.update(|tx| {
        tx.import_table_with_range::<tables::BlockBodyIndices, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from.saturating_sub(1)),
            to + 1,
        )
    })??;
    // The tip is the highest block with body indices in the source database. Surface a
    // descriptive error instead of panicking when the source database is empty.
    let (tip_block_number, _) = db_tool
        .provider_factory
        .db_ref()
        .view(|tx| tx.cursor_read::<tables::BlockBodyIndices>()?.last())??
        .ok_or_else(|| eyre::eyre!("no block body indices found in the source database"))?;
    Ok((output_datadir, tip_block_number))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/dump/hashing_storage.rs | crates/cli/commands/src/stage/dump/hashing_storage.rs | use super::setup;
use eyre::Result;
use reth_db::DatabaseEnv;
use reth_db_api::{database::Database, table::TableImporter, tables};
use reth_db_common::DbTool;
use reth_node_core::dirs::{ChainPath, DataDirPath};
use reth_provider::{
providers::{ProviderNodeTypes, StaticFileProvider},
DatabaseProviderFactory, ProviderFactory,
};
use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput};
use std::sync::Arc;
use tracing::info;
/// Dumps the data needed to re-run the [`StorageHashingStage`] for the `from..=to` block range
/// into a fresh database at `output_datadir`, optionally dry-running the stage afterwards.
pub(crate) async fn dump_hashing_storage_stage<N: ProviderNodeTypes<DB = Arc<DatabaseEnv>>>(
    db_tool: &DbTool<N>,
    from: u64,
    to: u64,
    output_datadir: ChainPath<DataDirPath>,
    should_run: bool,
) -> Result<()> {
    // Create the output db and copy the surrounding block body indices; returns the tip.
    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;
    // Unwind the source db to `from` and copy the storage tables into the output db.
    unwind_and_copy(db_tool, from, tip_block_number, &output_db)?;
    if should_run {
        dry_run(
            ProviderFactory::<N>::new(
                Arc::new(output_db),
                db_tool.chain(),
                StaticFileProvider::read_write(output_datadir.static_files())?,
            ),
            to,
            from,
        )?;
    }
    Ok(())
}
/// Dry-run an unwind to FROM block and copy the necessary table data to the new database.
fn unwind_and_copy<N: ProviderNodeTypes>(
    db_tool: &DbTool<N>,
    from: u64,
    tip_block_number: u64,
    output_db: &DatabaseEnv,
) -> eyre::Result<()> {
    let provider = db_tool.provider_factory.database_provider_rw()?;
    // Unwind the storage hashing stage from the tip down to `from` so the plain-state tables
    // reflect the state at `from`.
    let mut exec_stage = StorageHashingStage::default();
    exec_stage.unwind(
        &provider,
        UnwindInput {
            unwind_to: from,
            checkpoint: StageCheckpoint::new(tip_block_number),
            bad_block: None,
        },
    )?;
    // Reuse the same (uncommitted) transaction to read the unwound state for the copy.
    let unwind_inner_tx = provider.into_tx();
    // TODO optimize we can actually just get the entries we need for both these tables
    output_db
        .update(|tx| tx.import_dupsort::<tables::PlainStorageState, _>(&unwind_inner_tx))??;
    output_db
        .update(|tx| tx.import_dupsort::<tables::StorageChangeSets, _>(&unwind_inner_tx))??;
    Ok(())
}
/// Try to re-execute the stage straight away
fn dry_run<N: ProviderNodeTypes>(
    output_provider_factory: ProviderFactory<N>,
    to: u64,
    from: u64,
) -> eyre::Result<()> {
    info!(target: "reth::cli", "Executing stage.");
    let provider = output_provider_factory.database_provider_rw()?;
    // A clean threshold of 1 forces the stage to hash from scratch instead of incrementally.
    let mut hashing_stage = StorageHashingStage { clean_threshold: 1, ..Default::default() };
    // Drive the stage until it reports completion for the target block.
    while !hashing_stage
        .execute(
            &provider,
            reth_stages::ExecInput {
                target: Some(to),
                checkpoint: Some(StageCheckpoint::new(from)),
            },
        )?
        .done
    {}
    info!(target: "reth::cli", "Success.");
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/stage/dump/execution.rs | crates/cli/commands/src/stage/dump/execution.rs | use super::setup;
use reth_consensus::{noop::NoopConsensus, ConsensusError, FullConsensus};
use reth_db::DatabaseEnv;
use reth_db_api::{
cursor::DbCursorRO, database::Database, table::TableImporter, tables, transaction::DbTx,
};
use reth_db_common::DbTool;
use reth_evm::ConfigureEvm;
use reth_node_builder::NodeTypesWithDB;
use reth_node_core::dirs::{ChainPath, DataDirPath};
use reth_provider::{
providers::{ProviderNodeTypes, StaticFileProvider},
DatabaseProviderFactory, ProviderFactory,
};
use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput};
use std::sync::Arc;
use tracing::info;
/// Dumps the data needed to re-run the execution stage for the `from..=to` block range into a
/// fresh database at `output_datadir`, optionally dry-running the stage afterwards.
pub(crate) async fn dump_execution_stage<N, E, C>(
    db_tool: &DbTool<N>,
    from: u64,
    to: u64,
    output_datadir: ChainPath<DataDirPath>,
    should_run: bool,
    evm_config: E,
    consensus: C,
) -> eyre::Result<()>
where
    N: ProviderNodeTypes<DB = Arc<DatabaseEnv>>,
    E: ConfigureEvm<Primitives = N::Primitives> + 'static,
    C: FullConsensus<E::Primitives, Error = ConsensusError> + 'static,
{
    // Create the output db and copy the surrounding block body indices; returns the tip.
    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;
    // Copy headers/bodies/transactions for the range as-is.
    import_tables_with_range(&output_db, db_tool, from, to)?;
    // Unwind to `from` so the plain state matches the pre-execution state of the range.
    unwind_and_copy(db_tool, from, tip_block_number, &output_db, evm_config.clone())?;
    if should_run {
        dry_run(
            ProviderFactory::<N>::new(
                Arc::new(output_db),
                db_tool.chain(),
                StaticFileProvider::read_write(output_datadir.static_files())?,
            ),
            to,
            from,
            evm_config,
            consensus,
        )?;
    }
    Ok(())
}
/// Imports all the tables that can be copied over a range.
///
/// Copies the header-, body- and transaction-related tables for `from..=to` from the source
/// database into `output_db`.
fn import_tables_with_range<N: NodeTypesWithDB>(
    output_db: &DatabaseEnv,
    db_tool: &DbTool<N>,
    from: u64,
    to: u64,
) -> eyre::Result<()> {
    // We're not sharing the transaction in case the memory grows too much.
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::CanonicalHeaders, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from),
            to,
        )
    })??;
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::HeaderTerminalDifficulties, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from),
            to,
        )
    })??;
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::Headers, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from),
            to,
        )
    })??;
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::BlockBodyIndices, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from),
            to,
        )
    })??;
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::BlockOmmers, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from),
            to,
        )
    })??;
    // Find range of transactions that need to be copied over
    // (transaction tables are keyed by tx number, not block number).
    let (from_tx, to_tx) = db_tool.provider_factory.db_ref().view(|read_tx| {
        let mut read_cursor = read_tx.cursor_read::<tables::BlockBodyIndices>()?;
        let (_, from_block) =
            read_cursor.seek(from)?.ok_or(eyre::eyre!("BlockBody {from} does not exist."))?;
        let (_, to_block) =
            read_cursor.seek(to)?.ok_or(eyre::eyre!("BlockBody {to} does not exist."))?;
        Ok::<(u64, u64), eyre::ErrReport>((
            from_block.first_tx_num,
            to_block.first_tx_num + to_block.tx_count,
        ))
    })??;
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::Transactions, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from_tx),
            to_tx,
        )
    })??;
    output_db.update(|tx| {
        tx.import_table_with_range::<tables::TransactionSenders, _>(
            &db_tool.provider_factory.db_ref().tx()?,
            Some(from_tx),
            to_tx,
        )
    })??;
    Ok(())
}
/// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and
/// `PlainAccountState` safely. There might be some state dependency from an address
/// which hasn't been changed in the given range.
fn unwind_and_copy<N: ProviderNodeTypes>(
    db_tool: &DbTool<N>,
    from: u64,
    tip_block_number: u64,
    output_db: &DatabaseEnv,
    evm_config: impl ConfigureEvm<Primitives = N::Primitives>,
) -> eyre::Result<()> {
    let provider = db_tool.provider_factory.database_provider_rw()?;
    // Consensus is irrelevant for an unwind, so a no-op consensus suffices.
    let mut exec_stage = ExecutionStage::new_with_executor(evm_config, NoopConsensus::arc());
    exec_stage.unwind(
        &provider,
        UnwindInput {
            unwind_to: from,
            checkpoint: StageCheckpoint::new(tip_block_number),
            bad_block: None,
        },
    )?;
    // Reuse the same (uncommitted) transaction to read the unwound state for the copy.
    let unwind_inner_tx = provider.into_tx();
    output_db
        .update(|tx| tx.import_dupsort::<tables::PlainStorageState, _>(&unwind_inner_tx))??;
    output_db.update(|tx| tx.import_table::<tables::PlainAccountState, _>(&unwind_inner_tx))??;
    output_db.update(|tx| tx.import_table::<tables::Bytecodes, _>(&unwind_inner_tx))??;
    Ok(())
}
/// Try to re-execute the stage without committing
fn dry_run<N, E, C>(
    output_provider_factory: ProviderFactory<N>,
    to: u64,
    from: u64,
    evm_config: E,
    consensus: C,
) -> eyre::Result<()>
where
    N: ProviderNodeTypes,
    E: ConfigureEvm<Primitives = N::Primitives> + 'static,
    C: FullConsensus<E::Primitives, Error = ConsensusError> + 'static,
{
    info!(target: "reth::cli", "Executing stage. [dry-run]");
    let mut exec_stage = ExecutionStage::new_with_executor(evm_config, Arc::new(consensus));
    // Single execution pass from `from` to `to`; the provider transaction is never committed.
    let input =
        reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) };
    exec_stage.execute(&output_provider_factory.database_provider_rw()?, input)?;
    info!(target: "reth::cli", "Success");
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/test_vectors/compact.rs | crates/cli/commands/src/test_vectors/compact.rs | use alloy_eips::eip4895::Withdrawals;
use alloy_primitives::{hex, Signature, TxKind, B256};
use arbitrary::Arbitrary;
use eyre::{Context, Result};
use proptest::{
prelude::{ProptestConfig, RngCore},
test_runner::{TestRng, TestRunner},
};
use reth_codecs::alloy::{
authorization_list::Authorization,
genesis_account::GenesisAccount,
header::{Header, HeaderExt},
transaction::{
eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702,
legacy::TxLegacy,
},
withdrawal::Withdrawal,
};
use reth_db::{
models::{
AccountBeforeTx, StaticFileBlockWithdrawals, StoredBlockBodyIndices, StoredBlockOmmers,
StoredBlockWithdrawals,
},
ClientVersion,
};
use reth_ethereum_primitives::{Receipt, Transaction, TransactionSigned, TxType};
use reth_fs_util as fs;
use reth_primitives_traits::{Account, Log, LogData, StorageEntry};
use reth_prune_types::{PruneCheckpoint, PruneMode};
use reth_stages_types::{
AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint,
HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, StageUnitCheckpoint,
StorageHashingCheckpoint,
};
use reth_trie::{hash_builder::HashBuilderValue, TrieMask};
use reth_trie_common::{hash_builder::HashBuilderState, StoredNibbles, StoredNibblesSubKey};
use std::{fs::File, io::BufReader};
pub const VECTORS_FOLDER: &str = "testdata/micro/compact";
pub const VECTOR_SIZE: usize = 100;
#[macro_export]
// Given the lists of `Compact` types, generates:
//  * `GENERATE_VECTORS` — one vector-generating fn per type,
//  * `READ_VECTORS` — one vector-reading fn per type,
//  * `IDENTIFIER_TYPE` — the set of type names that carry an extra trailing identifier byte.
macro_rules! compact_types {
    (regular: [$($regular_ty:ident),*], identifier: [$($id_ty:ident),*]) => {
        pub const GENERATE_VECTORS: &[fn(&mut TestRunner) -> eyre::Result<()>] = &[
            $(
                generate_vector::<$regular_ty> as fn(&mut TestRunner) -> eyre::Result<()>,
            )*
            $(
                generate_vector::<$id_ty> as fn(&mut TestRunner) -> eyre::Result<()>,
            )*
        ];
        pub const READ_VECTORS: &[fn() -> eyre::Result<()>] = &[
            $(
                read_vector::<$regular_ty> as fn() -> eyre::Result<()>,
            )*
            $(
                read_vector::<$id_ty> as fn() -> eyre::Result<()>,
            )*
        ];
        pub static IDENTIFIER_TYPE: std::sync::LazyLock<std::collections::HashSet<String>> = std::sync::LazyLock::new(|| {
            let mut map = std::collections::HashSet::new();
            $(
                map.insert(type_name::<$id_ty>());
            )*
            map
        });
    };
}
// The type that **actually** implements `Compact` should go here. If it's an alloy type, import the
// auxiliary type from reth_codecs::alloy instead.
// Adding a type here both generates and validates its test vectors (see GENERATE_VECTORS /
// READ_VECTORS above).
compact_types!(
    regular: [
        // reth-primitives
        Account,
        Receipt,
        // reth_codecs::alloy
        Authorization,
        GenesisAccount,
        Header,
        HeaderExt,
        Withdrawal,
        Withdrawals,
        TxEip2930,
        TxEip1559,
        TxEip4844,
        TxEip7702,
        TxLegacy,
        HashBuilderValue,
        LogData,
        Log,
        // BranchNodeCompact, // todo requires arbitrary
        TrieMask,
        // reth_prune_types
        PruneCheckpoint,
        PruneMode,
        // reth_stages_types
        AccountHashingCheckpoint,
        StorageHashingCheckpoint,
        ExecutionCheckpoint,
        HeadersCheckpoint,
        IndexHistoryCheckpoint,
        EntitiesCheckpoint,
        CheckpointBlockRange,
        StageCheckpoint,
        StageUnitCheckpoint,
        // reth_db_api
        StoredBlockOmmers,
        StoredBlockBodyIndices,
        StoredBlockWithdrawals,
        StaticFileBlockWithdrawals,
        // Manual implementations
        TransactionSigned,
        // Bytecode, // todo revm arbitrary
        StorageEntry,
        // MerkleCheckpoint, // todo storedsubnode -> branchnodecompact arbitrary
        AccountBeforeTx,
        ClientVersion,
        StoredNibbles,
        StoredNibblesSubKey,
        // StorageTrieEntry, // todo branchnodecompact arbitrary
        // StoredSubNode, // todo branchnodecompact arbitrary
        HashBuilderState
    ],
    // These types require an extra identifier which is usually stored elsewhere (eg. parent type).
    identifier: [
        Signature,
        Transaction,
        TxType,
        TxKind
    ]
);
/// Generates test vectors for all registered `Compact` types, writing one file per type.
pub fn generate_vectors() -> Result<()> {
    // `GENERATE_VECTORS` is produced by the `compact_types!` invocation above.
    generate_vectors_with(GENERATE_VECTORS)
}
/// Reads and validates the previously generated test vectors for all registered `Compact` types.
pub fn read_vectors() -> Result<()> {
    // `READ_VECTORS` is produced by the `compact_types!` invocation above.
    read_vectors_with(READ_VECTORS)
}
/// Runs every generator in `generator` against a freshly seeded proptest runner, writing the
/// produced vectors to [`VECTORS_FOLDER`].
pub fn generate_vectors_with(generator: &[fn(&mut TestRunner) -> eyre::Result<()>]) -> Result<()> {
    // Derive a random seed the same way proptest does, and print it so runs are reproducible.
    let seed = B256::random();
    println!("Seed for compact test vectors: {:?}", hex::encode_prefixed(seed));
    // Build a test runner driven by that seed.
    let config = ProptestConfig::default();
    let rng = TestRng::from_seed(config.rng_algorithm, &seed.0);
    let mut runner = TestRunner::new_with_rng(config, rng);
    fs::create_dir_all(VECTORS_FOLDER)?;
    // Bail out on the first generator failure.
    generator.iter().try_for_each(|generate_fn| generate_fn(&mut runner))
}
/// Reads multiple vectors of different types ensuring their correctness by decoding and
/// re-encoding.
///
/// Runs every reader, collecting all failures so that every mismatch is reported in one pass
/// before returning an error.
pub fn read_vectors_with(read: &[fn() -> eyre::Result<()>]) -> Result<()> {
    fs::create_dir_all(VECTORS_FOLDER)?;
    // Lazily allocated: stays `None` when every reader succeeds.
    let mut errors = None;
    for read_fn in read {
        if let Err(err) = read_fn() {
            errors.get_or_insert_with(Vec::new).push(err);
        }
    }
    if let Some(err_list) = errors {
        // Print each failure before returning the aggregate error.
        for error in err_list {
            eprintln!("{error:?}");
        }
        return Err(eyre::eyre!(
            "If there are missing types, make sure to run `reth test-vectors compact --write` first.\n
            If it happened during CI, ignore IF it's a new proposed type that `main` branch does not have."
        ));
    }
    Ok(())
}
/// Generates test vectors for a specific type `T`.
///
/// Produces [`VECTOR_SIZE`] arbitrary values, serializes each with `Compact`, and writes the
/// hex-encoded results as a JSON array to `testdata/micro/compact/<T>.json`.
pub fn generate_vector<T>(runner: &mut TestRunner) -> Result<()>
where
    T: for<'a> Arbitrary<'a> + reth_codecs::Compact,
{
    let type_name = type_name::<T>();
    print!("{}", &type_name);
    // Entropy pool fed to `Arbitrary`; grown on demand below.
    let mut bytes = std::iter::repeat_n(0u8, 256).collect::<Vec<u8>>();
    let mut compact_buffer = vec![];
    let mut values = Vec::with_capacity(VECTOR_SIZE);
    for _ in 0..VECTOR_SIZE {
        runner.rng().fill_bytes(&mut bytes);
        compact_buffer.clear();
        // Sometimes type T, might require extra arbitrary data, so we retry it a few times.
        let mut tries = 0;
        let obj = loop {
            match T::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) {
                Ok(obj) => break obj,
                Err(err) => {
                    // Only `NotEnoughData` is retryable: extend the entropy pool and try again,
                    // up to 5 times. Any other error is propagated.
                    if tries < 5 && matches!(err, arbitrary::Error::NotEnoughData) {
                        tries += 1;
                        bytes.extend(std::iter::repeat_n(0u8, 256));
                    } else {
                        return Err(err)?
                    }
                }
            }
        };
        let res = obj.to_compact(&mut compact_buffer);
        // Identifier types append their identifier byte so `read_vector` can recover it.
        if IDENTIFIER_TYPE.contains(&type_name) {
            compact_buffer.push(res as u8);
        }
        values.push(hex::encode(&compact_buffer));
    }
    serde_json::to_writer(
        std::io::BufWriter::new(
            std::fs::File::create(format!("{VECTORS_FOLDER}/{}.json", &type_name)).unwrap(),
        ),
        &values,
    )?;
    println!(" ✅");
    Ok(())
}
/// Reads a vector of type `T` from a file and compares each item with its reconstructed version
/// using `T::from_compact`.
///
/// For each stored hex entry: decode, round-trip through `from_compact`/`to_compact`, and assert
/// the bytes are unchanged.
pub fn read_vector<T>() -> Result<()>
where
    T: reth_codecs::Compact,
{
    let type_name = type_name::<T>();
    print!("{}", &type_name);
    // Read the file where the vectors are stored
    let file_path = format!("{VECTORS_FOLDER}/{}.json", &type_name);
    let file =
        File::open(&file_path).wrap_err_with(|| format!("Failed to open vector {type_name}."))?;
    let reader = BufReader::new(file);
    let stored_values: Vec<String> = serde_json::from_reader(reader)?;
    let mut buffer = vec![];
    for hex_str in stored_values {
        let mut compact_bytes = hex::decode(hex_str)?;
        let mut identifier = None;
        buffer.clear();
        // Identifier types store their identifier as a trailing byte (see `generate_vector`).
        if IDENTIFIER_TYPE.contains(&type_name) {
            identifier = compact_bytes.pop().map(|b| b as usize);
        }
        // `from_compact` takes either the payload length or the identifier, depending on type.
        let len_or_identifier = identifier.unwrap_or(compact_bytes.len());
        let (reconstructed, _) = T::from_compact(&compact_bytes, len_or_identifier);
        reconstructed.to_compact(&mut buffer);
        assert_eq!(buffer, compact_bytes, "mismatch {type_name}");
    }
    println!(" ✅");
    Ok(())
}
/// Returns the type name for the given type.
///
/// Strips the module path, keeping only the last `::`-separated segment, with special-cased
/// mappings for renamed alloy types so the test-vector filenames stay stable.
pub fn type_name<T>() -> String {
    // With alloy type transition <https://github.com/paradigmxyz/reth/pull/15768> the types are renamed, we map them here to the original name so that test vector files remain consistent
    let name = std::any::type_name::<T>();
    match name {
        "alloy_consensus::transaction::typed::EthereumTypedTransaction<alloy_consensus::transaction::eip4844::TxEip4844>" => "Transaction".to_string(),
        "alloy_consensus::transaction::envelope::EthereumTxEnvelope<alloy_consensus::transaction::eip4844::TxEip4844>" => "TransactionSigned".to_string(),
        name => {
            // `rsplit` always yields at least one segment, so the fallback never triggers; reuse
            // the already-computed `name` instead of re-evaluating `std::any::type_name`.
            name.rsplit("::").next().unwrap_or(name).to_string()
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/test_vectors/mod.rs | crates/cli/commands/src/test_vectors/mod.rs | //! Command for generating test vectors.
use clap::{Parser, Subcommand};
pub mod compact;
pub mod tables;
/// Generate test-vectors for different data types.
#[derive(Debug, Parser)]
pub struct Command {
#[command(subcommand)]
command: Subcommands,
}
#[derive(Subcommand, Debug)]
/// `reth test-vectors` subcommands
pub enum Subcommands {
    /// Generates test vectors for specified tables. If no table is specified, generate for all.
    Tables {
        /// List of table names. Case-sensitive.
        names: Vec<String>,
    },
    /// Randomly generate test vectors for each `Compact` type using the `--write` flag.
    ///
    /// The generated vectors are serialized in both `json` and `Compact` formats and saved to a
    /// file.
    ///
    /// Use the `--read` flag to read and validate the previously generated vectors from a file.
    // The group makes `--write` and `--read` mutually exclusive, and requires exactly one.
    #[group(multiple = false, required = true)]
    Compact {
        /// Write test vectors to a file.
        #[arg(long)]
        write: bool,
        /// Read test vectors from a file.
        #[arg(long)]
        read: bool,
    },
}
impl Command {
    /// Execute the command
    pub async fn execute(self) -> eyre::Result<()> {
        match self.command {
            Subcommands::Tables { names } => {
                tables::generate_vectors(names)?;
            }
            // `write` and `read` are mutually exclusive and one is required (see the clap
            // group), so `!write` implies `read`.
            Subcommands::Compact { write, .. } => {
                if write {
                    compact::generate_vectors()?;
                } else {
                    compact::read_vectors()?;
                }
            }
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/test_vectors/tables.rs | crates/cli/commands/src/test_vectors/tables.rs | use alloy_consensus::Header;
use alloy_primitives::{hex, B256};
use arbitrary::Arbitrary;
use eyre::Result;
use proptest::{
prelude::ProptestConfig,
strategy::{Strategy, ValueTree},
test_runner::{TestRng, TestRunner},
};
use proptest_arbitrary_interop::arb;
use reth_db_api::{
table::{DupSort, Table, TableRow},
tables,
};
use reth_ethereum_primitives::TransactionSigned;
use reth_fs_util as fs;
use std::collections::HashSet;
use tracing::error;
const VECTORS_FOLDER: &str = "testdata/micro/db";
const PER_TABLE: usize = 1000;
/// Generates test vectors for specified `tables`. If list is empty, then generate for all tables.
pub fn generate_vectors(mut tables: Vec<String>) -> Result<()> {
// Prepare random seed for test (same method as used by proptest)
let seed = B256::random();
println!("Seed for table test vectors: {:?}", hex::encode_prefixed(seed));
// Start the runner with the seed
let config = ProptestConfig::default();
let rng = TestRng::from_seed(config.rng_algorithm, &seed.0);
let mut runner = TestRunner::new_with_rng(config, rng);
fs::create_dir_all(VECTORS_FOLDER)?;
macro_rules! generate_vector {
($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, TABLE) => {
generate_table_vector::<tables::$table_type$(<$($generic),+>)?>(&mut runner, $per_table)?;
};
($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, DUPSORT) => {
generate_dupsort_vector::<tables::$table_type$(<$($generic),+>)?>(&mut runner, $per_table)?;
};
}
macro_rules! generate {
([$(($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, $table_or_dup:tt)),*]) => {
let all_tables = vec![$(stringify!($table_type).to_string(),)*];
if tables.is_empty() {
tables = all_tables;
}
for table in tables {
match table.as_str() {
$(
stringify!($table_type) => {
println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME);
generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup);
},
)*
_ => {
error!(target: "reth::cli", "Unknown table: {}", table);
}
}
}
}
}
generate!([
(CanonicalHeaders, PER_TABLE, TABLE),
(HeaderTerminalDifficulties, PER_TABLE, TABLE),
(HeaderNumbers, PER_TABLE, TABLE),
(Headers<Header>, PER_TABLE, TABLE),
(BlockBodyIndices, PER_TABLE, TABLE),
(BlockOmmers<Header>, 100, TABLE),
(TransactionHashNumbers, PER_TABLE, TABLE),
(Transactions<TransactionSigned>, 100, TABLE),
(PlainStorageState, PER_TABLE, DUPSORT),
(PlainAccountState, PER_TABLE, TABLE)
]);
Ok(())
}
/// Generates test-vectors for normal tables. Keys are sorted and not repeated.
fn generate_table_vector<T>(runner: &mut TestRunner, per_table: usize) -> Result<()>
where
    T: Table,
    T::Key: for<'a> Arbitrary<'a> + serde::Serialize + Ord + std::hash::Hash + Clone,
    T::Value: for<'a> Arbitrary<'a> + serde::Serialize + Clone,
{
    let mut rows = vec![];
    // Tracks keys already produced so duplicates can be discarded.
    let mut seen_keys = HashSet::new();
    let strategy =
        proptest::collection::vec(arb::<TableRow<T>>(), per_table - rows.len()).no_shrink().boxed();
    // Duplicate keys are filtered out, so a single batch may come up short; keep generating
    // batches until at least `per_table` unique rows exist.
    while rows.len() < per_table {
        // Generate all `per_table` rows: (Key, Value)
        rows.extend(
            &mut strategy
                .new_tree(runner)
                .map_err(|e| eyre::eyre!("{e}"))?
                .current()
                .into_iter()
                .filter(|e| seen_keys.insert(e.0.clone())),
        );
    }
    // Sort them by `Key`
    rows.sort_by(|a, b| a.0.cmp(&b.0));
    save_to_file::<T>(rows)
}
/// Generates test-vectors for DUPSORT tables. Each key has multiple (subkey, value). Keys and
/// subkeys are sorted.
fn generate_dupsort_vector<T>(runner: &mut TestRunner, per_table: usize) -> Result<()>
where
    T: Table + DupSort,
    T::Key: for<'a> Arbitrary<'a> + serde::Serialize + Ord + std::hash::Hash + Clone,
    T::Value: for<'a> Arbitrary<'a> + serde::Serialize + Ord + Clone,
{
    let mut rows = vec![];
    // We want to control our repeated keys
    let mut seen_keys = HashSet::new();
    // Each unique key gets 100-300 sorted values, matching dupsort table semantics.
    let start_values = proptest::collection::vec(arb::<T::Value>(), 100..300).no_shrink().boxed();
    let start_keys = arb::<T::Key>().no_shrink().boxed();
    while rows.len() < per_table {
        let key: T::Key = start_keys.new_tree(runner).map_err(|e| eyre::eyre!("{e}"))?.current();
        // Skip keys that were already generated.
        if !seen_keys.insert(key.clone()) {
            continue
        }
        let mut values: Vec<T::Value> =
            start_values.new_tree(runner).map_err(|e| eyre::eyre!("{e}"))?.current();
        values.sort();
        for value in values {
            rows.push((key.clone(), value));
        }
    }
    // Sort them by `Key`
    rows.sort_by(|a, b| a.0.cmp(&b.0));
    save_to_file::<T>(rows)
}
/// Save rows to file.
///
/// Serializes `rows` as pretty-printed JSON into `testdata/micro/db/<TABLE>.json`.
///
/// # Errors
/// Returns an error if the output file cannot be created or serialization fails.
fn save_to_file<T: Table>(rows: Vec<TableRow<T>>) -> eyre::Result<()>
where
    T::Key: serde::Serialize,
    T::Value: serde::Serialize,
{
    // Propagate file-creation failures instead of panicking on I/O errors.
    let file = std::fs::File::create(format!("{VECTORS_FOLDER}/{}.json", T::NAME))?;
    serde_json::to_writer_pretty(std::io::BufWriter::new(file), &rows)
        .map_err(|e| eyre::eyre!(e))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/p2p/bootnode.rs | crates/cli/commands/src/p2p/bootnode.rs | //! Standalone bootnode command
use clap::Parser;
use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config};
use reth_discv5::{discv5::Event, Config, Discv5};
use reth_net_nat::NatResolver;
use reth_network_peers::NodeRecord;
use std::{net::SocketAddr, str::FromStr};
use tokio::select;
use tokio_stream::StreamExt;
use tracing::info;
/// Start a discovery only bootnode.
#[derive(Parser, Debug)]
pub struct Command {
/// Listen address for the bootnode (default: ":30301").
#[arg(long, default_value = ":30301")]
pub addr: String,
/// Generate a new node key and save it to the specified file.
#[arg(long, default_value = "")]
pub gen_key: String,
/// Private key filename for the node.
#[arg(long, default_value = "")]
pub node_key: String,
/// NAT resolution method (any|none|upnp|publicip|extip:\<IP\>)
#[arg(long, default_value = "any")]
pub nat: NatResolver,
/// Run a v5 topic discovery bootnode.
#[arg(long)]
pub v5: bool,
}
impl Command {
    /// Execute the bootnode command.
    ///
    /// Binds a discv4 service (and optionally discv5) on the configured address, then loops
    /// forever logging peer add/remove events until either update stream ends.
    pub async fn execute(self) -> eyre::Result<()> {
        info!("Bootnode started with config: {:?}", self);
        // An ephemeral node key is generated on every start; the `gen_key`/`node_key` CLI args
        // are not read here (see NOTE on the struct).
        let sk = reth_network::config::rng_secret_key();
        let socket_addr = SocketAddr::from_str(&self.addr)?;
        let local_enr = NodeRecord::from_secret_key(socket_addr, &sk);
        let config = Discv4Config::builder().external_ip_resolver(Some(self.nat)).build();
        let (_discv4, mut discv4_service) =
            Discv4::bind(socket_addr, local_enr, sk, config).await?;
        info!("Started discv4 at address:{:?}", socket_addr);
        // Subscribe to discovery updates before spawning the service task.
        let mut discv4_updates = discv4_service.update_stream();
        discv4_service.spawn();
        // Optional discv5 update event listener if v5 is enabled
        let mut discv5_updates = None;
        if self.v5 {
            info!("Starting discv5");
            let config = Config::builder(socket_addr).build();
            let (_discv5, updates, _local_enr_discv5) = Discv5::start(&sk, config).await?;
            discv5_updates = Some(updates);
        };
        // event info loop for logging
        loop {
            select! {
                //discv4 updates
                update = discv4_updates.next() => {
                    if let Some(update) = update {
                        match update {
                            DiscoveryUpdate::Added(record) => {
                                info!("(Discv4) new peer added, peer_id={:?}", record.id);
                            }
                            DiscoveryUpdate::Removed(peer_id) => {
                                info!("(Discv4) peer with peer-id={:?} removed", peer_id);
                            }
                            _ => {}
                        }
                    } else {
                        info!("(Discv4) update stream ended.");
                        break;
                    }
                }
                //if discv5, discv5 update stream, else do nothing
                // `pending()` never resolves, so this branch is inert when discv5 is disabled.
                update = async {
                    if let Some(updates) = &mut discv5_updates {
                        updates.recv().await
                    } else {
                        futures::future::pending().await
                    }
                } => {
                    if let Some(update) = update {
                        if let Event::SessionEstablished(enr, _) = update {
                            info!("(Discv5) new peer added, peer_id={:?}", enr.id());
                        }
                    } else {
                        info!("(Discv5) update stream ended.");
                        break;
                    }
                }
            }
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/p2p/rlpx.rs | crates/cli/commands/src/p2p/rlpx.rs | //! RLPx subcommand of P2P Debugging tool.
use clap::{Parser, Subcommand};
use reth_ecies::stream::ECIESStream;
use reth_eth_wire::{HelloMessage, UnauthedP2PStream};
use reth_network::config::rng_secret_key;
use reth_network_peers::{pk2id, AnyNode};
use secp256k1::SECP256K1;
use tokio::net::TcpStream;
/// RLPx commands
#[derive(Parser, Debug)]
pub struct Command {
#[command(subcommand)]
subcommand: Subcommands,
}
impl Command {
    /// Execute `p2p rlpx` command.
    ///
    /// For `ping`: performs a full RLPx handshake (TCP connect → ECIES → p2p Hello exchange)
    /// with the target node using a freshly generated key, and prints the peer's Hello message.
    pub async fn execute(self) -> eyre::Result<()> {
        match self.subcommand {
            Subcommands::Ping { node } => {
                let key = rng_secret_key();
                let node_record = node
                    .node_record()
                    .ok_or_else(|| eyre::eyre!("failed to parse node {}", node))?;
                // Plain TCP connection, then the encrypted ECIES layer on top of it.
                let outgoing =
                    TcpStream::connect((node_record.address, node_record.tcp_port)).await?;
                let ecies_stream = ECIESStream::connect(outgoing, key, node_record.id).await?;
                let peer_id = pk2id(&key.public_key(SECP256K1));
                let hello = HelloMessage::builder(peer_id).build();
                // The p2p handshake yields the remote peer's Hello, which is what we report.
                let (_, their_hello) =
                    UnauthedP2PStream::new(ecies_stream).handshake(hello).await?;
                println!("{their_hello:#?}");
            }
        }
        Ok(())
    }
}
#[derive(Subcommand, Debug)]
// Private: only constructed by clap via the parent `Command`.
enum Subcommands {
    /// ping node
    Ping {
        /// The node to ping.
        node: AnyNode,
    },
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/commands/src/p2p/mod.rs | crates/cli/commands/src/p2p/mod.rs | //! P2P Debugging tool
use std::{path::PathBuf, sync::Arc};
use crate::common::CliNodeTypes;
use alloy_eips::BlockHashOrNumber;
use backon::{ConstantBuilder, Retryable};
use clap::{Parser, Subcommand};
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_util::{get_secret_key, hash_or_num_value_parser};
use reth_config::Config;
use reth_network::{BlockDownloaderProvider, NetworkConfigBuilder};
use reth_network_p2p::bodies::client::BodiesClient;
use reth_node_core::{
args::{DatadirArgs, NetworkArgs},
utils::get_single_header,
};
pub mod bootnode;
pub mod rlpx;
/// `reth p2p` command
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    /// The `p2p` subcommand to run.
    #[command(subcommand)]
    command: Subcommands<C>,
}
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>> Command<C> {
    /// Execute `p2p` command.
    ///
    /// Spawns a network for the download subcommands, retrying failed requests with the
    /// constant backoff configured via `--retries`.
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(self) -> eyre::Result<()> {
        match self.command {
            Subcommands::Header { args, id } => {
                let handle = args.launch_network::<N>().await?;
                let fetch_client = handle.fetch_client().await?;
                let backoff = args.backoff();
                // Retry header request until it succeeds or the retry budget is exhausted.
                let header = (move || get_single_header(fetch_client.clone(), id))
                    .retry(backoff)
                    .notify(|err, _| println!("Error requesting header: {err}. Retrying..."))
                    .await?;
                println!("Successfully downloaded header: {header:?}");
            }
            Subcommands::Body { args, id } => {
                let handle = args.launch_network::<N>().await?;
                let fetch_client = handle.fetch_client().await?;
                let backoff = args.backoff();
                // Bodies can only be requested by hash, so resolve a number to its hash first.
                let hash = match id {
                    BlockHashOrNumber::Hash(hash) => hash,
                    BlockHashOrNumber::Number(number) => {
                        println!("Block number provided. Downloading header first...");
                        let client = fetch_client.clone();
                        let header = (move || {
                            get_single_header(client.clone(), BlockHashOrNumber::Number(number))
                        })
                        .retry(backoff)
                        .notify(|err, _| println!("Error requesting header: {err}. Retrying..."))
                        .await?;
                        header.hash()
                    }
                };
                let (_, result) = (move || {
                    let client = fetch_client.clone();
                    client.get_block_bodies(vec![hash])
                })
                .retry(backoff)
                .notify(|err, _| println!("Error requesting block: {err}. Retrying..."))
                .await?
                .split();
                // We requested exactly one body, so anything else is a protocol violation.
                if result.len() != 1 {
                    eyre::bail!(
                        "Invalid number of bodies received. Expected: 1. Received: {}",
                        result.len()
                    )
                }
                let body = result.into_iter().next().unwrap();
                println!("Successfully downloaded body: {body:?}")
            }
            Subcommands::Rlpx(command) => {
                command.execute().await?;
            }
            Subcommands::Bootnode(command) => {
                command.execute().await?;
            }
        }
        Ok(())
    }
}
impl<C: ChainSpecParser> Command<C> {
    /// Returns the underlying chain being used to run this command, if the subcommand
    /// carries one.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        match &self.command {
            // Only the download subcommands are configured with a chain spec.
            Subcommands::Header { args, .. } | Subcommands::Body { args, .. } => {
                Some(&args.chain)
            }
            Subcommands::Rlpx(_) | Subcommands::Bootnode(_) => None,
        }
    }
}
/// `reth p2p` subcommands
#[derive(Subcommand, Debug)]
pub enum Subcommands<C: ChainSpecParser> {
    /// Download block header
    Header {
        #[command(flatten)]
        args: DownloadArgs<C>,
        /// The header number or hash
        #[arg(value_parser = hash_or_num_value_parser)]
        id: BlockHashOrNumber,
    },
    /// Download block body
    Body {
        #[command(flatten)]
        args: DownloadArgs<C>,
        /// The block number or hash
        #[arg(value_parser = hash_or_num_value_parser)]
        id: BlockHashOrNumber,
    },
    /// RLPx utilities
    Rlpx(rlpx::Command),
    /// Bootnode command
    Bootnode(bootnode::Command),
}
/// Arguments shared by the `header` and `body` download subcommands.
#[derive(Debug, Clone, Parser)]
pub struct DownloadArgs<C: ChainSpecParser> {
    /// The number of retries per request
    #[arg(long, default_value = "5")]
    retries: usize,
    // Network (discovery, NAT, secret key, trusted peers) configuration.
    #[command(flatten)]
    network: NetworkArgs,
    // Data directory; used to resolve default config and p2p secret paths.
    #[command(flatten)]
    datadir: DatadirArgs,
    /// The path to the configuration file to use.
    #[arg(long, value_name = "FILE", verbatim_doc_comment)]
    config: Option<PathBuf>,
    /// The chain this node is running.
    ///
    /// Possible values are either a built-in chain or the path to a chain specification file.
    #[arg(
        long,
        value_name = "CHAIN_OR_PATH",
        long_help = C::help_message(),
        default_value = C::SUPPORTED_CHAINS[0],
        value_parser = C::parser()
    )]
    chain: Arc<C::ChainSpec>,
}
impl<C: ChainSpecParser> DownloadArgs<C> {
    /// Creates and spawns the network and returns the handle.
    pub async fn launch_network<N>(
        &self,
    ) -> eyre::Result<reth_network::NetworkHandle<N::NetworkPrimitives>>
    where
        C::ChainSpec: EthChainSpec + Hardforks + EthereumHardforks + Send + Sync + 'static,
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
    {
        let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain());
        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
        // Load configuration; a missing/unreadable config file falls back to defaults.
        let mut config = Config::from_path(&config_path).unwrap_or_default();
        config.peers.trusted_nodes.extend(self.network.trusted_peers.clone());
        // `--trusted-only` without any trusted nodes could never connect to anyone.
        if config.peers.trusted_nodes.is_empty() && self.network.trusted_only {
            eyre::bail!(
                "No trusted nodes. Set trusted peer with `--trusted-peer <enode record>` or set `--trusted-only` to `false`"
            )
        }
        config.peers.trusted_nodes_only = self.network.trusted_only;
        // CLI-provided secret key path wins over the datadir default.
        let default_secret_key_path = data_dir.p2p_secret();
        let secret_key_path =
            self.network.p2p_secret_key.clone().unwrap_or(default_secret_key_path);
        let p2p_secret_key = get_secret_key(&secret_key_path)?;
        let rlpx_socket = (self.network.addr, self.network.port).into();
        let boot_nodes = self.chain.bootnodes().unwrap_or_default();
        let net = NetworkConfigBuilder::<N::NetworkPrimitives>::new(p2p_secret_key)
            .peer_config(config.peers_config_with_basic_nodes_from_file(None))
            .external_ip_resolver(self.network.nat)
            .boot_nodes(boot_nodes.clone())
            .apply(|builder| {
                self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes)
            })
            .build_with_noop_provider(self.chain.clone())
            .manager()
            .await?;
        let handle = net.handle().clone();
        // Detach the network manager; it runs for the remainder of the process.
        tokio::task::spawn(net);
        Ok(handle)
    }
    /// Returns the constant-backoff retry policy derived from `--retries`.
    ///
    /// Clamped to at least one attempt so a `--retries 0` still issues the request once.
    pub fn backoff(&self) -> ConstantBuilder {
        ConstantBuilder::default().with_max_times(self.retries.max(1))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_ethereum_cli::chainspec::EthereumChainSpecParser;
    // Smoke tests: ensure clap can parse the download subcommands with a chain + id.
    #[test]
    fn parse_header_cmd() {
        let _args: Command<EthereumChainSpecParser> =
            Command::parse_from(["reth", "header", "--chain", "mainnet", "1000"]);
    }
    #[test]
    fn parse_body_cmd() {
        let _args: Command<EthereumChainSpecParser> =
            Command::parse_from(["reth", "body", "--chain", "mainnet", "1000"]);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/runner/src/lib.rs | crates/cli/runner/src/lib.rs | //! A tokio based CLI runner.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
//! Entrypoint for running commands.
use reth_tasks::{TaskExecutor, TaskManager};
use std::{future::Future, pin::pin, sync::mpsc, time::Duration};
use tracing::{debug, error, trace};
/// Executes CLI commands.
///
/// Provides utilities for running a cli command to completion.
#[derive(Debug)]
#[non_exhaustive]
pub struct CliRunner {
    /// Runtime used to block on command futures and drive spawned tasks.
    tokio_runtime: tokio::runtime::Runtime,
}
impl CliRunner {
    /// Attempts to create a new [`CliRunner`] using the default tokio
    /// [`Runtime`](tokio::runtime::Runtime).
    ///
    /// The default tokio runtime is multi-threaded, with both I/O and time drivers enabled.
    pub fn try_default_runtime() -> Result<Self, std::io::Error> {
        tokio_runtime().map(|tokio_runtime| Self { tokio_runtime })
    }
    /// Create a new [`CliRunner`] from a provided tokio [`Runtime`](tokio::runtime::Runtime).
    pub const fn from_runtime(tokio_runtime: tokio::runtime::Runtime) -> Self {
        Self { tokio_runtime }
    }
}
// === impl CliRunner ===
impl CliRunner {
    /// Executes the given _async_ command on the tokio runtime until the command future resolves or
    /// until the process receives a `SIGINT` or `SIGTERM` signal.
    ///
    /// Tasks spawned by the command via the [`TaskExecutor`] are shut down and an attempt is made
    /// to drive their shutdown to completion after the command has finished.
    pub fn run_command_until_exit<F, E>(
        self,
        command: impl FnOnce(CliContext) -> F,
    ) -> Result<(), E>
    where
        F: Future<Output = Result<(), E>>,
        E: Send + Sync + From<std::io::Error> + From<reth_tasks::PanickedTaskError> + 'static,
    {
        let AsyncCliRunner { context, mut task_manager, tokio_runtime } =
            AsyncCliRunner::new(self.tokio_runtime);
        // Executes the command until it finished or ctrl-c was fired
        let command_res = tokio_runtime.block_on(run_to_completion_or_panic(
            &mut task_manager,
            run_until_ctrl_c(command(context)),
        ));
        if command_res.is_err() {
            // On error we skip graceful shutdown and tear the runtime down immediately.
            error!(target: "reth::cli", "shutting down due to error");
        } else {
            debug!(target: "reth::cli", "shutting down gracefully");
            // after the command has finished or exit signal was received we shutdown the task
            // manager which fires the shutdown signal to all tasks spawned via the task
            // executor and awaiting on tasks spawned with graceful shutdown
            task_manager.graceful_shutdown_with_timeout(Duration::from_secs(5));
        }
        // `drop(tokio_runtime)` would block the current thread until its pools
        // (including blocking pool) are shutdown. Since we want to exit as soon as possible, drop
        // it on a separate thread and wait for up to 5 seconds for this operation to
        // complete.
        let (tx, rx) = mpsc::channel();
        std::thread::Builder::new()
            .name("tokio-runtime-shutdown".to_string())
            .spawn(move || {
                drop(tokio_runtime);
                let _ = tx.send(());
            })
            .unwrap();
        // The channel send doubles as a "drop finished" notification.
        let _ = rx.recv_timeout(Duration::from_secs(5)).inspect_err(|err| {
            debug!(target: "reth::cli", %err, "tokio runtime shutdown timed out");
        });
        command_res
    }
    /// Executes a regular future until completion or until external signal received.
    pub fn run_until_ctrl_c<F, E>(self, fut: F) -> Result<(), E>
    where
        F: Future<Output = Result<(), E>>,
        E: Send + Sync + From<std::io::Error> + 'static,
    {
        self.tokio_runtime.block_on(run_until_ctrl_c(fut))?;
        Ok(())
    }
    /// Executes a regular future as a spawned blocking task until completion or until external
    /// signal received.
    ///
    /// See [`Runtime::spawn_blocking`](tokio::runtime::Runtime::spawn_blocking) .
    pub fn run_blocking_until_ctrl_c<F, E>(self, fut: F) -> Result<(), E>
    where
        F: Future<Output = Result<(), E>> + Send + 'static,
        E: Send + Sync + From<std::io::Error> + 'static,
    {
        let tokio_runtime = self.tokio_runtime;
        let handle = tokio_runtime.handle().clone();
        // Run the future on a blocking-pool thread, driven by a handle to the same runtime.
        let fut = tokio_runtime.handle().spawn_blocking(move || handle.block_on(fut));
        tokio_runtime
            .block_on(run_until_ctrl_c(async move { fut.await.expect("Failed to join task") }))?;
        // drop the tokio runtime on a separate thread because drop blocks until its pools
        // (including blocking pool) are shutdown. In other words `drop(tokio_runtime)` would block
        // the current thread but we want to exit right away.
        std::thread::Builder::new()
            .name("tokio-runtime-shutdown".to_string())
            .spawn(move || drop(tokio_runtime))
            .unwrap();
        Ok(())
    }
}
/// [`CliRunner`] configuration when executing commands asynchronously
struct AsyncCliRunner {
    /// Context handed to the command being executed.
    context: CliContext,
    /// Manages tasks spawned through the executor; surfaces critical task panics.
    task_manager: TaskManager,
    /// The runtime the command future is blocked on.
    tokio_runtime: tokio::runtime::Runtime,
}
// === impl AsyncCliRunner ===
impl AsyncCliRunner {
    /// Given a tokio [`Runtime`](tokio::runtime::Runtime), creates additional context required to
    /// execute commands asynchronously.
    fn new(tokio_runtime: tokio::runtime::Runtime) -> Self {
        // The task manager is bound to the runtime's handle; commands spawn via its executor.
        let task_manager = TaskManager::new(tokio_runtime.handle().clone());
        let context = CliContext { task_executor: task_manager.executor() };
        Self { context, task_manager, tokio_runtime }
    }
}
/// Additional context provided by the [`CliRunner`] when executing commands
#[derive(Debug)]
pub struct CliContext {
    /// Used to execute/spawn tasks
    pub task_executor: TaskExecutor,
}
/// Creates a new default tokio multi-thread [Runtime](tokio::runtime::Runtime) with all features
/// enabled
///
/// # Errors
///
/// Returns the I/O error reported by the tokio builder if the runtime cannot be created.
pub fn tokio_runtime() -> Result<tokio::runtime::Runtime, std::io::Error> {
    tokio::runtime::Builder::new_multi_thread().enable_all().build()
}
/// Runs the given future to completion or until a critical task panicked.
///
/// Returns the error if a task panicked, or the given future returned an error.
async fn run_to_completion_or_panic<F, E>(tasks: &mut TaskManager, fut: F) -> Result<(), E>
where
    F: Future<Output = Result<(), E>>,
    E: Send + Sync + From<reth_tasks::PanickedTaskError> + 'static,
{
    // Inner block scopes the pinned future so its borrow ends before `Ok(())`.
    {
        let fut = pin!(fut);
        tokio::select! {
            // The task manager resolves when a critical task panicked.
            task_manager_result = tasks => {
                if let Err(panicked_error) = task_manager_result {
                    return Err(panicked_error.into());
                }
            },
            res = fut => res?,
        }
    }
    Ok(())
}
/// Runs the future to completion or until:
/// - `ctrl-c` is received.
/// - `SIGTERM` is received (unix only).
async fn run_until_ctrl_c<F, E>(fut: F) -> Result<(), E>
where
    F: Future<Output = Result<(), E>>,
    E: Send + Sync + 'static + From<std::io::Error>,
{
    let ctrl_c = tokio::signal::ctrl_c();
    // On unix we additionally race against SIGTERM; elsewhere only ctrl-c is handled.
    #[cfg(unix)]
    {
        let mut stream = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?;
        let sigterm = stream.recv();
        let sigterm = pin!(sigterm);
        let ctrl_c = pin!(ctrl_c);
        let fut = pin!(fut);
        tokio::select! {
            _ = ctrl_c => {
                trace!(target: "reth::cli", "Received ctrl-c");
            },
            _ = sigterm => {
                trace!(target: "reth::cli", "Received SIGTERM");
            },
            res = fut => res?,
        }
    }
    #[cfg(not(unix))]
    {
        let ctrl_c = pin!(ctrl_c);
        let fut = pin!(fut);
        tokio::select! {
            _ = ctrl_c => {
                trace!(target: "reth::cli", "Received ctrl-c");
            },
            res = fut => res?,
        }
    }
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/cli/src/lib.rs | crates/cli/cli/src/lib.rs | //! Cli abstraction for reth based nodes.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use clap::{Error, Parser};
use reth_cli_runner::CliRunner;
use reth_db::ClientVersion;
use std::{borrow::Cow, ffi::OsString};
/// The chainspec module defines the different chainspecs that can be used by the node.
pub mod chainspec;
use crate::chainspec::ChainSpecParser;
/// Reth based node cli.
///
/// This trait is supposed to be implemented by the main struct of the CLI.
///
/// It provides commonly used functionality for running commands and information about the CL, such
/// as the name and version.
pub trait RethCli: Sized {
    /// The associated `ChainSpecParser` type
    type ChainSpecParser: ChainSpecParser;
    /// The name of the implementation, eg. `reth`, `op-reth`, etc.
    fn name(&self) -> Cow<'static, str>;
    /// The version of the node, such as `reth/v1.0.0`
    fn version(&self) -> Cow<'static, str>;
    /// Parse args from iterator from [`std::env::args_os()`].
    fn parse_args() -> Result<Self, Error>
    where
        Self: Parser,
    {
        <Self as RethCli>::try_parse_from(std::env::args_os())
    }
    /// Parse args from the given iterator.
    fn try_parse_from<I, T>(itr: I) -> Result<Self, Error>
    where
        Self: Parser,
        I: IntoIterator<Item = T>,
        T: Into<OsString> + Clone,
    {
        <Self as Parser>::try_parse_from(itr)
    }
    /// Executes a command.
    fn with_runner<F, R>(self, f: F, runner: CliRunner) -> R
    where
        F: FnOnce(Self, CliRunner) -> R,
    {
        f(self, runner)
    }
    /// Parses and executes a command.
    ///
    /// Parses CLI args, builds a default [`CliRunner`], and hands both to `f`.
    fn execute<F, R>(f: F) -> Result<R, Error>
    where
        Self: Parser,
        F: FnOnce(Self, CliRunner) -> R,
    {
        let cli = Self::parse_args()?;
        let runner = CliRunner::try_default_runtime()?;
        Ok(cli.with_runner(f, runner))
    }
    /// The client version of the node.
    fn client_version() -> ClientVersion;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/cli/cli/src/chainspec.rs | crates/cli/cli/src/chainspec.rs | use std::{fs, path::PathBuf, sync::Arc};
use clap::builder::TypedValueParser;
/// Clap [`TypedValueParser`] adapter that defers chain-spec parsing to `C`'s
/// [`ChainSpecParser::parse`] implementation.
#[derive(Debug, Clone)]
struct Parser<C>(std::marker::PhantomData<C>);
impl<C: ChainSpecParser> TypedValueParser for Parser<C> {
    type Value = Arc<C::ChainSpec>;
    /// Parses a CLI value into a chain spec, rendering parse failures as clap errors that
    /// list the supported built-in chains.
    fn parse_ref(
        &self,
        _cmd: &clap::Command,
        arg: Option<&clap::Arg>,
        value: &std::ffi::OsStr,
    ) -> Result<Self::Value, clap::Error> {
        let val =
            value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
        C::parse(val).map_err(|err| {
            // Build a helpful error that shows the offending arg and supported chains.
            let arg = arg.map(|a| a.to_string()).unwrap_or_else(|| "...".to_owned());
            let possible_values = C::SUPPORTED_CHAINS.join(",");
            let msg = format!(
                "Invalid value '{val}' for {arg}: {err}.\n    [possible values: {possible_values}]"
            );
            clap::Error::raw(clap::error::ErrorKind::InvalidValue, msg)
        })
    }
}
/// Trait for parsing chain specifications.
///
/// This trait extends [`clap::builder::TypedValueParser`] to provide a parser for chain
/// specifications. Implementers of this trait must provide a list of supported chains and a
/// function to parse a given string into a chain spec.
pub trait ChainSpecParser: Clone + Send + Sync + 'static {
    /// The chain specification type.
    type ChainSpec: std::fmt::Debug + Send + Sync;
    /// List of supported chains.
    const SUPPORTED_CHAINS: &'static [&'static str];
    /// Parses the given string into a chain spec.
    ///
    /// # Arguments
    ///
    /// * `s` - A string slice that holds the chain spec to be parsed.
    ///
    /// # Errors
    ///
    /// This function will return an error if the input string cannot be parsed into a valid
    /// chain spec.
    fn parse(s: &str) -> eyre::Result<Arc<Self::ChainSpec>>;
    /// Produces a [`TypedValueParser`] for this chain spec parser.
    fn parser() -> impl TypedValueParser<Value = Arc<Self::ChainSpec>> {
        Parser(std::marker::PhantomData::<Self>)
    }
    /// Produces a help message for the chain spec argument.
    fn help_message() -> String {
        format!(
            "The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n    {}",
            Self::SUPPORTED_CHAINS.join(", ")
        )
    }
}
/// A helper to parse a [`Genesis`](seismic_alloy_genesis::Genesis) as argument or from disk.
///
/// The input is first treated as a (shell-expandable) file path; if reading fails and the
/// string contains a `{`, it is parsed as inline JSON instead.
pub fn parse_genesis(s: &str) -> eyre::Result<seismic_alloy_genesis::Genesis> {
    // try to read json from path first
    let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) {
        Ok(raw) => raw,
        Err(io_err) => {
            // valid json may start with "\n", but must contain "{"
            if s.contains('{') {
                s.to_string()
            } else {
                return Err(io_err.into()) // assume invalid path
            }
        }
    };
    Ok(serde_json::from_str(&raw)?)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chainspec/src/lib.rs | crates/chainspec/src/lib.rs | //! The spec of an Ethereum network
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
use alloy_genesis as _;
extern crate alloc;
/// Chain specific constants
mod constants;
pub use constants::*;
mod api;
/// The chain info module.
mod info;
/// The chain spec module.
mod spec;
pub use alloy_chains::{Chain, ChainKind, NamedChain};
/// Re-export for convenience
pub use reth_ethereum_forks::*;
pub use api::EthChainSpec;
pub use info::ChainInfo;
#[cfg(any(test, feature = "test-utils"))]
pub use spec::test_fork_ids;
pub use spec::{
make_genesis_header, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder,
ChainSpecProvider, DepositContract, ForkBaseFeeParams, DEV, HOLESKY, HOODI, MAINNET, SEPOLIA,
};
use reth_primitives_traits::sync::OnceLock;
/// Simple utility to create a thread-safe sync cell with a value set.
///
/// Returns a [`OnceLock`] that is already initialized with `value`.
pub fn once_cell_set<T>(value: T) -> OnceLock<T> {
    let cell = OnceLock::new();
    // A freshly created cell is empty, so this `set` can only succeed.
    let _ = cell.set(value);
    cell
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::U256;
    use alloy_rlp::Encodable;
    use std::str::FromStr;
    // Tests below exercise `Chain` construction, display, conversion and parsing.
    #[test]
    fn test_id() {
        let chain = Chain::from(1234);
        assert_eq!(chain.id(), 1234);
    }
    #[test]
    fn test_named_id() {
        let chain = Chain::from_named(NamedChain::Holesky);
        assert_eq!(chain.id(), 17000);
    }
    #[test]
    fn test_display_named_chain() {
        let chain = Chain::from_named(NamedChain::Mainnet);
        assert_eq!(format!("{chain}"), "mainnet");
    }
    #[test]
    fn test_display_id_chain() {
        let chain = Chain::from(1234);
        assert_eq!(format!("{chain}"), "1234");
    }
    #[test]
    fn test_from_u256() {
        let n = U256::from(1234);
        let chain = Chain::from(n.to::<u64>());
        let expected = Chain::from(1234);
        assert_eq!(chain, expected);
    }
    #[test]
    fn test_into_u256() {
        let chain = Chain::from_named(NamedChain::Holesky);
        let n: U256 = U256::from(chain.id());
        let expected = U256::from(17000);
        assert_eq!(n, expected);
    }
    #[test]
    fn test_from_str_named_chain() {
        let result = Chain::from_str("mainnet");
        let expected = Chain::from_named(NamedChain::Mainnet);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), expected);
    }
    #[test]
    fn test_from_str_named_chain_error() {
        let result = Chain::from_str("chain");
        assert!(result.is_err());
    }
    #[test]
    fn test_from_str_id_chain() {
        let result = Chain::from_str("1234");
        let expected = Chain::from(1234);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), expected);
    }
    #[test]
    fn test_default() {
        let default = Chain::default();
        let expected = Chain::from_named(NamedChain::Mainnet);
        assert_eq!(default, expected);
    }
    #[test]
    fn test_id_chain_encodable_length() {
        // RLP length of the chain id 1234 (two bytes + length prefix).
        let chain = Chain::from(1234);
        assert_eq!(chain.length(), 3);
    }
    #[test]
    fn test_dns_main_network() {
        let s = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@all.mainnet.ethdisco.net";
        let chain: Chain = NamedChain::Mainnet.into();
        assert_eq!(s, chain.public_dns_network_protocol().unwrap().as_str());
    }
    #[test]
    fn test_dns_holesky_network() {
        let s = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@all.holesky.ethdisco.net";
        let chain: Chain = NamedChain::Holesky.into();
        assert_eq!(s, chain.public_dns_network_protocol().unwrap().as_str());
    }
    // Verifies that `EthChainSpec::next_block_base_fee` matches the header's own
    // EIP-1559 calculation for the same base-fee params.
    #[test]
    fn test_centralized_base_fee_calculation() {
        use crate::{ChainSpec, EthChainSpec};
        use alloy_consensus::Header;
        use alloy_eips::eip1559::INITIAL_BASE_FEE;
        fn parent_header() -> Header {
            Header {
                gas_used: 15_000_000,
                gas_limit: 30_000_000,
                base_fee_per_gas: Some(INITIAL_BASE_FEE),
                timestamp: 1_000,
                ..Default::default()
            }
        }
        let spec = ChainSpec::default();
        let parent = parent_header();
        // For testing, assume next block has timestamp 12 seconds later
        let next_timestamp = parent.timestamp + 12;
        let expected = parent
            .next_block_base_fee(spec.base_fee_params_at_timestamp(next_timestamp))
            .unwrap_or_default();
        let got = spec.next_block_base_fee(&parent, next_timestamp).unwrap_or_default();
        assert_eq!(expected, got, "Base fee calculation does not match expected value");
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chainspec/src/info.rs | crates/chainspec/src/info.rs | use alloy_eips::BlockNumHash;
use alloy_primitives::{BlockNumber, B256};
/// Current status of the blockchain's head.
#[derive(Default, Copy, Clone, Debug, Eq, PartialEq)]
pub struct ChainInfo {
    /// The block hash of the highest fully synced block.
    pub best_hash: B256,
    /// The block number of the highest fully synced block.
    pub best_number: BlockNumber,
}
impl From<ChainInfo> for BlockNumHash {
fn from(value: ChainInfo) -> Self {
Self { number: value.best_number, hash: value.best_hash }
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chainspec/src/api.rs | crates/chainspec/src/api.rs | use crate::{ChainSpec, DepositContract};
use alloc::{boxed::Box, vec::Vec};
use alloy_chains::Chain;
use alloy_consensus::Header;
use alloy_eips::{calc_next_block_base_fee, eip1559::BaseFeeParams, eip7840::BlobParams};
use alloy_primitives::{B256, U256};
use core::fmt::{Debug, Display};
use reth_ethereum_forks::EthereumHardforks;
use reth_network_peers::NodeRecord;
use reth_primitives_traits::{AlloyBlockHeader, BlockHeader};
use seismic_alloy_genesis::Genesis;
/// Trait representing type configuring a chain spec.
#[auto_impl::auto_impl(&, Arc)]
pub trait EthChainSpec: Send + Sync + Unpin + Debug {
    /// The header type of the network.
    type Header: BlockHeader;
    /// Returns the [`Chain`] object this spec targets.
    fn chain(&self) -> Chain;
    /// Returns the chain id number
    fn chain_id(&self) -> u64 {
        self.chain().id()
    }
    /// Get the [`BaseFeeParams`] for the chain at the given timestamp.
    fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams;
    /// Get the [`BlobParams`] for the given timestamp
    fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<BlobParams>;
    /// Returns the deposit contract data for the chain, if it's present
    fn deposit_contract(&self) -> Option<&DepositContract>;
    /// The genesis hash.
    fn genesis_hash(&self) -> B256;
    /// The delete limit for pruner, per run.
    fn prune_delete_limit(&self) -> usize;
    /// Returns a string representation of the hardforks.
    fn display_hardforks(&self) -> Box<dyn Display>;
    /// The genesis header.
    fn genesis_header(&self) -> &Self::Header;
    /// The genesis block specification.
    fn genesis(&self) -> &Genesis;
    /// The bootnodes for the chain, if any.
    fn bootnodes(&self) -> Option<Vec<NodeRecord>>;
    /// Returns `true` if this chain contains Optimism configuration.
    fn is_optimism(&self) -> bool {
        self.chain().is_optimism()
    }
    /// Returns `true` if this chain contains Ethereum configuration.
    fn is_ethereum(&self) -> bool {
        self.chain().is_ethereum()
    }
    /// Returns the final total difficulty if the Paris hardfork is known.
    fn final_paris_total_difficulty(&self) -> Option<U256>;
    /// See [`calc_next_block_base_fee`].
    ///
    /// Returns `None` if the parent header carries no base fee (pre-London).
    fn next_block_base_fee(&self, parent: &Self::Header, target_timestamp: u64) -> Option<u64> {
        Some(calc_next_block_base_fee(
            parent.gas_used(),
            parent.gas_limit(),
            parent.base_fee_per_gas()?,
            self.base_fee_params_at_timestamp(target_timestamp),
        ))
    }
}
impl EthChainSpec for ChainSpec {
    type Header = Header;
    fn chain(&self) -> Chain {
        self.chain
    }
    fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams {
        self.base_fee_params_at_timestamp(timestamp)
    }
    // Resolves blob params by priority: an explicitly scheduled entry wins, otherwise
    // fall back through the newest-to-oldest active hardfork defaults.
    fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<BlobParams> {
        if let Some(blob_param) = self.blob_params.active_scheduled_params_at_timestamp(timestamp) {
            Some(*blob_param)
        } else if self.is_osaka_active_at_timestamp(timestamp) {
            Some(self.blob_params.osaka)
        } else if self.is_prague_active_at_timestamp(timestamp) {
            Some(self.blob_params.prague)
        } else if self.is_cancun_active_at_timestamp(timestamp) {
            Some(self.blob_params.cancun)
        } else {
            None
        }
    }
    fn deposit_contract(&self) -> Option<&DepositContract> {
        self.deposit_contract.as_ref()
    }
    fn genesis_hash(&self) -> B256 {
        self.genesis_hash()
    }
    fn prune_delete_limit(&self) -> usize {
        self.prune_delete_limit
    }
    fn display_hardforks(&self) -> Box<dyn Display> {
        Box::new(Self::display_hardforks(self))
    }
    fn genesis_header(&self) -> &Self::Header {
        self.genesis_header()
    }
    fn genesis(&self) -> &Genesis {
        self.genesis()
    }
    fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
        self.bootnodes()
    }
    // `ChainSpec` models Ethereum-style chains, so this overrides the chain-id-based
    // default to always report non-Optimism.
    fn is_optimism(&self) -> bool {
        false
    }
    fn final_paris_total_difficulty(&self) -> Option<U256> {
        self.paris_block_and_final_difficulty.map(|(_, final_difficulty)| final_difficulty)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chainspec/src/constants.rs | crates/chainspec/src/constants.rs | use crate::spec::DepositContract;
use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS;
use alloy_primitives::b256;
/// Gas per transaction not creating a contract.
pub const MIN_TRANSACTION_GAS: u64 = 21_000u64;
/// Mainnet prune delete limit.
pub const MAINNET_PRUNE_DELETE_LIMIT: usize = 20000;
/// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa`
// Arguments: contract address, deployment block, and event topic hash — presumably the
// deposit event signature; verify against `DepositContract::new`'s parameter docs.
pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new(
    MAINNET_DEPOSIT_CONTRACT_ADDRESS,
    11052984,
    b256!("0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chainspec/src/spec.rs | crates/chainspec/src/spec.rs | pub use alloy_eips::eip1559::BaseFeeParams;
use alloy_evm::eth::spec::EthExecutorSpec;
use crate::{
constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT},
EthChainSpec,
};
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use alloy_chains::{Chain, NamedChain};
use alloy_consensus::{
constants::{
EMPTY_WITHDRAWALS, HOLESKY_GENESIS_HASH, HOODI_GENESIS_HASH, MAINNET_GENESIS_HASH,
SEPOLIA_GENESIS_HASH,
},
Header,
};
use alloy_eips::{
eip1559::INITIAL_BASE_FEE, eip7685::EMPTY_REQUESTS_HASH, eip7892::BlobScheduleBlobParams,
};
use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256};
use alloy_seismic_evm::hardfork::{SeismicHardfork, SeismicHardforks};
use alloy_trie::root::state_root_ref_unhashed;
use core::fmt::Debug;
use derive_more::From;
use reth_ethereum_forks::{
ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition,
ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Hardforks, Head, DEV_HARDFORKS,
};
use reth_network_peers::{
holesky_nodes, hoodi_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes,
NodeRecord,
};
use reth_primitives_traits::{sync::LazyLock, SealedHeader};
use seismic_alloy_genesis::Genesis;
/// Helper method building a [`Header`] given [`Genesis`] and [`ChainHardforks`].
pub fn make_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) -> Header {
    // If London is activated at genesis, we set the initial base fee as per EIP-1559.
    let base_fee_per_gas = hardforks
        .fork(EthereumHardfork::London)
        .active_at_block(0)
        .then(|| genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(INITIAL_BASE_FEE));
    // Without the `timestamp-in-seconds` feature the genesis timestamp is treated as
    // milliseconds and converted for fork-activation checks.
    let genesis_timestamp_seconds = if cfg!(feature = "timestamp-in-seconds") {
        genesis.timestamp
    } else {
        genesis.timestamp / 1000
    };
    // If shanghai is activated, initialize the header with an empty withdrawals hash, and
    // empty withdrawals list.
    let withdrawals_root = hardforks
        .fork(EthereumHardfork::Shanghai)
        .active_at_timestamp(genesis_timestamp_seconds)
        .then_some(EMPTY_WITHDRAWALS);
    // If Cancun is activated at genesis, we set:
    // * parent beacon block root to 0x0
    // * blob gas used to provided genesis or 0x0
    // * excess blob gas to provided genesis or 0x0
    let (parent_beacon_block_root, blob_gas_used, excess_blob_gas) = if hardforks
        .fork(EthereumHardfork::Cancun)
        .active_at_timestamp(genesis_timestamp_seconds)
    {
        let blob_gas_used = genesis.blob_gas_used.unwrap_or(0);
        let excess_blob_gas = genesis.excess_blob_gas.unwrap_or(0);
        (Some(B256::ZERO), Some(blob_gas_used), Some(excess_blob_gas))
    } else {
        (None, None, None)
    };
    // If Prague is activated at genesis we set requests root to an empty trie root.
    let requests_hash = hardforks
        .fork(EthereumHardfork::Prague)
        .active_at_timestamp(genesis_timestamp_seconds)
        .then_some(EMPTY_REQUESTS_HASH);
    // NOTE(review): the header keeps the raw `genesis.timestamp` while the fork-activation
    // checks above use the seconds-converted value — confirm this asymmetry is intended.
    Header {
        gas_limit: genesis.gas_limit,
        difficulty: genesis.difficulty,
        nonce: genesis.nonce.into(),
        extra_data: genesis.extra_data.clone(),
        state_root: state_root_ref_unhashed(&genesis.alloc),
        timestamp: genesis.timestamp,
        mix_hash: genesis.mix_hash,
        beneficiary: genesis.coinbase,
        base_fee_per_gas,
        withdrawals_root,
        parent_beacon_block_root,
        blob_gas_used,
        excess_blob_gas,
        requests_hash,
        ..Default::default()
    }
}
/// The Ethereum mainnet spec
pub static MAINNET: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| {
    // Genesis is embedded at compile time; a parse failure is a build artifact bug.
    let genesis = serde_json::from_str(include_str!("../res/genesis/mainnet.json"))
        .expect("Can't deserialize Mainnet genesis json");
    let hardforks = EthereumHardfork::mainnet().into();
    let mut spec = ChainSpec {
        chain: Chain::mainnet(),
        // The genesis hash is well-known, so seal with it rather than recomputing.
        genesis_header: SealedHeader::new(
            make_genesis_header(&genesis, &hardforks),
            MAINNET_GENESIS_HASH,
        ),
        genesis,
        // <https://etherscan.io/block/15537394>
        paris_block_and_final_difficulty: Some((
            15537394,
            U256::from(58_750_003_716_598_352_816_469u128),
        )),
        hardforks,
        // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0
        deposit_contract: Some(MAINNET_DEPOSIT_CONTRACT),
        base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
        prune_delete_limit: MAINNET_PRUNE_DELETE_LIMIT,
        blob_params: BlobScheduleBlobParams::default(),
    };
    // Mainnet followed the DAO hard fork; the genesis json may not carry this flag.
    spec.genesis.config.dao_fork_support = true;
    spec.into()
});
/// The Sepolia spec
pub static SEPOLIA: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| {
    // Genesis is embedded at compile time; a parse failure is a build artifact bug.
    let genesis = serde_json::from_str(include_str!("../res/genesis/sepolia.json"))
        .expect("Can't deserialize Sepolia genesis json");
    let hardforks = EthereumHardfork::sepolia().into();
    let mut spec = ChainSpec {
        chain: Chain::sepolia(),
        genesis_header: SealedHeader::new(
            make_genesis_header(&genesis, &hardforks),
            SEPOLIA_GENESIS_HASH,
        ),
        genesis,
        // <https://sepolia.etherscan.io/block/1450409>
        paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))),
        hardforks,
        // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14
        deposit_contract: Some(DepositContract::new(
            address!("0x7f02c3e3c98b133055b8b348b2ac625669ed295d"),
            1273020,
            b256!("0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
        )),
        base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
        prune_delete_limit: 10000,
        blob_params: BlobScheduleBlobParams::default(),
    };
    spec.genesis.config.dao_fork_support = true;
    spec.into()
});
/// The Holesky spec
pub static HOLESKY: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| {
    // Genesis is embedded at compile time; a parse failure is a build artifact bug.
    let genesis = serde_json::from_str(include_str!("../res/genesis/holesky.json"))
        .expect("Can't deserialize Holesky genesis json");
    let hardforks = EthereumHardfork::holesky().into();
    let mut spec = ChainSpec {
        chain: Chain::holesky(),
        genesis_header: SealedHeader::new(
            make_genesis_header(&genesis, &hardforks),
            HOLESKY_GENESIS_HASH,
        ),
        genesis,
        // Holesky launched post-merge, so Paris is active from block 0.
        paris_block_and_final_difficulty: Some((0, U256::from(1))),
        hardforks,
        deposit_contract: Some(DepositContract::new(
            address!("0x4242424242424242424242424242424242424242"),
            0,
            b256!("0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
        )),
        base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
        prune_delete_limit: 10000,
        blob_params: BlobScheduleBlobParams::default(),
    };
    spec.genesis.config.dao_fork_support = true;
    spec.into()
});
/// The Hoodi spec
///
/// Genesis files from: <https://github.com/eth-clients/hoodi>
pub static HOODI: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| {
    // Genesis is embedded at compile time; a parse failure is a build artifact bug.
    let genesis = serde_json::from_str(include_str!("../res/genesis/hoodi.json"))
        .expect("Can't deserialize Hoodi genesis json");
    let hardforks = EthereumHardfork::hoodi().into();
    let mut spec = ChainSpec {
        chain: Chain::hoodi(),
        genesis_header: SealedHeader::new(
            make_genesis_header(&genesis, &hardforks),
            HOODI_GENESIS_HASH,
        ),
        genesis,
        // Hoodi launched post-merge, so Paris is active from block 0.
        paris_block_and_final_difficulty: Some((0, U256::from(0))),
        hardforks,
        deposit_contract: Some(DepositContract::new(
            address!("0x00000000219ab540356cBB839Cbe05303d7705Fa"),
            0,
            b256!("0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"),
        )),
        base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
        prune_delete_limit: 10000,
        blob_params: BlobScheduleBlobParams::default(),
    };
    spec.genesis.config.dao_fork_support = true;
    spec.into()
});
/// Dev testnet specification
///
/// Includes 20 prefunded accounts with `10_000` ETH each derived from mnemonic "test test test test
/// test test test test test test test junk".
pub static DEV: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| {
    // Genesis is embedded at compile time; a parse failure is a build artifact bug.
    let genesis = serde_json::from_str(include_str!("../res/genesis/dev.json"))
        .expect("Can't deserialize Dev testnet genesis json");
    let hardforks = DEV_HARDFORKS.clone();
    ChainSpec {
        chain: Chain::dev(),
        // No well-known genesis hash for the dev chain, so seal by hashing the built header.
        genesis_header: SealedHeader::seal_slow(make_genesis_header(&genesis, &hardforks)),
        genesis,
        paris_block_and_final_difficulty: Some((0, U256::from(0))),
        // Reuse the already-cloned hardforks instead of cloning `DEV_HARDFORKS` a second time.
        hardforks,
        base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
        deposit_contract: None, // TODO: do we even have?
        ..Default::default()
    }
    .into()
});
/// A wrapper around [`BaseFeeParams`] that allows for specifying constant or dynamic EIP-1559
/// parameters based on the active [Hardfork].
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BaseFeeParamsKind {
    /// Constant [`BaseFeeParams`]; used for chains that don't have dynamic EIP-1559 parameters
    Constant(BaseFeeParams),
    /// Variable [`BaseFeeParams`]; used for chains that have dynamic EIP-1559 parameters like
    /// Optimism
    Variable(ForkBaseFeeParams),
}
impl Default for BaseFeeParamsKind {
    /// Defaults to the constant Ethereum mainnet EIP-1559 parameters.
    fn default() -> Self {
        Self::Constant(BaseFeeParams::ethereum())
    }
}
impl From<BaseFeeParams> for BaseFeeParamsKind {
    /// Wraps the parameters as the constant (non-fork-dependent) variant.
    fn from(params: BaseFeeParams) -> Self {
        Self::Constant(params)
    }
}
impl From<ForkBaseFeeParams> for BaseFeeParamsKind {
    /// Wraps the per-fork parameter table as the variable (fork-dependent) variant.
    fn from(params: ForkBaseFeeParams) -> Self {
        Self::Variable(params)
    }
}
/// A type alias to a vector of tuples of [Hardfork] and [`BaseFeeParams`], sorted by [Hardfork]
/// activation order. This is used to specify dynamic EIP-1559 parameters for chains like Optimism.
#[derive(Clone, Debug, PartialEq, Eq, From)]
pub struct ForkBaseFeeParams(Vec<(Box<dyn Hardfork>, BaseFeeParams)>);
// Deref to the hardfork schedule so fork queries can be called directly on a `ChainSpec`.
impl core::ops::Deref for ChainSpec {
    type Target = ChainHardforks;
    fn deref(&self) -> &Self::Target {
        &self.hardforks
    }
}
/// An Ethereum chain specification.
///
/// A chain specification describes:
///
/// - Meta-information about the chain (the chain ID)
/// - The genesis block of the chain ([`Genesis`])
/// - What hardforks are activated, and under which conditions
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ChainSpec {
    /// The chain ID
    pub chain: Chain,
    /// The genesis block.
    pub genesis: Genesis,
    /// The header corresponding to the genesis block.
    pub genesis_header: SealedHeader,
    /// The block at which [`EthereumHardfork::Paris`] was activated and the final difficulty at
    /// this block.
    pub paris_block_and_final_difficulty: Option<(u64, U256)>,
    /// The active hard forks and their activation conditions
    pub hardforks: ChainHardforks,
    /// The deposit contract deployed for `PoS`
    pub deposit_contract: Option<DepositContract>,
    /// The parameters that configure how a block's base fee is computed
    pub base_fee_params: BaseFeeParamsKind,
    /// The delete limit for pruner, per run (maximum number of entries deleted per pruner pass).
    pub prune_delete_limit: usize,
    /// The settings passed for blob configurations for specific hardforks.
    pub blob_params: BlobScheduleBlobParams,
}
impl Default for ChainSpec {
    /// Default spec: empty chain/genesis/hardforks with mainnet-style base fee parameters and
    /// the mainnet prune delete limit.
    fn default() -> Self {
        Self {
            chain: Default::default(),
            genesis: Default::default(),
            genesis_header: Default::default(),
            paris_block_and_final_difficulty: Default::default(),
            hardforks: Default::default(),
            deposit_contract: Default::default(),
            base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
            prune_delete_limit: MAINNET_PRUNE_DELETE_LIMIT,
            blob_params: Default::default(),
        }
    }
}
impl ChainSpec {
    /// Converts the given [`Genesis`] into a [`ChainSpec`].
    pub fn from_genesis(genesis: Genesis) -> Self {
        genesis.into()
    }
    /// Get information about the chain itself
    pub const fn chain(&self) -> Chain {
        self.chain
    }
    /// Returns `true` if this chain contains Ethereum configuration.
    #[inline]
    pub const fn is_ethereum(&self) -> bool {
        self.chain.is_ethereum()
    }
    /// Returns `true` if this chain is Optimism mainnet.
    #[inline]
    pub fn is_optimism_mainnet(&self) -> bool {
        self.chain == Chain::optimism_mainnet()
    }
    /// Returns the known paris block, if it exists.
    #[inline]
    pub fn paris_block(&self) -> Option<u64> {
        self.paris_block_and_final_difficulty.map(|(block, _)| block)
    }
    /// Get the genesis block specification.
    ///
    /// To get the header for the genesis block, use [`Self::genesis_header`] instead.
    pub const fn genesis(&self) -> &Genesis {
        &self.genesis
    }
    /// Get the header for the genesis block.
    pub fn genesis_header(&self) -> &Header {
        &self.genesis_header
    }
    /// Get the sealed header for the genesis block.
    pub fn sealed_genesis_header(&self) -> SealedHeader {
        SealedHeader::new(self.genesis_header().clone(), self.genesis_hash())
    }
    /// Get the initial base fee of the genesis block.
    ///
    /// Returns `None` when London is not active at genesis.
    pub fn initial_base_fee(&self) -> Option<u64> {
        // If the base fee is set in the genesis block, we use that instead of the default.
        let genesis_base_fee =
            self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(INITIAL_BASE_FEE);
        // If London is activated at genesis, we set the initial base fee as per EIP-1559.
        self.hardforks.fork(EthereumHardfork::London).active_at_block(0).then_some(genesis_base_fee)
    }
    /// Get the [`BaseFeeParams`] for the chain at the given timestamp.
    pub fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams {
        match self.base_fee_params {
            BaseFeeParamsKind::Constant(bf_params) => bf_params,
            BaseFeeParamsKind::Variable(ForkBaseFeeParams(ref bf_params)) => {
                // Walk through the base fee params configuration in reverse order, and return the
                // first one that corresponds to a hardfork that is active at the
                // given timestamp.
                for (fork, params) in bf_params.iter().rev() {
                    if self.hardforks.is_fork_active_at_timestamp(fork.clone(), timestamp) {
                        return *params
                    }
                }
                // No configured fork is active yet: fall back to the earliest configured entry,
                // or the Ethereum defaults if the table is empty.
                bf_params.first().map(|(_, params)| *params).unwrap_or(BaseFeeParams::ethereum())
            }
        }
    }
    /// Get the hash of the genesis block.
    pub fn genesis_hash(&self) -> B256 {
        self.genesis_header.hash()
    }
    /// Get the timestamp of the genesis block in seconds
    ///
    /// Without the `timestamp-in-seconds` feature the raw genesis timestamp is treated as
    /// milliseconds and scaled down accordingly.
    pub(crate) fn genesis_timestamp_seconds(&self) -> u64 {
        #[cfg(feature = "timestamp-in-seconds")]
        return self.genesis.timestamp;
        #[cfg(not(feature = "timestamp-in-seconds"))]
        return self.genesis.timestamp / 1000;
    }
    /// Get the timestamp of the genesis block.
    pub const fn genesis_timestamp(&self) -> u64 {
        self.genesis.timestamp
    }
    /// Returns the final total difficulty if the Paris hardfork is known.
    pub fn get_final_paris_total_difficulty(&self) -> Option<U256> {
        self.paris_block_and_final_difficulty.map(|(_, final_difficulty)| final_difficulty)
    }
    /// Get the fork filter for the given hardfork
    pub fn hardfork_fork_filter<H: Hardfork + Clone>(&self, fork: H) -> Option<ForkFilter> {
        match self.hardforks.fork(fork.clone()) {
            ForkCondition::Never => None,
            _ => Some(self.fork_filter(self.satisfy(self.hardforks.fork(fork)))),
        }
    }
    /// Returns the hardfork display helper.
    pub fn display_hardforks(&self) -> DisplayHardforks {
        DisplayHardforks::new(self.hardforks.forks_iter())
    }
    /// Get the fork id for the given hardfork.
    #[inline]
    pub fn hardfork_fork_id<H: Hardfork + Clone>(&self, fork: H) -> Option<ForkId> {
        let condition = self.hardforks.fork(fork);
        match condition {
            ForkCondition::Never => None,
            _ => Some(self.fork_id(&self.satisfy(condition))),
        }
    }
    /// Convenience method to get the fork id for [`EthereumHardfork::Shanghai`] from a given
    /// chainspec.
    #[inline]
    pub fn shanghai_fork_id(&self) -> Option<ForkId> {
        self.hardfork_fork_id(EthereumHardfork::Shanghai)
    }
    /// Convenience method to get the fork id for [`EthereumHardfork::Cancun`] from a given
    /// chainspec.
    #[inline]
    pub fn cancun_fork_id(&self) -> Option<ForkId> {
        self.hardfork_fork_id(EthereumHardfork::Cancun)
    }
    /// Convenience method to get the latest fork id from the chainspec. Panics if chainspec has no
    /// hardforks.
    #[inline]
    pub fn latest_fork_id(&self) -> ForkId {
        self.hardfork_fork_id(self.hardforks.last().unwrap().0).unwrap()
    }
    /// Creates a [`ForkFilter`] for the block described by [Head].
    pub fn fork_filter(&self, head: Head) -> ForkFilter {
        let forks = self.hardforks.forks_iter().filter_map(|(_, condition)| {
            // We filter out TTD-based forks w/o a pre-known block since those do not show up in
            // the fork filter.
            Some(match condition {
                ForkCondition::Block(block) |
                ForkCondition::TTD { fork_block: Some(block), .. } => ForkFilterKey::Block(block),
                ForkCondition::Timestamp(time) => ForkFilterKey::Time(time),
                _ => return None,
            })
        });
        ForkFilter::new(head, self.genesis_hash(), self.genesis_timestamp_seconds(), forks)
    }
    /// Compute the [`ForkId`] for the given [`Head`] following eip-6122 spec.
    ///
    /// Note: In case there are multiple hardforks activated at the same block or timestamp, only
    /// the first gets applied.
    pub fn fork_id(&self, head: &Head) -> ForkId {
        let mut forkhash = ForkHash::from(self.genesis_hash());
        // this tracks the last applied block or timestamp fork. This is necessary for optimism,
        // because for the optimism hardforks both the optimism and the corresponding ethereum
        // hardfork can be configured in `ChainHardforks` if it enables ethereum equivalent
        // functionality (e.g. additional header,body fields) This is set to 0 so that all
        // block based hardforks are skipped in the following loop
        let mut current_applied = 0;
        // handle all block forks before handling timestamp based forks. see: https://eips.ethereum.org/EIPS/eip-6122
        for (_, cond) in self.hardforks.forks_iter() {
            // handle block based forks and the sepolia merge netsplit block edge case (TTD
            // ForkCondition with Some(block))
            if let ForkCondition::Block(block) |
                ForkCondition::TTD { fork_block: Some(block), .. } = cond
            {
                if head.number >= block {
                    // skip duplicated hardforks: hardforks enabled at genesis block
                    if block != current_applied {
                        forkhash += block;
                        current_applied = block;
                    }
                } else {
                    // we can return here because this block fork is not active, so we set the
                    // `next` value
                    return ForkId { hash: forkhash, next: block }
                }
            }
        }
        // timestamp are ALWAYS applied after the merge.
        //
        // this filter ensures that no block-based forks are returned
        for timestamp in self.hardforks.forks_iter().filter_map(|(_, cond)| {
            // ensure we only get timestamp forks activated __after__ the genesis block
            let genesis_timestamp_seconds = self.genesis_timestamp_seconds();
            cond.as_timestamp().filter(|time| time > &genesis_timestamp_seconds)
        }) {
            // Fork timestamps are in seconds; when the `timestamp-in-seconds` feature is off,
            // head timestamps are in milliseconds, so scale the fork timestamp before comparing.
            let timestamp_cmp =
                if cfg!(feature = "timestamp-in-seconds") { timestamp } else { timestamp * 1000 };
            if head.timestamp >= timestamp_cmp {
                // skip duplicated hardfork activated at the same timestamp
                if timestamp != current_applied {
                    forkhash += timestamp;
                    current_applied = timestamp;
                }
            } else {
                // can safely return here because we have already handled all block forks and
                // have handled all active timestamp forks, and set the next value to the
                // timestamp that is known but not active yet
                return ForkId { hash: forkhash, next: timestamp }
            }
        }
        ForkId { hash: forkhash, next: 0 }
    }
    /// An internal helper function that returns a head block that satisfies a given Fork condition.
    pub(crate) fn satisfy(&self, cond: ForkCondition) -> Head {
        match cond {
            ForkCondition::Block(number) => Head { number, ..Default::default() },
            // this timestamp is in seconds
            ForkCondition::Timestamp(timestamp) => {
                // to satisfy every timestamp ForkCondition, we find the last ForkCondition::Block
                // if one exists, and include its block_num in the returned Head
                let timestamp = if cfg!(feature = "timestamp-in-seconds") {
                    timestamp
                } else {
                    timestamp * 1000
                };
                Head {
                    timestamp,
                    number: self.last_block_fork_before_merge_or_timestamp().unwrap_or_default(),
                    ..Default::default()
                }
            }
            ForkCondition::TTD { total_difficulty, fork_block, .. } => Head {
                total_difficulty,
                number: fork_block.unwrap_or_default(),
                ..Default::default()
            },
            // Callers only invoke `satisfy` for conditions that can be satisfied.
            ForkCondition::Never => unreachable!(),
        }
    }
    /// This internal helper function retrieves the block number of the last block-based fork
    /// that occurs before:
    /// - Any existing Total Terminal Difficulty (TTD) or
    /// - Timestamp-based forks in the current [`ChainSpec`].
    ///
    /// The function operates by examining the configured hard forks in the chain. It iterates
    /// through the fork conditions and identifies the most recent block-based fork that
    /// precedes any TTD or timestamp-based conditions.
    ///
    /// If there are no block-based forks found before these conditions, or if the [`ChainSpec`]
    /// is not configured with a TTD or timestamp fork, this function will return `None`.
    pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option<u64> {
        let mut hardforks_iter = self.hardforks.forks_iter().peekable();
        while let Some((_, curr_cond)) = hardforks_iter.next() {
            if let Some((_, next_cond)) = hardforks_iter.peek() {
                // Match against the `next_cond` to see if it represents:
                // - A TTD (merge)
                // - A timestamp-based fork
                match next_cond {
                    // If the next fork is TTD and specifies a specific block, return that block
                    // number
                    ForkCondition::TTD { fork_block: Some(block), .. } => return Some(*block),
                    // If the next fork is TTD without a specific block or is timestamp-based,
                    // return the block number of the current condition if it is block-based.
                    ForkCondition::TTD { .. } | ForkCondition::Timestamp(_) => {
                        // Check if `curr_cond` is a block-based fork and return its block number if
                        // true.
                        if let ForkCondition::Block(block_num) = curr_cond {
                            return Some(block_num);
                        }
                    }
                    ForkCondition::Block(_) | ForkCondition::Never => {}
                }
            }
        }
        None
    }
    /// Build a chainspec using [`ChainSpecBuilder`]
    pub fn builder() -> ChainSpecBuilder {
        ChainSpecBuilder::default()
    }
    /// Returns the known bootnode records for the given chain.
    ///
    /// Returns `None` for chains without well-known bootnodes.
    pub fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
        use NamedChain as C;
        match self.chain.try_into().ok()? {
            C::Mainnet => Some(mainnet_nodes()),
            C::Sepolia => Some(sepolia_nodes()),
            C::Holesky => Some(holesky_nodes()),
            C::Hoodi => Some(hoodi_nodes()),
            // opstack uses the same bootnodes for all chains: <https://github.com/paradigmxyz/reth/issues/14603>
            C::Base | C::Optimism | C::Unichain | C::World => Some(op_nodes()),
            C::OptimismSepolia | C::BaseSepolia | C::UnichainSepolia | C::WorldSepolia => {
                Some(op_testnet_nodes())
            }
            // fallback for optimism chains
            chain if chain.is_optimism() && chain.is_testnet() => Some(op_testnet_nodes()),
            chain if chain.is_optimism() => Some(op_nodes()),
            _ => None,
        }
    }
}
impl From<Genesis> for ChainSpec {
    /// Builds a [`ChainSpec`] from an external [`Genesis`] file, deriving the hardfork schedule
    /// from the embedded chain config and ordering it to match mainnet activation order.
    fn from(genesis: Genesis) -> Self {
        // Block-based hardforks
        let hardfork_opts = [
            (EthereumHardfork::Frontier.boxed(), Some(0)),
            (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block),
            (EthereumHardfork::Dao.boxed(), genesis.config.dao_fork_block),
            (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block),
            (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block),
            (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block),
            (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block),
            (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block),
            (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block),
            (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block),
            (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block),
            (EthereumHardfork::London.boxed(), genesis.config.london_block),
            (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block),
            (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block),
        ];
        // Forks without a configured activation block are dropped entirely.
        let mut hardforks = hardfork_opts
            .into_iter()
            .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block))))
            .collect::<Vec<_>>();
        // We expect no new networks to be configured with the merge, so we ignore the TTD field
        // and merge netsplit block from external genesis files. All existing networks that have
        // merged should have a static ChainSpec already (namely mainnet and sepolia).
        let paris_block_and_final_difficulty =
            if let Some(ttd) = genesis.config.terminal_total_difficulty {
                hardforks.push((
                    EthereumHardfork::Paris.boxed(),
                    ForkCondition::TTD {
                        // NOTE: this will not work properly if the merge is not activated at
                        // genesis, and there is no merge netsplit block
                        activation_block_number: genesis
                            .config
                            .merge_netsplit_block
                            .unwrap_or_default(),
                        total_difficulty: ttd,
                        fork_block: genesis.config.merge_netsplit_block,
                    },
                ));
                genesis.config.merge_netsplit_block.map(|block| (block, ttd))
            } else {
                None
            };
        // Time-based hardforks
        let time_hardfork_opts = [
            (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time),
            (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time),
            (EthereumHardfork::Prague.boxed(), genesis.config.prague_time),
            (EthereumHardfork::Osaka.boxed(), genesis.config.osaka_time),
            (EthereumHardfork::Bpo1.boxed(), genesis.config.bpo1_time),
            (EthereumHardfork::Bpo2.boxed(), genesis.config.bpo2_time),
            (EthereumHardfork::Bpo3.boxed(), genesis.config.bpo3_time),
            (EthereumHardfork::Bpo4.boxed(), genesis.config.bpo4_time),
            (EthereumHardfork::Bpo5.boxed(), genesis.config.bpo5_time),
        ];
        let mut time_hardforks = time_hardfork_opts
            .into_iter()
            .filter_map(|(hardfork, opt)| {
                opt.map(|time| (hardfork, ForkCondition::Timestamp(time)))
            })
            .collect::<Vec<_>>();
        hardforks.append(&mut time_hardforks);
        // Ordered Hardforks: re-sort the collected forks into mainnet activation order so
        // fork-id computation walks them in the canonical sequence.
        let mainnet_hardforks: ChainHardforks = EthereumHardfork::mainnet().into();
        let mainnet_order = mainnet_hardforks.forks_iter();
        let mut ordered_hardforks = Vec::with_capacity(hardforks.len());
        for (hardfork, _) in mainnet_order {
            if let Some(pos) = hardforks.iter().position(|(e, _)| **e == *hardfork) {
                ordered_hardforks.push(hardforks.remove(pos));
            }
        }
        // append the remaining unknown hardforks to ensure we don't filter any out
        ordered_hardforks.append(&mut hardforks);
        // Extract blob parameters directly from blob_schedule
        let blob_params = genesis.config.blob_schedule_blob_params();
        // NOTE: in full node, we prune all receipts except the deposit contract's. We do not
        // have the deployment block in the genesis file, so we use block zero. We use the same
        // deposit topic as the mainnet contract if we have the deposit contract address in the
        // genesis json.
        let deposit_contract = genesis.config.deposit_contract_address.map(|address| {
            DepositContract { address, block: 0, topic: MAINNET_DEPOSIT_CONTRACT.topic }
        });
        let hardforks = ChainHardforks::new(ordered_hardforks);
        Self {
            chain: genesis.config.chain_id.into(),
            genesis_header: SealedHeader::new_unhashed(make_genesis_header(&genesis, &hardforks)),
            genesis,
            hardforks,
            paris_block_and_final_difficulty,
            deposit_contract,
            blob_params,
            ..Default::default()
        }
    }
}
// Trait plumbing: delegate every `Hardforks` method to the inherent implementations above.
impl Hardforks for ChainSpec {
    fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition {
        self.hardforks.fork(fork)
    }
    fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> {
        self.hardforks.forks_iter()
    }
    // These call the inherent methods, which shadow the trait methods of the same name.
    fn fork_id(&self, head: &Head) -> ForkId {
        self.fork_id(head)
    }
    fn latest_fork_id(&self) -> ForkId {
        self.latest_fork_id()
    }
    fn fork_filter(&self, head: Head) -> ForkFilter {
        self.fork_filter(head)
    }
}
impl EthereumHardforks for ChainSpec {
    /// Looks up the activation condition of an Ethereum hardfork in this spec.
    fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
        self.fork(fork)
    }
}
impl SeismicHardforks for ChainSpec {
    /// Looks up the activation condition of a Seismic hardfork in this spec.
    fn seismic_fork_activation(&self, fork: SeismicHardfork) -> ForkCondition {
        self.fork(fork)
    }
}
/// A trait for reading the current chainspec.
#[auto_impl::auto_impl(&, Arc)]
pub trait ChainSpecProvider: Debug + Send + Sync {
    /// The chain spec type.
    type ChainSpec: EthChainSpec + 'static;
    /// Get an [`Arc`] to the chainspec.
    fn chain_spec(&self) -> Arc<Self::ChainSpec>;
}
/// A helper to build custom chain specs
#[derive(Debug, Default, Clone)]
pub struct ChainSpecBuilder {
    // Target chain id; must be set before building.
    chain: Option<Chain>,
    // Genesis definition; must be set before building.
    genesis: Option<Genesis>,
    // Hardfork schedule accumulated by the builder methods.
    hardforks: ChainHardforks,
}
impl ChainSpecBuilder {
/// Construct a new builder from the mainnet chain spec.
pub fn mainnet() -> Self {
Self {
chain: Some(MAINNET.chain),
genesis: Some(MAINNET.genesis.clone()),
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/src/stream.rs | crates/era-downloader/src/stream.rs | use crate::{client::HttpClient, EraClient, BLOCKS_PER_FILE};
use alloy_primitives::BlockNumber;
use futures_util::{stream::FuturesOrdered, FutureExt, Stream, StreamExt};
use reqwest::Url;
use reth_fs_util as fs;
use std::{
collections::VecDeque,
fmt::{Debug, Formatter},
future::Future,
path::Path,
pin::Pin,
task::{Context, Poll},
};
/// Parameters that alter the behavior of [`EraStream`].
///
/// # Examples
/// ```
/// use reth_era_downloader::EraStreamConfig;
///
/// EraStreamConfig::default().with_max_files(10).with_max_concurrent_downloads(2);
/// ```
#[derive(Debug, Clone)]
pub struct EraStreamConfig {
    // Maximum number of downloaded ERA1 files kept on disk at once.
    max_files: usize,
    // Maximum number of simultaneous downloads.
    max_concurrent_downloads: usize,
    // Optional override for the first ERA file index to download.
    start_from: Option<usize>,
}
impl Default for EraStreamConfig {
    /// Defaults: keep up to 5 files, download up to 3 concurrently, start from the beginning.
    fn default() -> Self {
        Self { max_files: 5, max_concurrent_downloads: 3, start_from: None }
    }
}
impl EraStreamConfig {
    /// The maximum amount of downloaded ERA1 files kept in the download directory.
    pub const fn with_max_files(mut self, max_files: usize) -> Self {
        self.max_files = max_files;
        self
    }
    /// The maximum amount of downloads happening at the same time.
    pub const fn with_max_concurrent_downloads(mut self, max_concurrent_downloads: usize) -> Self {
        self.max_concurrent_downloads = max_concurrent_downloads;
        self
    }
    /// Overrides the starting ERA file index to be the first one that contains `block_number`.
    pub const fn start_from(mut self, block_number: BlockNumber) -> Self {
        // Each ERA1 file covers `BLOCKS_PER_FILE` blocks, so integer division yields the
        // index of the file containing `block_number`.
        self.start_from.replace(block_number as usize / BLOCKS_PER_FILE);
        self
    }
}
/// An asynchronous stream of ERA1 files.
///
/// # Examples
/// ```
/// use futures_util::StreamExt;
/// use reth_era_downloader::{EraMeta, EraStream, HttpClient};
///
/// # async fn import(mut stream: EraStream<impl HttpClient + Clone + Send + Sync + 'static + Unpin>) -> eyre::Result<()> {
/// while let Some(meta) = stream.next().await {
///     let meta = meta?;
///     // Process file at `meta.path(): &Path`
///     meta.mark_as_processed()?;
/// }
/// # Ok(())
/// # }
/// ```
#[derive(Debug)]
pub struct EraStream<Http> {
    // Drives the actual downloads with bounded concurrency.
    download_stream: DownloadStream,
    // Schedules new downloads based on what is already on disk.
    starting_stream: StartingStream<Http>,
}
impl<Http> EraStream<Http> {
    /// Constructs a new [`EraStream`] that downloads concurrently up to `max_concurrent_downloads`
    /// ERA1 files to `client` `folder`, keeping their count up to `max_files`.
    pub fn new(client: EraClient<Http>, config: EraStreamConfig) -> Self {
        Self {
            download_stream: DownloadStream {
                downloads: Default::default(),
                scheduled: Default::default(),
                max_concurrent_downloads: config.max_concurrent_downloads,
                ended: false,
            },
            starting_stream: StartingStream {
                client,
                // Placeholder futures: these are replaced the first time the state machine
                // enters the corresponding state, and are never polled before then.
                files_count: Box::pin(async move { usize::MAX }),
                next_url: Box::pin(async move { Ok(None) }),
                delete_outside_range: Box::pin(async move { Ok(()) }),
                recover_index: Box::pin(async move { None }),
                fetch_file_list: Box::pin(async move { Ok(()) }),
                state: Default::default(),
                max_files: config.max_files,
                index: config.start_from.unwrap_or_default(),
                last: None,
                downloading: 0,
            },
        }
    }
}
/// Contains information about an ERA file.
pub trait EraMeta: Debug {
    /// Marking this particular ERA file as "processed" lets the caller hint that it is no longer
    /// going to be using it.
    ///
    /// The meaning of that is up to the implementation. The caller should assume that after this
    /// point is no longer possible to safely read it.
    fn mark_as_processed(&self) -> eyre::Result<()>;
    /// A path to the era file.
    ///
    /// File should be openable and treated as read-only.
    fn path(&self) -> &Path;
}
// Forward the trait through `Box` so boxed metadata can be used wherever `EraMeta` is expected.
impl<T: EraMeta> EraMeta for Box<T> {
    fn mark_as_processed(&self) -> eyre::Result<()> {
        (**self).mark_as_processed()
    }
    fn path(&self) -> &Path {
        (**self).path()
    }
}
/// Contains information about ERA file that is hosted remotely and represented by a temporary
/// local file.
#[derive(Debug)]
pub struct EraRemoteMeta {
    // Location of the temporary local copy of the remote ERA1 file.
    path: Box<Path>,
}
impl EraRemoteMeta {
    /// Wraps the path of a downloaded temporary file.
    const fn new(path: Box<Path>) -> Self {
        Self { path }
    }
}
impl AsRef<Path> for EraRemoteMeta {
    /// Borrows the path of the temporary local file.
    fn as_ref(&self) -> &Path {
        &self.path
    }
}
impl EraMeta for EraRemoteMeta {
    /// Removes a temporary local file representation of the remotely hosted original.
    fn mark_as_processed(&self) -> eyre::Result<()> {
        Ok(fs::remove_file(&self.path)?)
    }
    fn path(&self) -> &Path {
        &self.path
    }
}
impl<Http: HttpClient + Clone + Send + Sync + 'static + Unpin> Stream for EraStream<Http> {
    type Item = eyre::Result<EraRemoteMeta>;
    /// Pumps the scheduler first, feeding newly scheduled downloads into the download stream,
    /// then polls the downloads themselves.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if let Poll::Ready(fut) = self.starting_stream.poll_next_unpin(cx) {
            if let Some(fut) = fut {
                self.download_stream.scheduled.push_back(fut);
            } else {
                // Scheduler is exhausted; let the download stream terminate once drained.
                self.download_stream.ended = true;
            }
        }
        let poll = self.download_stream.poll_next_unpin(cx);
        if poll.is_ready() {
            // One download finished (or the stream ended); free up a download slot.
            self.starting_stream.downloaded();
        }
        poll
    }
}
/// Boxed future resolving to the metadata of a fully downloaded ERA1 file.
type DownloadFuture =
    Pin<Box<dyn Future<Output = eyre::Result<EraRemoteMeta>> + Send + Sync + 'static>>;
struct DownloadStream {
    // Downloads currently in flight, yielded in scheduling order.
    downloads: FuturesOrdered<DownloadFuture>,
    // Downloads waiting for a free concurrency slot.
    scheduled: VecDeque<DownloadFuture>,
    // Upper bound on simultaneously running downloads.
    max_concurrent_downloads: usize,
    // Set once the scheduler will produce no more downloads.
    ended: bool,
}
impl Debug for DownloadStream {
    // Futures are not `Debug`, so only report the in-flight count.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "DownloadStream({})", self.downloads.len())
    }
}
impl Stream for DownloadStream {
    type Item = eyre::Result<EraRemoteMeta>;
    /// Tops up the in-flight set from the scheduled queue, then polls the ordered downloads.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Move scheduled futures into the in-flight set up to the concurrency limit.
        // A length-guarded loop (instead of iterating `max - len` times) cannot underflow
        // and panic if the in-flight count ever exceeds the configured limit.
        while self.downloads.len() < self.max_concurrent_downloads {
            match self.scheduled.pop_front() {
                Some(fut) => self.downloads.push_back(fut),
                None => break,
            }
        }
        let ended = self.ended;
        let poll = self.downloads.poll_next_unpin(cx);
        // An empty in-flight set only terminates the stream after the scheduler signaled the
        // end; otherwise request another poll for work that may be scheduled later.
        if matches!(poll, Poll::Ready(None)) && !ended {
            cx.waker().wake_by_ref();
            return Poll::Pending;
        }
        poll
    }
}
struct StartingStream<Http> {
    // Client used to talk to the remote ERA1 host and the local download directory.
    client: EraClient<Http>,
    // In-progress future counting files already in the download directory.
    files_count: Pin<Box<dyn Future<Output = usize> + Send + Sync + 'static>>,
    // In-progress future resolving the URL for the next file index.
    next_url: Pin<Box<dyn Future<Output = eyre::Result<Option<Url>>> + Send + Sync + 'static>>,
    // In-progress future deleting local files outside the configured index range.
    delete_outside_range: Pin<Box<dyn Future<Output = eyre::Result<()>> + Send + Sync + 'static>>,
    // In-progress future recovering the highest already-downloaded index.
    recover_index: Pin<Box<dyn Future<Output = Option<usize>> + Send + Sync + 'static>>,
    // In-progress future fetching the remote file list.
    fetch_file_list: Pin<Box<dyn Future<Output = eyre::Result<()>> + Send + Sync + 'static>>,
    // Current phase of the scheduling state machine.
    state: State,
    // Maximum number of files kept on disk.
    max_files: usize,
    // Next file index to schedule.
    index: usize,
    // Highest index recovered from disk, if any.
    last: Option<usize>,
    // Number of downloads currently in flight (decremented via `downloaded`).
    downloading: usize,
}
impl<Http> Debug for StartingStream<Http> {
    // Futures and the client are not `Debug`; report only the scalar scheduling state.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "StartingStream{{ max_files: {}, index: {}, downloading: {} }}",
            self.max_files, self.index, self.downloading
        )
    }
}
/// Phases of the [`StartingStream`] scheduling state machine.
#[derive(Debug, PartialEq, Default)]
enum State {
    /// Nothing started yet; the next poll kicks off fetching the remote file list.
    #[default]
    Initial,
    /// Waiting for the remote file list download to complete.
    FetchFileList,
    /// Waiting for deletion of local files outside the configured index range.
    DeleteOutsideRange,
    /// Waiting for recovery of the highest already-downloaded file index.
    RecoverIndex,
    /// Waiting for the count of ERA1 files currently in the download directory.
    CountFiles,
    /// Up to `n` more downloads may be scheduled.
    Missing(usize),
    /// Resolving the URL for the next file; payload is the remaining missing count.
    NextUrl(usize),
}
impl<Http: HttpClient + Clone + Send + Sync + 'static + Unpin> Stream for StartingStream<Http> {
    type Item = DownloadFuture;
    /// Advances the scheduling state machine:
    /// `Initial -> FetchFileList -> DeleteOutsideRange -> RecoverIndex -> CountFiles ->
    /// Missing -> NextUrl -> Missing -> ...`, yielding one download future per resolved URL.
    /// Errors from intermediate steps are surfaced as already-failed download futures while the
    /// failed step is retried on the next poll.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if self.state == State::Initial {
            self.fetch_file_list();
        }
        if self.state == State::FetchFileList {
            if let Poll::Ready(result) = self.fetch_file_list.poll_unpin(cx) {
                match result {
                    Ok(_) => self.delete_outside_range(),
                    Err(e) => {
                        // Re-arm the step and report the error downstream.
                        self.fetch_file_list();
                        return Poll::Ready(Some(Box::pin(async move { Err(e) })));
                    }
                }
            }
        }
        if self.state == State::DeleteOutsideRange {
            if let Poll::Ready(result) = self.delete_outside_range.poll_unpin(cx) {
                match result {
                    Ok(_) => self.recover_index(),
                    Err(e) => {
                        // Re-arm the step and report the error downstream.
                        self.delete_outside_range();
                        return Poll::Ready(Some(Box::pin(async move { Err(e) })));
                    }
                }
            }
        }
        if self.state == State::RecoverIndex {
            if let Poll::Ready(last) = self.recover_index.poll_unpin(cx) {
                self.last = last;
                self.count_files();
            }
        }
        if self.state == State::CountFiles {
            if let Poll::Ready(downloaded) = self.files_count.poll_unpin(cx) {
                // How many more files may be scheduled: bounded by the on-disk budget, but at
                // least enough to reach the recovered index.
                let max_missing = self
                    .max_files
                    .saturating_sub(downloaded + self.downloading)
                    .max(self.last.unwrap_or_default().saturating_sub(self.index));
                self.state = State::Missing(max_missing);
            }
        }
        if let State::Missing(max_missing) = self.state {
            if max_missing > 0 {
                let index = self.index;
                self.index += 1;
                self.downloading += 1;
                self.next_url(index, max_missing);
            } else {
                // Budget exhausted for now; re-count files to notice processed deletions.
                self.count_files();
            }
        }
        if let State::NextUrl(max_missing) = self.state {
            if let Poll::Ready(url) = self.next_url.poll_unpin(cx) {
                self.state = State::Missing(max_missing - 1);
                // `None` URL ends the stream; otherwise yield a future downloading that file.
                return Poll::Ready(url.transpose().map(|url| -> DownloadFuture {
                    let mut client = self.client.clone();
                    Box::pin(
                        async move { client.download_to_file(url?).await.map(EraRemoteMeta::new) },
                    )
                }));
            }
        }
        Poll::Pending
    }
}
impl<Http> StartingStream<Http> {
    /// Records that one in-flight download has finished; never underflows below zero.
    const fn downloaded(&mut self) {
        self.downloading = match self.downloading.checked_sub(1) {
            Some(remaining) => remaining,
            None => 0,
        };
    }
}
impl<Http: HttpClient + Clone + Send + Sync + 'static> StartingStream<Http> {
    /// Arms the file-list fetch future and transitions to [`State::FetchFileList`].
    fn fetch_file_list(&mut self) {
        let client = self.client.clone();
        // `Pin<Box<dyn Future>>` is `Unpin`, so the pinned box can simply be replaced.
        self.fetch_file_list = Box::pin(async move { client.fetch_file_list().await });
        self.state = State::FetchFileList;
    }

    /// Arms deletion of files outside `[index, index + max_files)` and transitions to
    /// [`State::DeleteOutsideRange`].
    fn delete_outside_range(&mut self) {
        let (start, budget) = (self.index, self.max_files);
        let client = self.client.clone();
        self.delete_outside_range =
            Box::pin(async move { client.delete_outside_range(start, budget).await });
        self.state = State::DeleteOutsideRange;
    }

    /// Arms recovery of the resume index and transitions to [`State::RecoverIndex`].
    fn recover_index(&mut self) {
        let client = self.client.clone();
        self.recover_index = Box::pin(async move { client.recover_index().await });
        self.state = State::RecoverIndex;
    }

    /// Arms counting of local ERA1 files and transitions to [`State::CountFiles`].
    fn count_files(&mut self) {
        let client = self.client.clone();
        self.files_count = Box::pin(async move { client.files_count().await });
        self.state = State::CountFiles;
    }

    /// Arms resolution of the URL for file `index` and transitions to [`State::NextUrl`]
    /// carrying `max_missing`.
    fn next_url(&mut self, index: usize, max_missing: usize) {
        let client = self.client.clone();
        self.next_url = Box::pin(async move { client.url(index).await });
        self.state = State::NextUrl(max_missing);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/src/lib.rs | crates/era-downloader/src/lib.rs | //! An asynchronous stream interface for downloading ERA1 files.
//!
//! # Examples
//! ```
//! use futures_util::StreamExt;
//! use reqwest::{Client, Url};
//! use reth_era_downloader::{EraClient, EraStream, EraStreamConfig};
//! use std::{path::PathBuf, str::FromStr};
//!
//! # async fn f() -> Result<(), Box<dyn std::error::Error + 'static>> {
//! // URL where the ERA1 files are hosted
//! let url = Url::from_str("file:///")?;
//!
//! // Directory where the ERA1 files will be downloaded to
//! let folder = PathBuf::new();
//!
//! let client = EraClient::new(Client::new(), url, folder);
//!
//! let config = EraStreamConfig::default()
//! // Keep up to 2 ERA1 files in the `folder`.
//! // More downloads won't start until some of the files are removed.
//! .with_max_files(2)
//! // Do not download more than 2 files at the same time.
//! .with_max_concurrent_downloads(2);
//!
//! let mut stream = EraStream::new(client, config);
//!
//! # return Ok(());
//! while let Some(file) = stream.next().await {
//! let file = file?;
//! // Process `file: Box<Path>`
//! }
//! # Ok(())
//! # }
//! ```
mod client;
mod fs;
mod stream;
pub use client::{EraClient, HttpClient};
pub use fs::read_dir;
pub use stream::{EraMeta, EraStream, EraStreamConfig};
pub(crate) const BLOCKS_PER_FILE: usize = 8192;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/src/fs.rs | crates/era-downloader/src/fs.rs | use crate::{EraMeta, BLOCKS_PER_FILE};
use alloy_primitives::{hex, hex::ToHexExt, BlockNumber};
use eyre::{eyre, OptionExt};
use futures_util::{stream, Stream};
use reth_fs_util as fs;
use sha2::{Digest, Sha256};
use std::{fmt::Debug, io, io::BufRead, path::Path, str::FromStr};
/// Creates a new ordered asynchronous [`Stream`] of ERA1 files read from `dir`.
///
/// Only files named `<network>-<number>-<hash>.era1` are yielded, ordered by `<number>`,
/// starting with the file that contains block `start_from`. Each file is verified against
/// the SHA-256 hex digest on the matching line of `checksums.txt`, which must also be
/// present in `dir`.
///
/// # Errors
/// Returns an error if `dir` cannot be read or `checksums.txt` is missing or unreadable.
/// Individual stream items are errors when a file's checksum does not match.
pub fn read_dir(
    dir: impl AsRef<Path> + Send + Sync + 'static,
    start_from: BlockNumber,
) -> eyre::Result<impl Stream<Item = eyre::Result<EraLocalMeta>> + Send + Sync + 'static + Unpin> {
    let mut checksums = None;
    let mut entries = fs::read_dir(dir)?
        .filter_map(|entry| {
            (|| {
                let path = entry?.path();
                if path.extension() == Some("era1".as_ref()) {
                    if let Some(last) = path.components().next_back() {
                        let str = last.as_os_str().to_string_lossy().to_string();
                        let parts = str.split('-').collect::<Vec<_>>();
                        if parts.len() == 3 {
                            let number = usize::from_str(parts[1])?;
                            return Ok(Some((number, path.into_boxed_path())));
                        }
                    }
                }
                if path.file_name() == Some("checksums.txt".as_ref()) {
                    let file = fs::open(path)?;
                    let reader = io::BufReader::new(file);
                    let lines = reader.lines();
                    checksums = Some(lines);
                }
                Ok(None)
            })()
            .transpose()
        })
        .collect::<eyre::Result<Vec<_>>>()?;
    // Read all checksum lines up-front so each file can be matched against the line at its
    // own index. Consuming the lines sequentially would pair the first *yielded* file with
    // line 0 even when `start_from` skips ahead, verifying files against the wrong hashes.
    let checksums = checksums
        .ok_or_eyre("Missing file `checksums.txt` in the `dir`")?
        .collect::<Result<Vec<_>, _>>()?;
    entries.sort_by(|(left, _), (right, _)| left.cmp(right));
    Ok(stream::iter(entries.into_iter().skip(start_from as usize / BLOCKS_PER_FILE).map(
        move |(number, path)| {
            // Index the checksum list by the file's own number, not by yield order.
            let expected_checksum =
                checksums.get(number).ok_or_eyre("Got less checksums than ERA files")?;
            let expected_checksum = hex::decode(expected_checksum)?;
            let mut hasher = Sha256::new();
            let mut reader = io::BufReader::new(fs::open(&path)?);
            io::copy(&mut reader, &mut hasher)?;
            let actual_checksum = hasher.finalize().to_vec();
            if actual_checksum != expected_checksum {
                return Err(eyre!(
                    "Checksum mismatch, got: {}, expected: {}",
                    actual_checksum.encode_hex(),
                    expected_checksum.encode_hex()
                ));
            }
            Ok(EraLocalMeta::new(path))
        },
    )))
}
/// Contains information about an ERA file that is on the local file-system and is read-only.
#[derive(Debug)]
pub struct EraLocalMeta {
    // Location of the ERA1 file on disk.
    path: Box<Path>,
}
impl EraLocalMeta {
    /// Wraps the given file-system `path` as local ERA file metadata.
    const fn new(path: Box<Path>) -> Self {
        Self { path }
    }
}
impl<T: AsRef<Path>> PartialEq<T> for EraLocalMeta {
    /// Compares by file-system path against anything path-like.
    fn eq(&self, other: &T) -> bool {
        let this: &Path = &self.path;
        this == other.as_ref()
    }
}
impl AsRef<Path> for EraLocalMeta {
    /// Borrows the underlying file-system path.
    fn as_ref(&self) -> &Path {
        &self.path
    }
}
impl EraMeta for EraLocalMeta {
    /// A no-op.
    ///
    /// Local files are treated as read-only, so there is no post-processing cleanup to do.
    fn mark_as_processed(&self) -> eyre::Result<()> {
        Ok(())
    }
    /// Returns the location of the underlying ERA1 file.
    fn path(&self) -> &Path {
        &self.path
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/src/client.rs | crates/era-downloader/src/client.rs | use alloy_primitives::{hex, hex::ToHexExt};
use bytes::Bytes;
use eyre::{eyre, OptionExt};
use futures_util::{stream::StreamExt, Stream, TryStreamExt};
use reqwest::{Client, IntoUrl, Url};
use sha2::{Digest, Sha256};
use std::{future::Future, path::Path, str::FromStr};
use tokio::{
fs::{self, File},
io::{self, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncWriteExt},
join, try_join,
};
/// Accesses the network over HTTP.
pub trait HttpClient {
    /// Makes an HTTP GET request to `url`. Returns a stream of response body bytes.
    ///
    /// Errors may surface both when establishing the request and mid-stream, as `Err`
    /// items on the returned stream.
    fn get<U: IntoUrl + Send + Sync>(
        &self,
        url: U,
    ) -> impl Future<
        Output = eyre::Result<impl Stream<Item = eyre::Result<Bytes>> + Send + Sync + Unpin>,
    > + Send
           + Sync;
}
impl HttpClient for Client {
async fn get<U: IntoUrl + Send + Sync>(
&self,
url: U,
) -> eyre::Result<impl Stream<Item = eyre::Result<Bytes>> + Unpin> {
let response = Self::get(self, url).send().await?;
Ok(response.bytes_stream().map_err(|e| eyre::Error::new(e)))
}
}
/// An HTTP client with features for downloading ERA files from an external HTTP accessible
/// endpoint.
#[derive(Debug, Clone)]
pub struct EraClient<Http> {
    // Transport used for all network access.
    client: Http,
    // Base URL of the remote ERA1 file host.
    url: Url,
    // Local directory that downloads and bookkeeping files are written into.
    folder: Box<Path>,
}
impl<Http: HttpClient + Clone> EraClient<Http> {
    /// Name of the checksum listing file, both remotely and locally.
    const CHECKSUMS: &'static str = "checksums.txt";

    /// Constructs [`EraClient`] using `client` to download from `url` into `folder`.
    pub fn new(client: Http, url: Url, folder: impl Into<Box<Path>>) -> Self {
        Self { client, url, folder: folder.into() }
    }

    /// Performs a GET request on `url` and stores the response body into a file located within
    /// the `folder`.
    ///
    /// Skips the transfer entirely if the file already exists locally with a valid checksum.
    /// Otherwise the body is streamed to disk while being hashed (retrying the transfer up to
    /// two more times on failure) and the final digest is verified against `checksums.txt`.
    pub async fn download_to_file(&mut self, url: impl IntoUrl) -> eyre::Result<Box<Path>> {
        let path = self.folder.to_path_buf();
        let url = url.into_url()?;
        let client = self.client.clone();
        let file_name = url
            .path_segments()
            .ok_or_eyre("cannot-be-a-base")?
            .next_back()
            .ok_or_eyre("empty path segments")?;
        let path = path.join(file_name);
        if !self.is_downloaded(file_name, &path).await? {
            let number = self
                .file_name_to_number(file_name)
                .ok_or_eyre("Cannot parse number from file name")?;
            // Up to three attempts in total: the initial one plus two retries.
            let mut tries = 1..3;
            let mut actual_checksum: eyre::Result<_>;
            loop {
                actual_checksum = async {
                    let mut file = File::create(&path).await?;
                    let mut stream = client.get(url.clone()).await?;
                    let mut hasher = Sha256::new();
                    // Hash while writing so the payload is only traversed once.
                    while let Some(item) = stream.next().await.transpose()? {
                        io::copy(&mut item.as_ref(), &mut file).await?;
                        hasher.update(item);
                    }
                    Ok(hasher.finalize().to_vec())
                }
                .await;
                if actual_checksum.is_ok() || tries.next().is_none() {
                    break;
                }
            }
            self.assert_checksum(number, actual_checksum?)
                .await
                .map_err(|e| eyre!("{e} for {file_name} at {}", path.display()))?;
        }
        Ok(path.into_boxed_path())
    }

    /// Recovers the index of the file following the latest downloaded file from a different
    /// run, or `None` if no ERA1 file is present in the `folder`.
    pub async fn recover_index(&self) -> Option<usize> {
        let mut max = None;
        if let Ok(mut dir) = fs::read_dir(&self.folder).await {
            while let Ok(Some(entry)) = dir.next_entry().await {
                if let Some(name) = entry.file_name().to_str() {
                    if let Some(number) = self.file_name_to_number(name) {
                        // Track the highest raw file number seen. Comparing raw numbers
                        // (instead of a number against an already-incremented maximum)
                        // keeps the result independent of directory iteration order.
                        if max.map_or(true, |max| number > max) {
                            max = Some(number);
                        }
                    }
                }
            }
        }
        // Resume from the file immediately after the highest one present.
        max.map(|number| number + 1)
    }

    /// Deletes files that are outside-of the working range `[index, index + max_files)`.
    pub async fn delete_outside_range(&self, index: usize, max_files: usize) -> eyre::Result<()> {
        let last = index + max_files;
        if let Ok(mut dir) = fs::read_dir(&self.folder).await {
            while let Ok(Some(entry)) = dir.next_entry().await {
                if let Some(name) = entry.file_name().to_str() {
                    if let Some(number) = self.file_name_to_number(name) {
                        if number < index || number >= last {
                            reth_fs_util::remove_file(entry.path())?;
                        }
                    }
                }
            }
        }
        Ok(())
    }

    /// Returns a download URL for the file corresponding to `number`, or `None` if the file
    /// list has fewer entries.
    pub async fn url(&self, number: usize) -> eyre::Result<Option<Url>> {
        Ok(self.number_to_file_name(number).await?.map(|name| self.url.join(&name)).transpose()?)
    }

    /// Returns the number of ERA1 files in the `folder`.
    pub async fn files_count(&self) -> usize {
        let mut count = 0usize;
        if let Ok(mut dir) = fs::read_dir(&self.folder).await {
            while let Ok(Some(entry)) = dir.next_entry().await {
                if entry.path().extension() == Some("era1".as_ref()) {
                    count += 1;
                }
            }
        }
        count
    }

    /// Fetches the list of ERA1 files from `url` and stores it in a file located within
    /// `folder`.
    ///
    /// Downloads the remote index page and `checksums.txt` concurrently, then extracts the
    /// `*.era1` names from the page into a plain `index` file, one name per line.
    pub async fn fetch_file_list(&self) -> eyre::Result<()> {
        let (mut index, mut checksums) = try_join!(
            self.client.get(self.url.clone()),
            self.client.get(self.url.clone().join(Self::CHECKSUMS)?),
        )?;
        let index_path = self.folder.to_path_buf().join("index.html");
        let checksums_path = self.folder.to_path_buf().join(Self::CHECKSUMS);
        let (mut index_file, mut checksums_file) =
            try_join!(File::create(&index_path), File::create(&checksums_path))?;
        loop {
            let (index, checksums) = join!(index.next(), checksums.next());
            let (index, checksums) = (index.transpose()?, checksums.transpose()?);
            if index.is_none() && checksums.is_none() {
                break;
            }
            let index_file = &mut index_file;
            let checksums_file = &mut checksums_file;
            try_join!(
                async move {
                    if let Some(index) = index {
                        io::copy(&mut index.as_ref(), index_file).await?;
                    }
                    Ok::<(), eyre::Error>(())
                },
                async move {
                    if let Some(checksums) = checksums {
                        io::copy(&mut checksums.as_ref(), checksums_file).await?;
                    }
                    Ok::<(), eyre::Error>(())
                },
            )?;
        }
        let file = File::open(&index_path).await?;
        let reader = io::BufReader::new(file);
        let mut lines = reader.lines();
        let path = self.folder.to_path_buf().join("index");
        let file = File::create(&path).await?;
        let mut writer = io::BufWriter::new(file);
        while let Some(line) = lines.next_line().await? {
            // Extract `<name>.era1` tokens: scan back from ".era1" to the first character
            // that cannot be part of a file name.
            if let Some(j) = line.find(".era1") {
                if let Some(i) = line[..j].rfind(|c: char| !c.is_alphanumeric() && c != '-') {
                    let era = &line[i + 1..j + 5];
                    writer.write_all(era.as_bytes()).await?;
                    writer.write_all(b"\n").await?;
                }
            }
        }
        writer.flush().await?;
        Ok(())
    }

    /// Returns ERA1 file name that is ordered at `number` in the local `index` file.
    pub async fn number_to_file_name(&self, number: usize) -> eyre::Result<Option<String>> {
        let path = self.folder.to_path_buf().join("index");
        let file = File::open(&path).await?;
        let reader = io::BufReader::new(file);
        let mut lines = reader.lines();
        for _ in 0..number {
            lines.next_line().await?;
        }
        Ok(lines.next_line().await?)
    }

    /// Returns `true` if a file called `name` already exists at `path` with a valid checksum.
    /// A file with an invalid checksum is removed so that it gets downloaded again.
    async fn is_downloaded(&self, name: &str, path: impl AsRef<Path>) -> eyre::Result<bool> {
        let path = path.as_ref();
        match File::open(path).await {
            Ok(file) => {
                let number = self
                    .file_name_to_number(name)
                    .ok_or_else(|| eyre!("Cannot parse ERA number from {name}"))?;
                let actual_checksum = checksum(file).await?;
                let is_verified = self.verify_checksum(number, actual_checksum).await?;
                if !is_verified {
                    fs::remove_file(path).await?;
                }
                Ok(is_verified)
            }
            Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(false),
            Err(e) => Err(e)?,
        }
    }

    /// Returns `true` if `actual_checksum` matches expected checksum of the ERA1 file indexed
    /// by `number` based on the [file list].
    ///
    /// [file list]: Self::fetch_file_list
    async fn verify_checksum(&self, number: usize, actual_checksum: Vec<u8>) -> eyre::Result<bool> {
        Ok(actual_checksum == self.expected_checksum(number).await?)
    }

    /// Returns `Ok` if `actual_checksum` matches expected checksum of the ERA1 file indexed by
    /// `number` based on the [file list].
    ///
    /// [file list]: Self::fetch_file_list
    async fn assert_checksum(&self, number: usize, actual_checksum: Vec<u8>) -> eyre::Result<()> {
        let expected_checksum = self.expected_checksum(number).await?;
        if actual_checksum == expected_checksum {
            Ok(())
        } else {
            Err(eyre!(
                "Checksum mismatch, got: {}, expected: {}",
                actual_checksum.encode_hex(),
                expected_checksum.encode_hex()
            ))
        }
    }

    /// Returns SHA-256 checksum for ERA1 file indexed by `number` based on the [file list].
    ///
    /// [file list]: Self::fetch_file_list
    async fn expected_checksum(&self, number: usize) -> eyre::Result<Vec<u8>> {
        let file = File::open(self.folder.join(Self::CHECKSUMS)).await?;
        let reader = io::BufReader::new(file);
        let mut lines = reader.lines();
        // Checksums are listed one per line in file order; skip to line `number`.
        for _ in 0..number {
            lines.next_line().await?;
        }
        let expected_checksum =
            lines.next_line().await?.ok_or_else(|| eyre!("Missing hash for number {number}"))?;
        let expected_checksum = hex::decode(expected_checksum)?;
        Ok(expected_checksum)
    }

    /// Parses the ERA1 number out of a `<network>-<number>-<hash>.era1` file name.
    fn file_name_to_number(&self, file_name: &str) -> Option<usize> {
        file_name.split('-').nth(1).and_then(|v| usize::from_str(v).ok())
    }
}
/// Computes the SHA-256 digest of everything readable from `reader`.
async fn checksum(mut reader: impl AsyncRead + Unpin) -> eyre::Result<Vec<u8>> {
    // Read in fairly large chunks for throughput.
    const BUF_SIZE: usize = 64 * 1024;
    let mut hasher = Sha256::new();
    let mut buf = vec![0u8; BUF_SIZE];
    loop {
        // `read` returning zero bytes signals end-of-stream.
        match reader.read(&mut buf).await? {
            0 => break Ok(hasher.finalize().to_vec()),
            n => hasher.update(&buf[..n]),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::PathBuf;
    use test_case::test_case;
    impl EraClient<Client> {
        /// Creates a client with a placeholder URL and folder, sufficient for tests that
        /// only exercise pure helpers such as `file_name_to_number`.
        fn empty() -> Self {
            Self::new(Client::new(), Url::from_str("file:///").unwrap(), PathBuf::new())
        }
    }
    // File names follow `<network>-<number>-<hash>.era1`; anything else yields `None`.
    #[test_case("mainnet-00600-a81ae85f.era1", Some(600))]
    #[test_case("mainnet-00000-a81ae85f.era1", Some(0))]
    #[test_case("00000-a81ae85f.era1", None)]
    #[test_case("", None)]
    fn test_file_name_to_number(file_name: &str, expected_number: Option<usize>) {
        let client = EraClient::empty();
        let actual_number = client.file_name_to_number(file_name);
        assert_eq!(actual_number, expected_number);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/tests/it/stream.rs | crates/era-downloader/tests/it/stream.rs | //! Tests downloading files and streaming their filenames
use crate::StubClient;
use futures_util::StreamExt;
use reqwest::Url;
use reth_era_downloader::{EraClient, EraStream, EraStreamConfig};
use std::str::FromStr;
use tempfile::tempdir;
use test_case::test_case;
/// Starting the stream should fetch the remote file list and then yield the downloaded
/// files in ascending order of their ERA1 number.
#[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")]
#[test_case("https://era1.ethportal.net/"; "ethportal")]
#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")]
#[tokio::test]
async fn test_streaming_files_after_fetching_file_list(url: &str) {
    let base_url = Url::from_str(url).unwrap();
    let folder = tempdir().unwrap();
    let folder = folder.path();
    let client = EraClient::new(StubClient, base_url, folder);
    let mut stream = EraStream::new(
        client,
        EraStreamConfig::default().with_max_files(2).with_max_concurrent_downloads(1),
    );
    // First file, then the second — order must match the remote index.
    let expected_file = folder.join("mainnet-00000-5ec1ffb8.era1").into_boxed_path();
    let actual_file = stream.next().await.unwrap().unwrap();
    assert_eq!(actual_file.as_ref(), expected_file.as_ref());
    let expected_file = folder.join("mainnet-00001-a5364e9a.era1").into_boxed_path();
    let actual_file = stream.next().await.unwrap().unwrap();
    assert_eq!(actual_file.as_ref(), expected_file.as_ref());
}
#[tokio::test]
async fn test_streaming_files_after_fetching_file_list_into_missing_folder_fails() {
    let base_url = Url::from_str("https://era.ithaca.xyz/era1/index.html").unwrap();
    // The `tempdir()` guard is dropped right here, deleting the directory — the client is
    // then deliberately pointed at a folder that no longer exists.
    let folder = tempdir().unwrap().path().to_owned();
    let client = EraClient::new(StubClient, base_url, folder);
    let mut stream = EraStream::new(
        client,
        EraStreamConfig::default().with_max_files(2).with_max_concurrent_downloads(1),
    );
    let actual_error = stream.next().await.unwrap().unwrap_err().to_string();
    // NOTE(review): this message is Unix-specific; the assertion would fail on Windows.
    let expected_error = "No such file or directory (os error 2)".to_owned();
    assert_eq!(actual_error, expected_error);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/tests/it/download.rs | crates/era-downloader/tests/it/download.rs | //! Tests fetching a file
use crate::StubClient;
use reqwest::Url;
use reth_era_downloader::EraClient;
use std::str::FromStr;
use tempfile::tempdir;
use test_case::test_case;
/// After fetching the file list, index 0 must resolve to the first ERA1 file's URL.
#[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")]
#[test_case("https://era1.ethportal.net/"; "ethportal")]
#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")]
#[tokio::test]
async fn test_getting_file_url_after_fetching_file_list(url: &str) {
    let base_url = Url::from_str(url).unwrap();
    let folder = tempdir().unwrap();
    let folder = folder.path();
    let client = EraClient::new(StubClient, base_url.clone(), folder);
    client.fetch_file_list().await.unwrap();
    let expected_url = Some(base_url.join("mainnet-00000-5ec1ffb8.era1").unwrap());
    let actual_url = client.url(0).await.unwrap();
    assert_eq!(actual_url, expected_url);
}
/// Downloading the first listed file must add exactly one `.era1` file to the folder.
#[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")]
#[test_case("https://era1.ethportal.net/"; "ethportal")]
#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")]
#[tokio::test]
async fn test_getting_file_after_fetching_file_list(url: &str) {
    let base_url = Url::from_str(url).unwrap();
    let folder = tempdir().unwrap();
    let folder = folder.path();
    let mut client = EraClient::new(StubClient, base_url, folder);
    client.fetch_file_list().await.unwrap();
    let url = client.url(0).await.unwrap().unwrap();
    // Fetching the list writes only bookkeeping files, so no `.era1` files are counted yet.
    let expected_count = 0;
    let actual_count = client.files_count().await;
    assert_eq!(actual_count, expected_count);
    client.download_to_file(url).await.unwrap();
    let expected_count = 1;
    let actual_count = client.files_count().await;
    assert_eq!(actual_count, expected_count);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/tests/it/fs.rs | crates/era-downloader/tests/it/fs.rs | use alloy_primitives::hex::ToHexExt;
use futures_util::StreamExt;
use reth_era_downloader::read_dir;
use sha2::Digest;
use tokio::fs;
// Payloads of the two stub ERA1 files written into the test directory.
const CONTENTS_0: &[u8; 1] = b"a";
const CONTENTS_1: &[u8; 1] = b"b";
// Matrix over the contents of `checksums.txt`: all valid, all invalid, partially invalid,
// and missing entirely. When `checksums` is `Err(..)`, `read_dir` itself must fail with
// that message and the `expected` per-file results are unused.
#[test_case::test_case(
    Ok(format!(
        "{}\n{}",
        sha2::Sha256::digest(CONTENTS_0).encode_hex(),
        sha2::Sha256::digest(CONTENTS_1).encode_hex()
    )),
    [
        Ok("mainnet-00000-5ec1ffb8.era1"),
        Ok("mainnet-00001-a5364e9a.era1"),
    ];
    "Reads all files successfully"
)]
#[test_case::test_case(
    Ok("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\
    bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
    [
        Err("Checksum mismatch, \
        got: ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb, \
        expected: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
        Err("Checksum mismatch, \
        got: 3e23e8160039594a33894f6564e1b1348bbd7a0088d42c4acb73eeaed59c009d, \
        expected: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
    ];
    "With invalid checksums fails"
)]
#[test_case::test_case(
    Ok(format!(
        "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n{}",
        sha2::Sha256::digest(CONTENTS_1).encode_hex()
    )),
    [
        Err("Checksum mismatch, \
        got: ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb, \
        expected: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
        Ok("mainnet-00001-a5364e9a.era1"),
    ];
    "With one invalid checksum partially fails"
)]
#[test_case::test_case(
    Err::<&str, _>("Missing file `checksums.txt` in the `dir`"),
    [
        Err("Checksum mismatch, \
        got: ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb, \
        expected: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
        Ok("mainnet-00001-a5364e9a.era1"),
    ];
    "With missing checksums file fails"
)]
#[tokio::test]
async fn test_streaming_from_local_directory(
    checksums: Result<impl AsRef<[u8]>, &str>,
    expected: [Result<&str, &str>; 2],
) {
    let folder = tempfile::tempdir().unwrap();
    let folder = folder.path().to_owned();
    if let Ok(checksums) = &checksums {
        fs::write(folder.join("checksums.txt"), checksums).await.unwrap();
    }
    fs::write(folder.join("mainnet-00000-5ec1ffb8.era1"), CONTENTS_0).await.unwrap();
    fs::write(folder.join("mainnet-00001-a5364e9a.era1"), CONTENTS_1).await.unwrap();
    let folder = folder.into_boxed_path();
    let actual = read_dir(folder.clone(), 0);
    match checksums {
        Ok(_) => match actual {
            Ok(mut stream) => {
                // Each yielded item is compared against the per-file expectation.
                for expected in expected {
                    let actual = stream.next().await.unwrap();
                    match expected {
                        Ok(expected_file) => {
                            let actual_file = actual.expect("should be ok");
                            let expected_file = folder.join(expected_file).into_boxed_path();
                            assert_eq!(actual_file, expected_file)
                        }
                        Err(expected_err) => {
                            let actual_err = actual.expect_err("should be err").to_string();
                            assert_eq!(actual_err, expected_err)
                        }
                    }
                }
            }
            Err(err) => panic!("expected ok, got: {err:?}"),
        },
        Err(expected_err) => match actual {
            Ok(_) => panic!("should be err"),
            Err(actual_err) => assert_eq!(actual_err.to_string(), expected_err),
        },
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/tests/it/list.rs | crates/era-downloader/tests/it/list.rs | //! Tests fetching a list of files
use crate::StubClient;
use reqwest::Url;
use reth_era_downloader::EraClient;
use std::str::FromStr;
use tempfile::tempdir;
use test_case::test_case;
#[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")]
#[test_case("https://era1.ethportal.net/"; "ethportal")]
#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")]
#[tokio::test]
async fn test_getting_file_name_after_fetching_file_list(url: &str) {
let url = Url::from_str(url).unwrap();
let folder = tempdir().unwrap();
let folder = folder.path();
let client = EraClient::new(StubClient, url, folder);
client.fetch_file_list().await.unwrap();
let actual = client.number_to_file_name(600).await.unwrap();
let expected = Some("mainnet-00600-a81ae85f.era1".to_owned());
assert_eq!(actual, expected);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/tests/it/checksums.rs | crates/era-downloader/tests/it/checksums.rs | use bytes::Bytes;
use futures::Stream;
use futures_util::StreamExt;
use reqwest::{IntoUrl, Url};
use reth_era_downloader::{EraClient, EraStream, EraStreamConfig, HttpClient};
use std::str::FromStr;
use tempfile::tempdir;
use test_case::test_case;
/// Both downloads must fail verification because [`FailingClient`] serves checksum lines
/// that cannot match the payloads; each error names the file and its full local path.
#[test_case("https://mainnet.era1.nimbus.team/"; "nimbus")]
#[test_case("https://era1.ethportal.net/"; "ethportal")]
#[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")]
#[tokio::test]
async fn test_invalid_checksum_returns_error(url: &str) {
    let base_url = Url::from_str(url).unwrap();
    let folder = tempdir().unwrap();
    let folder = folder.path();
    let client = EraClient::new(FailingClient, base_url, folder);
    let mut stream = EraStream::new(
        client,
        EraStreamConfig::default().with_max_files(2).with_max_concurrent_downloads(1),
    );
    let actual_err = stream.next().await.unwrap().unwrap_err().to_string();
    let expected_err = format!(
        "Checksum mismatch, \
        got: 87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7, \
        expected: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \
        for mainnet-00000-5ec1ffb8.era1 at {}/mainnet-00000-5ec1ffb8.era1",
        folder.display()
    );
    assert_eq!(actual_err, expected_err);
    let actual_err = stream.next().await.unwrap().unwrap_err().to_string();
    let expected_err = format!(
        "Checksum mismatch, \
        got: 0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f, \
        expected: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb \
        for mainnet-00001-a5364e9a.era1 at {}/mainnet-00001-a5364e9a.era1",
        folder.display()
    );
    assert_eq!(actual_err, expected_err);
}
// Checksum lines that deliberately do not match the SHA-256 of the served payloads,
// so every download in this test fails verification.
const CHECKSUMS: &[u8] = b"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
/// An HTTP client pre-programmed with canned answers to received calls.
/// Panics if it receives an unknown call.
///
/// Unlike the happy-path stub, it serves [`CHECKSUMS`] values that never match the payloads.
#[derive(Debug, Clone)]
struct FailingClient;
impl HttpClient for FailingClient {
    /// Returns the canned response body for `url` as a single-chunk stream; the checksum
    /// endpoints serve the intentionally mismatching [`CHECKSUMS`] list.
    async fn get<U: IntoUrl + Send + Sync>(
        &self,
        url: U,
    ) -> eyre::Result<impl Stream<Item = eyre::Result<Bytes>> + Send + Sync + Unpin> {
        let url = url.into_url().unwrap();
        Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() {
            "https://mainnet.era1.nimbus.team/" => Bytes::from_static(crate::NIMBUS),
            "https://era1.ethportal.net/" => Bytes::from_static(crate::ETH_PORTAL),
            "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(crate::ITHACA),
            "https://mainnet.era1.nimbus.team/checksums.txt" |
            "https://era1.ethportal.net/checksums.txt" |
            "https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(CHECKSUMS),
            "https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" |
            "https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" |
            "https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => {
                Bytes::from_static(crate::MAINNET_0)
            }
            "https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" |
            "https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" |
            "https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => {
                Bytes::from_static(crate::MAINNET_1)
            }
            v => unimplemented!("Unexpected URL \"{v}\""),
        })]))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/era-downloader/tests/it/main.rs | crates/era-downloader/tests/it/main.rs | //! Root module for test modules, so that the tests are built into a single binary.
mod checksums;
mod download;
mod fs;
mod list;
mod stream;
const fn main() {}
use bytes::Bytes;
use futures::Stream;
use reqwest::IntoUrl;
use reth_era_downloader::HttpClient;
// HTML index fixtures for the three hosting endpoints.
pub(crate) const NIMBUS: &[u8] = include_bytes!("../res/nimbus.html");
pub(crate) const ETH_PORTAL: &[u8] = include_bytes!("../res/ethportal.html");
pub(crate) const ITHACA: &[u8] = include_bytes!("../res/ithaca.html");
// Checksum list and the first two mainnet ERA1 file fixtures.
pub(crate) const CHECKSUMS: &[u8] = include_bytes!("../res/checksums.txt");
pub(crate) const MAINNET_0: &[u8] = include_bytes!("../res/mainnet-00000-5ec1ffb8.era1");
pub(crate) const MAINNET_1: &[u8] = include_bytes!("../res/mainnet-00001-a5364e9a.era1");
/// An HTTP client pre-programmed with canned answers to received calls.
/// Panics if it receives an unknown call.
///
/// Responses are backed by the fixture files under `tests/res`.
#[derive(Debug, Clone)]
struct StubClient;
impl HttpClient for StubClient {
    /// Returns the canned fixture body for `url` as a single-chunk stream.
    async fn get<U: IntoUrl + Send + Sync>(
        &self,
        url: U,
    ) -> eyre::Result<impl Stream<Item = eyre::Result<Bytes>> + Send + Sync + Unpin> {
        let url = url.into_url().unwrap();
        Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() {
            "https://mainnet.era1.nimbus.team/" => Bytes::from_static(NIMBUS),
            "https://era1.ethportal.net/" => Bytes::from_static(ETH_PORTAL),
            "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(ITHACA),
            "https://mainnet.era1.nimbus.team/checksums.txt" |
            "https://era1.ethportal.net/checksums.txt" |
            "https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(CHECKSUMS),
            "https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" |
            "https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" |
            "https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => {
                Bytes::from_static(MAINNET_0)
            }
            "https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" |
            "https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" |
            "https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => {
                Bytes::from_static(MAINNET_1)
            }
            v => unimplemented!("Unexpected URL \"{v}\""),
        })]))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/util/src/lib.rs | crates/engine/util/src/lib.rs | //! Collection of various stream utilities for consensus engine.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
use futures::{Future, Stream};
use reth_engine_primitives::BeaconEngineMessage;
use reth_payload_primitives::PayloadTypes;
use std::path::PathBuf;
use tokio_util::either::Either;
pub mod engine_store;
use engine_store::EngineStoreStream;
pub mod skip_fcu;
use skip_fcu::EngineSkipFcu;
pub mod skip_new_payload;
use skip_new_payload::EngineSkipNewPayload;
pub mod reorg;
use reorg::EngineReorg;
/// The result type for `maybe_reorg` method.
///
/// `Left` carries the reorg-wrapping stream; `Right` passes the original stream through
/// unchanged — presumably when reorging is disabled, mirroring the other `maybe_*` helpers
/// in this trait (TODO confirm against `maybe_reorg` itself).
type MaybeReorgResult<S, T, Provider, Evm, Validator, E> =
    Result<Either<EngineReorg<S, T, Provider, Evm, Validator>, S>, E>;
/// The collection of stream extensions for engine API message stream.
pub trait EngineMessageStreamExt<T: PayloadTypes>: Stream<Item = BeaconEngineMessage<T>> {
/// Skips the specified number of [`BeaconEngineMessage::ForkchoiceUpdated`] messages from the
/// engine message stream.
fn skip_fcu(self, count: usize) -> EngineSkipFcu<Self>
where
Self: Sized,
{
EngineSkipFcu::new(self, count)
}
/// If the count is [Some], returns the stream that skips the specified number of
/// [`BeaconEngineMessage::ForkchoiceUpdated`] messages. Otherwise, returns `Self`.
fn maybe_skip_fcu(self, maybe_count: Option<usize>) -> Either<EngineSkipFcu<Self>, Self>
where
Self: Sized,
{
if let Some(count) = maybe_count {
Either::Left(self.skip_fcu(count))
} else {
Either::Right(self)
}
}
/// Skips the specified number of [`BeaconEngineMessage::NewPayload`] messages from the
/// engine message stream.
fn skip_new_payload(self, count: usize) -> EngineSkipNewPayload<Self>
where
Self: Sized,
{
EngineSkipNewPayload::new(self, count)
}
/// If the count is [Some], returns the stream that skips the specified number of
/// [`BeaconEngineMessage::NewPayload`] messages. Otherwise, returns `Self`.
fn maybe_skip_new_payload(
self,
maybe_count: Option<usize>,
) -> Either<EngineSkipNewPayload<Self>, Self>
where
Self: Sized,
{
if let Some(count) = maybe_count {
Either::Left(self.skip_new_payload(count))
} else {
Either::Right(self)
}
}
/// Stores engine messages at the specified location.
fn store_messages(self, path: PathBuf) -> EngineStoreStream<Self>
where
Self: Sized,
{
EngineStoreStream::new(self, path)
}
/// If the path is [Some], returns the stream that stores engine messages at the specified
/// location. Otherwise, returns `Self`.
fn maybe_store_messages(
self,
maybe_path: Option<PathBuf>,
) -> Either<EngineStoreStream<Self>, Self>
where
Self: Sized,
{
if let Some(path) = maybe_path {
Either::Left(self.store_messages(path))
} else {
Either::Right(self)
}
}
/// Creates reorgs with specified frequency.
fn reorg<Provider, Evm, Validator>(
self,
provider: Provider,
evm_config: Evm,
payload_validator: Validator,
frequency: usize,
depth: Option<usize>,
) -> EngineReorg<Self, T, Provider, Evm, Validator>
where
Self: Sized,
{
EngineReorg::new(
self,
provider,
evm_config,
payload_validator,
frequency,
depth.unwrap_or_default(),
)
}
/// If frequency is [Some], returns the stream that creates reorgs with
/// specified frequency. Otherwise, returns `Self`.
///
/// The `payload_validator_fn` closure is only called if `frequency` is `Some`,
/// allowing for lazy initialization of the validator.
fn maybe_reorg<Provider, Evm, Validator, E, F, Fut>(
self,
provider: Provider,
evm_config: Evm,
payload_validator_fn: F,
frequency: Option<usize>,
depth: Option<usize>,
) -> impl Future<Output = MaybeReorgResult<Self, T, Provider, Evm, Validator, E>> + Send
where
Self: Sized + Send,
Provider: Send,
Evm: Send,
F: FnOnce() -> Fut + Send,
Fut: Future<Output = Result<Validator, E>> + Send,
{
async move {
if let Some(frequency) = frequency {
let validator = payload_validator_fn().await?;
Ok(Either::Left(reorg::EngineReorg::new(
self,
provider,
evm_config,
validator,
frequency,
depth.unwrap_or_default(),
)))
} else {
Ok(Either::Right(self))
}
}
}
}
// Blanket implementation: any stream of engine messages automatically gains the
// extension combinators defined on `EngineMessageStreamExt`.
impl<T, S> EngineMessageStreamExt<T> for S
where
    T: PayloadTypes,
    S: Stream<Item = BeaconEngineMessage<T>>,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/util/src/skip_fcu.rs | crates/engine/util/src/skip_fcu.rs | //! Stream wrapper that skips specified number of FCUs.
use futures::{Stream, StreamExt};
use reth_engine_primitives::{BeaconEngineMessage, OnForkChoiceUpdated};
use reth_payload_primitives::PayloadTypes;
use std::{
pin::Pin,
task::{ready, Context, Poll},
};
/// Engine API stream wrapper that skips the specified number of forkchoice updated messages.
#[derive(Debug)]
#[pin_project::pin_project]
pub struct EngineSkipFcu<S> {
    /// The wrapped engine message stream.
    #[pin]
    stream: S,
    /// The number of FCUs to skip.
    threshold: usize,
    /// Current count of skipped FCUs.
    ///
    /// Reset to zero each time an FCU is forwarded downstream.
    skipped: usize,
}
impl<S> EngineSkipFcu<S> {
    /// Creates new [`EngineSkipFcu`] stream wrapper.
    ///
    /// `threshold` is the number of consecutive FCU messages swallowed between
    /// forwarded ones.
    pub const fn new(stream: S, threshold: usize) -> Self {
        // Seed the counter at `threshold` so the very first FCU passes straight
        // through before any skipping begins.
        let skipped = threshold;
        Self { stream, threshold, skipped }
    }
}
impl<S, T> Stream for EngineSkipFcu<S>
where
    S: Stream<Item = BeaconEngineMessage<T>>,
    T: PayloadTypes,
{
    type Item = S::Item;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        loop {
            let next = ready!(this.stream.poll_next_unpin(cx));
            let item = match next {
                Some(BeaconEngineMessage::ForkchoiceUpdated {
                    state,
                    payload_attrs,
                    tx,
                    version,
                }) => {
                    if this.skipped < this.threshold {
                        *this.skipped += 1;
                        tracing::warn!(target: "engine::stream::skip_fcu", ?state, ?payload_attrs, threshold=this.threshold, skipped=this.skipped, "Skipping FCU");
                        // Answer the sender with SYNCING so it does not hang waiting
                        // for a response to the dropped message.
                        let _ = tx.send(Ok(OnForkChoiceUpdated::syncing()));
                        continue
                    }
                    // Threshold reached: forward this FCU and restart the skip cycle.
                    *this.skipped = 0;
                    Some(BeaconEngineMessage::ForkchoiceUpdated {
                        state,
                        payload_attrs,
                        tx,
                        version,
                    })
                }
                // Non-FCU messages (and stream termination) pass through untouched.
                next => next,
            };
            return Poll::Ready(item)
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/util/src/reorg.rs | crates/engine/util/src/reorg.rs | //! Stream wrapper that simulates reorgs.
use alloy_consensus::{BlockHeader, Transaction};
use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatus};
use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt};
use itertools::Either;
use reth_chainspec::{ChainSpecProvider, EthChainSpec};
use reth_engine_primitives::{
BeaconEngineMessage, BeaconOnNewPayloadError, ExecutionPayload as _, OnForkChoiceUpdated,
};
use reth_engine_tree::tree::EngineValidator;
use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult};
use reth_evm::{
execute::{BlockBuilder, BlockBuilderOutcome},
ConfigureEvm,
};
use reth_payload_primitives::{BuiltPayload, EngineApiMessageVersion, PayloadTypes};
use reth_primitives_traits::{
block::Block as _, BlockBody as _, BlockTy, HeaderTy, SealedBlock, SignedTransaction,
};
use reth_revm::{database::StateProviderDatabase, db::State};
use reth_storage_api::{errors::ProviderError, BlockReader, StateProviderFactory};
use std::{
collections::VecDeque,
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::sync::oneshot;
use tracing::*;
/// Internal state machine for `EngineReorg`.
#[derive(Debug)]
enum EngineReorgState<T: PayloadTypes> {
    /// Forward items from the wrapped stream as-is.
    Forward,
    /// Drain the queued reorg messages before resuming forwarding.
    Reorg { queue: VecDeque<BeaconEngineMessage<T>> },
}
/// Response received for either a reorg `newPayload` (left) or a reorg
/// `forkchoiceUpdated` (right) message.
type EngineReorgResponse = Result<
    Either<Result<PayloadStatus, BeaconOnNewPayloadError>, RethResult<OnForkChoiceUpdated>>,
    oneshot::error::RecvError,
>;
/// Boxed future resolving to an [`EngineReorgResponse`].
type ReorgResponseFut = Pin<Box<dyn Future<Output = EngineReorgResponse> + Send + Sync>>;
/// Engine API stream wrapper that simulates reorgs with specified frequency.
#[derive(Debug)]
#[pin_project::pin_project]
pub struct EngineReorg<S, T: PayloadTypes, Provider, Evm, Validator> {
    /// Underlying stream
    #[pin]
    stream: S,
    /// Database provider.
    provider: Provider,
    /// Evm configuration.
    evm_config: Evm,
    /// Payload validator.
    payload_validator: Validator,
    /// The frequency of reorgs.
    frequency: usize,
    /// The depth of reorgs.
    depth: usize,
    /// The number of forwarded forkchoice states.
    /// This is reset after a reorg.
    forkchoice_states_forwarded: usize,
    /// Current state of the stream.
    state: EngineReorgState<T>,
    /// Last forkchoice state.
    last_forkchoice_state: Option<ForkchoiceState>,
    /// Pending engine responses to reorg messages.
    ///
    /// Responses are drained and logged only; they never surface as stream items.
    reorg_responses: FuturesUnordered<ReorgResponseFut>,
}
impl<S, T: PayloadTypes, Provider, Evm, Validator> EngineReorg<S, T, Provider, Evm, Validator> {
    /// Creates new [`EngineReorg`] stream wrapper.
    pub fn new(
        stream: S,
        provider: Provider,
        evm_config: Evm,
        payload_validator: Validator,
        frequency: usize,
        depth: usize,
    ) -> Self {
        // Begin in pass-through mode: nothing forwarded yet, no recorded
        // forkchoice state, and no outstanding reorg responses.
        Self {
            stream,
            provider,
            evm_config,
            payload_validator,
            frequency,
            depth,
            forkchoice_states_forwarded: 0,
            last_forkchoice_state: None,
            state: EngineReorgState::Forward,
            reorg_responses: FuturesUnordered::new(),
        }
    }
}
impl<S, T, Provider, Evm, Validator> Stream for EngineReorg<S, T, Provider, Evm, Validator>
where
    S: Stream<Item = BeaconEngineMessage<T>>,
    T: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = Evm::Primitives>>,
    Provider: BlockReader<Header = HeaderTy<Evm::Primitives>, Block = BlockTy<Evm::Primitives>>
        + StateProviderFactory
        + ChainSpecProvider,
    Evm: ConfigureEvm,
    Validator: EngineValidator<T, Evm::Primitives>,
{
    type Item = S::Item;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        loop {
            // Drain responses to previously injected reorg messages first; they are
            // only logged and never surfaced to the consumer of this stream.
            if let Poll::Ready(Some(response)) = this.reorg_responses.poll_next_unpin(cx) {
                match response {
                    Ok(Either::Left(Ok(payload_status))) => {
                        debug!(target: "engine::stream::reorg", ?payload_status, "Received response for reorg new payload");
                    }
                    Ok(Either::Left(Err(payload_error))) => {
                        error!(target: "engine::stream::reorg", %payload_error, "Error on reorg new payload");
                    }
                    Ok(Either::Right(Ok(fcu_status))) => {
                        debug!(target: "engine::stream::reorg", ?fcu_status, "Received response for reorg forkchoice update");
                    }
                    Ok(Either::Right(Err(fcu_error))) => {
                        error!(target: "engine::stream::reorg", %fcu_error, "Error on reorg forkchoice update");
                    }
                    Err(_) => {}
                };
                continue
            }
            // While reorging, emit queued messages before polling the inner stream.
            if let EngineReorgState::Reorg { queue } = &mut this.state {
                match queue.pop_front() {
                    Some(msg) => return Poll::Ready(Some(msg)),
                    None => {
                        // Queue exhausted: reset the counter and resume forwarding.
                        *this.forkchoice_states_forwarded = 0;
                        *this.state = EngineReorgState::Forward;
                    }
                }
            }
            let next = ready!(this.stream.poll_next_unpin(cx));
            let item = match (next, &this.last_forkchoice_state) {
                (
                    Some(BeaconEngineMessage::NewPayload { payload, tx }),
                    Some(last_forkchoice_state),
                ) if this.forkchoice_states_forwarded > this.frequency &&
                    // Only enter reorg state if new payload attaches to current head.
                    last_forkchoice_state.head_block_hash == payload.parent_hash() =>
                {
                    // Enter the reorg state.
                    // The current payload will be immediately forwarded by being in front of the
                    // queue. Then we attempt to reorg the current head by generating a payload that
                    // attaches to the head's parent and is based on the non-conflicting
                    // transactions (txs from block `n + 1` that are valid at block `n` according to
                    // consensus checks) from the current payload as well as the corresponding
                    // forkchoice state. We will rely on CL to reorg us back to canonical chain.
                    // TODO: This is an expensive blocking operation, ideally it's spawned as a task
                    // so that the stream could yield the control back.
                    let reorg_block = match create_reorg_head(
                        this.provider,
                        this.evm_config,
                        this.payload_validator,
                        *this.depth,
                        payload.clone(),
                    ) {
                        Ok(result) => result,
                        Err(error) => {
                            error!(target: "engine::stream::reorg", %error, "Error attempting to create reorg head");
                            // Forward the payload and attempt to create reorg on top of
                            // the next one
                            return Poll::Ready(Some(BeaconEngineMessage::NewPayload {
                                payload,
                                tx,
                            }))
                        }
                    };
                    let reorg_forkchoice_state = ForkchoiceState {
                        finalized_block_hash: last_forkchoice_state.finalized_block_hash,
                        safe_block_hash: last_forkchoice_state.safe_block_hash,
                        head_block_hash: reorg_block.hash(),
                    };
                    // Channels for the engine's responses to the injected messages;
                    // polled (and logged) at the top of this loop.
                    let (reorg_payload_tx, reorg_payload_rx) = oneshot::channel();
                    let (reorg_fcu_tx, reorg_fcu_rx) = oneshot::channel();
                    this.reorg_responses.extend([
                        Box::pin(reorg_payload_rx.map_ok(Either::Left)) as ReorgResponseFut,
                        Box::pin(reorg_fcu_rx.map_ok(Either::Right)) as ReorgResponseFut,
                    ]);
                    let queue = VecDeque::from([
                        // Current payload
                        BeaconEngineMessage::NewPayload { payload, tx },
                        // Reorg payload
                        BeaconEngineMessage::NewPayload {
                            payload: T::block_to_payload(reorg_block),
                            tx: reorg_payload_tx,
                        },
                        // Reorg forkchoice state
                        BeaconEngineMessage::ForkchoiceUpdated {
                            state: reorg_forkchoice_state,
                            payload_attrs: None,
                            tx: reorg_fcu_tx,
                            version: EngineApiMessageVersion::default(),
                        },
                    ]);
                    *this.state = EngineReorgState::Reorg { queue };
                    continue
                }
                (
                    Some(BeaconEngineMessage::ForkchoiceUpdated {
                        state,
                        payload_attrs,
                        tx,
                        version,
                    }),
                    _,
                ) => {
                    // Record last forkchoice state forwarded to the engine.
                    // We do not care if it's valid since engine should be able to handle
                    // reorgs that rely on invalid forkchoice state.
                    *this.last_forkchoice_state = Some(state);
                    *this.forkchoice_states_forwarded += 1;
                    Some(BeaconEngineMessage::ForkchoiceUpdated {
                        state,
                        payload_attrs,
                        tx,
                        version,
                    })
                }
                (item, _) => item,
            };
            return Poll::Ready(item)
        }
    }
}
/// Builds a sealed block that replaces the reorg target block.
///
/// Walks back `depth` blocks starting from the parent of `next_payload` and rebuilds an
/// alternative to the block at that height on top of its parent state, using the
/// transactions of the next-higher block (or of `next_payload` itself when `depth == 0`).
/// Transactions that exceed the remaining gas limit or fail consensus-level validation
/// are skipped.
///
/// # Errors
/// Returns an error if the payload is malformed, a required block/header/state cannot be
/// fetched, sender recovery fails, or execution hits a non-validation error.
fn create_reorg_head<Provider, Evm, T, Validator>(
    provider: &Provider,
    evm_config: &Evm,
    payload_validator: &Validator,
    mut depth: usize,
    next_payload: T::ExecutionData,
) -> RethResult<SealedBlock<BlockTy<Evm::Primitives>>>
where
    Provider: BlockReader<Header = HeaderTy<Evm::Primitives>, Block = BlockTy<Evm::Primitives>>
        + StateProviderFactory
        + ChainSpecProvider<ChainSpec: EthChainSpec>,
    Evm: ConfigureEvm,
    T: PayloadTypes<BuiltPayload: BuiltPayload<Primitives = Evm::Primitives>>,
    Validator: EngineValidator<T, Evm::Primitives>,
{
    // Ensure next payload is valid.
    let next_block =
        payload_validator.ensure_well_formed_payload(next_payload).map_err(RethError::msg)?;
    // Fetch reorg target block depending on its depth and its parent.
    let mut previous_hash = next_block.parent_hash();
    let mut candidate_transactions = next_block.into_body().transactions().to_vec();
    let reorg_target = 'target: {
        loop {
            let reorg_target = provider
                .block_by_hash(previous_hash)?
                .ok_or_else(|| ProviderError::HeaderNotFound(previous_hash.into()))?;
            if depth == 0 {
                break 'target reorg_target.seal_slow()
            }
            depth -= 1;
            previous_hash = reorg_target.header().parent_hash();
            candidate_transactions = reorg_target.into_body().into_transactions();
        }
    };
    let reorg_target_parent = provider
        .sealed_header_by_hash(reorg_target.header().parent_hash())?
        .ok_or_else(|| ProviderError::HeaderNotFound(reorg_target.header().parent_hash().into()))?;
    debug!(target: "engine::stream::reorg", number = reorg_target.header().number(), hash = %previous_hash, "Selected reorg target");
    // Configure state
    let state_provider = provider.state_by_block_hash(reorg_target.header().parent_hash())?;
    let mut state = State::builder()
        .with_database_ref(StateProviderDatabase::new(&state_provider))
        .with_bundle_update()
        .build();
    let ctx = evm_config.context_for_block(&reorg_target);
    let evm = evm_config.evm_for_block(&mut state, &reorg_target);
    let mut builder = evm_config.create_block_builder(evm, &reorg_target_parent, ctx);
    builder.apply_pre_execution_changes()?;
    let mut cumulative_gas_used = 0;
    for tx in candidate_transactions {
        // ensure we still have capacity for this transaction
        if cumulative_gas_used + tx.gas_limit() > reorg_target.gas_limit() {
            continue
        }
        let tx_recovered =
            tx.try_into_recovered().map_err(|_| ProviderError::SenderRecoveryError)?;
        let gas_used = match builder.execute_transaction(tx_recovered) {
            Ok(gas_used) => gas_used,
            Err(BlockExecutionError::Validation(BlockValidationError::InvalidTx {
                hash,
                error,
            })) => {
                trace!(target: "engine::stream::reorg", hash = %hash, ?error, "Error executing transaction from next block");
                continue
            }
            // Treat error as fatal
            Err(error) => return Err(RethError::Execution(error)),
        };
        cumulative_gas_used += gas_used;
    }
    // Finish building and seal the replacement block.
    let BlockBuilderOutcome { block, .. } = builder.finish(&state_provider)?;
    Ok(block.into_sealed_block())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/util/src/skip_new_payload.rs | crates/engine/util/src/skip_new_payload.rs | //! Stream wrapper that skips specified number of new payload messages.
use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum};
use futures::{Stream, StreamExt};
use reth_engine_primitives::{BeaconEngineMessage, ExecutionPayload};
use reth_payload_primitives::PayloadTypes;
use std::{
pin::Pin,
task::{ready, Context, Poll},
};
/// Engine API stream wrapper that skips the specified number of new payload messages.
#[derive(Debug)]
#[pin_project::pin_project]
pub struct EngineSkipNewPayload<S> {
    /// The wrapped engine message stream.
    #[pin]
    stream: S,
    /// The number of messages to skip.
    threshold: usize,
    /// Current count of skipped messages.
    ///
    /// Reset to zero each time a new payload is forwarded downstream.
    skipped: usize,
}
impl<S> EngineSkipNewPayload<S> {
    /// Creates new [`EngineSkipNewPayload`] stream wrapper.
    ///
    /// The first `threshold` new-payload messages will be skipped before one is
    /// let through.
    pub const fn new(stream: S, threshold: usize) -> Self {
        let skipped = 0;
        Self { stream, threshold, skipped }
    }
}
impl<S, T> Stream for EngineSkipNewPayload<S>
where
    S: Stream<Item = BeaconEngineMessage<T>>,
    T: PayloadTypes,
{
    type Item = S::Item;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        loop {
            let next = ready!(this.stream.poll_next_unpin(cx));
            let item = match next {
                Some(BeaconEngineMessage::NewPayload { payload, tx }) => {
                    if this.skipped < this.threshold {
                        *this.skipped += 1;
                        tracing::warn!(
                            target: "engine::stream::skip_new_payload",
                            block_number = payload.block_number(),
                            block_hash = %payload.block_hash(),
                            ?payload,
                            threshold=this.threshold,
                            skipped=this.skipped, "Skipping new payload"
                        );
                        // Respond with SYNCING so the sender does not block waiting
                        // for a response to the dropped message.
                        let _ = tx.send(Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)));
                        continue
                    }
                    // Threshold reached: forward this payload and restart the cycle.
                    *this.skipped = 0;
                    Some(BeaconEngineMessage::NewPayload { payload, tx })
                }
                // All other messages (and stream termination) pass through untouched.
                next => next,
            };
            return Poll::Ready(item)
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/util/src/engine_store.rs | crates/engine/util/src/engine_store.rs | //! Stores engine API messages to disk for later inspection and replay.
use alloy_rpc_types_engine::ForkchoiceState;
use futures::{Stream, StreamExt};
use reth_engine_primitives::{BeaconEngineMessage, ExecutionPayload};
use reth_fs_util as fs;
use reth_payload_primitives::PayloadTypes;
use serde::{Deserialize, Serialize};
use std::{
collections::BTreeMap,
path::PathBuf,
pin::Pin,
task::{ready, Context, Poll},
time::SystemTime,
};
use tracing::*;
/// A message from the engine API that has been stored to disk.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum StoredEngineApiMessage<T: PayloadTypes> {
    /// The on-disk representation of an `engine_forkchoiceUpdated` method call.
    ForkchoiceUpdated {
        /// The [`ForkchoiceState`] sent in the persisted call.
        state: ForkchoiceState,
        /// The payload attributes sent in the persisted call, if any.
        payload_attrs: Option<T::PayloadAttributes>,
    },
    /// The on-disk representation of an `engine_newPayload` method call.
    NewPayload {
        /// The [`PayloadTypes::ExecutionData`] sent in the persisted call.
        ///
        /// Flattened so the payload's fields appear at the top level of the stored
        /// JSON object.
        #[serde(flatten)]
        payload: T::ExecutionData,
    },
}
/// This can read and write engine API messages in a specific directory.
///
/// File names start with the receive timestamp in milliseconds, which allows stored
/// messages to be iterated back in chronological order.
#[derive(Debug)]
pub struct EngineMessageStore {
    /// The path to the directory that stores the engine API messages.
    path: PathBuf,
}
impl EngineMessageStore {
    /// Creates a new [`EngineMessageStore`] at the given path.
    ///
    /// The path is expected to be a directory, where individual message JSON files will be stored.
    pub const fn new(path: PathBuf) -> Self {
        Self { path }
    }
    /// Stores the received [`BeaconEngineMessage`] to disk, appending the `received_at` time to the
    /// path.
    ///
    /// # Errors
    /// Returns an error if the store directory cannot be created, if serialization or the
    /// file write fails, or if `received_at` is before the UNIX epoch.
    pub fn on_message<T>(
        &self,
        msg: &BeaconEngineMessage<T>,
        received_at: SystemTime,
    ) -> eyre::Result<()>
    where
        T: PayloadTypes,
    {
        fs::create_dir_all(&self.path)?; // ensure that store path had been created
        // Propagate instead of panicking if the system clock reports a time before
        // the UNIX epoch.
        let timestamp = received_at.duration_since(SystemTime::UNIX_EPOCH)?.as_millis();
        match msg {
            BeaconEngineMessage::ForkchoiceUpdated {
                state,
                payload_attrs,
                tx: _tx,
                version: _version,
            } => {
                let filename = format!("{}-fcu-{}.json", timestamp, state.head_block_hash);
                fs::write(
                    self.path.join(filename),
                    serde_json::to_vec(&StoredEngineApiMessage::<T>::ForkchoiceUpdated {
                        state: *state,
                        payload_attrs: payload_attrs.clone(),
                    })?,
                )?;
            }
            BeaconEngineMessage::NewPayload { payload, tx: _tx } => {
                let filename = format!("{}-new_payload-{}.json", timestamp, payload.block_hash());
                fs::write(
                    self.path.join(filename),
                    serde_json::to_vec(&StoredEngineApiMessage::<T>::NewPayload {
                        payload: payload.clone(),
                    })?,
                )?;
            }
        };
        Ok(())
    }
    /// Finds and iterates through any stored engine API message files, ordered by timestamp.
    pub fn engine_messages_iter(&self) -> eyre::Result<impl Iterator<Item = PathBuf>> {
        // Group files by their millisecond timestamp prefix; the `BTreeMap` yields
        // them in ascending time order.
        let mut filenames_by_ts = BTreeMap::<u64, Vec<PathBuf>>::default();
        for entry in fs::read_dir(&self.path)? {
            let entry = entry?;
            let filename = entry.file_name();
            if let Some(filename) = filename.to_str().filter(|n| n.ends_with(".json")) {
                if let Some(Ok(timestamp)) = filename.split('-').next().map(|n| n.parse::<u64>()) {
                    filenames_by_ts.entry(timestamp).or_default().push(entry.path());
                    tracing::debug!(target: "engine::store", timestamp, filename, "Queued engine API message");
                } else {
                    tracing::warn!(target: "engine::store", %filename, "Could not parse timestamp from filename")
                }
            } else {
                tracing::warn!(target: "engine::store", ?filename, "Skipping non json file");
            }
        }
        Ok(filenames_by_ts.into_iter().flat_map(|(_, paths)| paths))
    }
}
/// A wrapper stream that stores Engine API messages in
/// the specified directory.
#[derive(Debug)]
#[pin_project::pin_project]
pub struct EngineStoreStream<S> {
    /// Inner message stream.
    #[pin]
    stream: S,
    /// Engine message store.
    ///
    /// Writes one JSON file per observed message.
    store: EngineMessageStore,
}
impl<S> EngineStoreStream<S> {
    /// Create new engine store stream wrapper.
    ///
    /// Every message observed on `stream` will be persisted as JSON under `path`.
    pub const fn new(stream: S, path: PathBuf) -> Self {
        let store = EngineMessageStore::new(path);
        Self { stream, store }
    }
}
impl<S, T> Stream for EngineStoreStream<S>
where
    S: Stream<Item = BeaconEngineMessage<T>>,
    T: PayloadTypes,
{
    type Item = S::Item;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        let next = ready!(this.stream.poll_next_unpin(cx));
        if let Some(msg) = &next {
            // Persistence is best-effort: a failed write is logged but never blocks
            // or alters the forwarded message.
            if let Err(error) = this.store.on_message(msg, SystemTime::now()) {
                error!(target: "engine::stream::store", ?msg, %error, "Error handling Engine API message");
            }
        }
        Poll::Ready(next)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/service/src/lib.rs | crates/engine/service/src/lib.rs | //! Engine service implementation.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
/// Engine Service
pub mod service;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/service/src/service.rs | crates/engine/service/src/service.rs | use futures::{Stream, StreamExt};
use pin_project::pin_project;
use reth_chainspec::EthChainSpec;
use reth_consensus::{ConsensusError, FullConsensus};
use reth_engine_primitives::{BeaconEngineMessage, ConsensusEngineEvent};
use reth_engine_tree::{
backfill::PipelineSync,
backup::BackupHandle,
download::BasicBlockDownloader,
engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler},
persistence::PersistenceHandle,
tree::{EngineApiTreeHandler, EngineValidator, TreeConfig},
};
pub use reth_engine_tree::{
chain::{ChainEvent, ChainOrchestrator},
engine::EngineApiEvent,
};
use reth_ethereum_primitives::EthPrimitives;
use reth_evm::ConfigureEvm;
use reth_network_p2p::BlockClient;
use reth_node_types::{BlockTy, NodeTypes};
use reth_payload_builder::PayloadBuilderHandle;
use reth_provider::{
providers::{BlockchainProvider, ProviderNodeTypes},
ProviderFactory,
};
use reth_prune::PrunerWithFactory;
use reth_stages_api::{MetricEventsSender, Pipeline};
use reth_tasks::TaskSpawner;
use std::{
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
// seismic imports that upstream doesn't use
use reth_node_core::dirs::{ChainPath, DataDirPath};
/// Alias for consensus engine stream.
pub type EngineMessageStream<T> = Pin<Box<dyn Stream<Item = BeaconEngineMessage<T>> + Send + Sync>>;
/// Alias for chain orchestrator.
///
/// Couples the live engine-API handler (request handler, incoming message stream and
/// block downloader) with pipeline-based backfill sync.
type EngineServiceType<N, Client> = ChainOrchestrator<
    EngineHandler<
        EngineApiRequestHandler<
            EngineApiRequest<<N as NodeTypes>::Payload, <N as NodeTypes>::Primitives>,
            <N as NodeTypes>::Primitives,
        >,
        EngineMessageStream<<N as NodeTypes>::Payload>,
        BasicBlockDownloader<Client, BlockTy<N>>,
    >,
    PipelineSync<N>,
>;
/// The type that drives the chain forward and communicates progress.
#[pin_project]
#[expect(missing_debug_implementations)]
// TODO(mattsse): remove hidden once fixed : <https://github.com/rust-lang/rust/issues/135363>
// otherwise rustdoc fails to resolve the alias
#[doc(hidden)]
pub struct EngineService<N, Client>
where
    N: ProviderNodeTypes,
    Client: BlockClient<Block = BlockTy<N>> + 'static,
{
    /// Chain orchestrator combining live engine-API handling with backfill sync.
    orchestrator: EngineServiceType<N, Client>,
}
impl<N, Client> EngineService<N, Client>
where
    N: ProviderNodeTypes,
    Client: BlockClient<Block = BlockTy<N>> + 'static,
{
    /// Constructor for `EngineService`.
    ///
    /// Wires the block downloader, persistence and backup services, the engine API
    /// tree handler and pipeline backfill sync into a single [`ChainOrchestrator`].
    #[expect(clippy::too_many_arguments)]
    pub fn new<V, C>(
        consensus: Arc<dyn FullConsensus<N::Primitives, Error = ConsensusError>>,
        chain_spec: Arc<N::ChainSpec>,
        client: Client,
        incoming_requests: EngineMessageStream<N::Payload>,
        pipeline: Pipeline<N>,
        pipeline_task_spawner: Box<dyn TaskSpawner>,
        provider: ProviderFactory<N>,
        blockchain_db: BlockchainProvider<N>,
        pruner: PrunerWithFactory<ProviderFactory<N>>,
        payload_builder: PayloadBuilderHandle<N::Payload>,
        payload_validator: V,
        tree_config: TreeConfig,
        sync_metrics_tx: MetricEventsSender,
        evm_config: C,
        data_dir: ChainPath<DataDirPath>,
    ) -> Self
    where
        V: EngineValidator<N::Payload>,
        C: ConfigureEvm<Primitives = N::Primitives> + 'static,
    {
        // Pick the engine API flavor based on the chain spec.
        let engine_kind =
            if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum };
        let downloader = BasicBlockDownloader::new(client, consensus.clone());
        // NOTE(review): persistence is spawned with the concrete `EthPrimitives` type
        // even though the service is generic over `N::Primitives` — confirm intended.
        let persistence_handle =
            PersistenceHandle::<EthPrimitives>::spawn_service(provider, pruner, sync_metrics_tx);
        let canonical_in_memory_state = blockchain_db.canonical_in_memory_state();
        // Seismic-specific: background backup service rooted at the node's data dir.
        let backup_handle = BackupHandle::spawn_service(data_dir);
        let (to_tree_tx, from_tree) = EngineApiTreeHandler::<N::Primitives, _, _, _, _>::spawn_new(
            blockchain_db,
            consensus,
            payload_validator,
            persistence_handle,
            payload_builder,
            canonical_in_memory_state,
            tree_config,
            engine_kind,
            evm_config,
            backup_handle,
        );
        let engine_handler = EngineApiRequestHandler::new(to_tree_tx, from_tree);
        let handler = EngineHandler::new(engine_handler, downloader, incoming_requests);
        let backfill_sync = PipelineSync::new(pipeline, pipeline_task_spawner);
        Self { orchestrator: ChainOrchestrator::new(handler, backfill_sync) }
    }
    /// Returns a mutable reference to the orchestrator.
    pub fn orchestrator_mut(&mut self) -> &mut EngineServiceType<N, Client> {
        &mut self.orchestrator
    }
}
impl<N, Client> Stream for EngineService<N, Client>
where
    N: ProviderNodeTypes,
    Client: BlockClient<Block = BlockTy<N>> + 'static,
{
    type Item = ChainEvent<ConsensusEngineEvent<N::Primitives>>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Delegate directly to the inner orchestrator through the pin projection.
        let mut orchestrator = self.project().orchestrator;
        StreamExt::poll_next_unpin(&mut orchestrator, cx)
    }
}
/// Potential error returned by `EngineService`.
///
/// Currently a detail-free placeholder; it carries no variant information.
#[derive(Debug, thiserror::Error)]
#[error("Engine service error.")]
pub struct EngineServiceError {}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_chainspec::{ChainSpecBuilder, MAINNET};
    use reth_engine_primitives::{BeaconEngineMessage, NoopInvalidBlockHook};
    use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::BasicEngineValidator};
    use reth_ethereum_consensus::EthBeaconConsensus;
    use reth_ethereum_engine_primitives::EthEngineTypes;
    use reth_evm_ethereum::EthEvmConfig;
    use reth_exex_types::FinishedExExHeight;
    use reth_network_p2p::test_utils::TestFullBlockClient;
    use reth_node_core::dirs::MaybePlatformPath;
    use reth_node_ethereum::EthereumEngineValidator;
    use reth_primitives_traits::SealedHeader;
    use reth_provider::{
        providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec,
    };
    use reth_prune::Pruner;
    use reth_tasks::TokioTaskExecutor;
    use std::sync::Arc;
    use tokio::sync::{mpsc::unbounded_channel, watch};
    use tokio_stream::wrappers::UnboundedReceiverStream;
    // Construction-only smoke test: verifies the full engine service wiring
    // type-checks and builds for a mainnet chain spec; the service is never polled.
    #[test]
    fn eth_chain_orchestrator_build() {
        let chain_spec = Arc::new(
            ChainSpecBuilder::default()
                .chain(MAINNET.chain)
                .genesis(MAINNET.genesis.clone())
                .paris_activated()
                .build(),
        );
        let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone()));
        let client = TestFullBlockClient::default();
        let (_tx, rx) = unbounded_channel::<BeaconEngineMessage<EthEngineTypes>>();
        let incoming_requests = UnboundedReceiverStream::new(rx);
        let pipeline = TestPipelineBuilder::new().build(chain_spec.clone());
        let pipeline_task_spawner = Box::<TokioTaskExecutor>::default();
        let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
        let blockchain_db =
            BlockchainProvider::with_latest(provider_factory.clone(), SealedHeader::default())
                .unwrap();
        let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone());
        let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs);
        let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx);
        let evm_config = EthEvmConfig::new(chain_spec.clone());
        let engine_validator = BasicEngineValidator::new(
            blockchain_db.clone(),
            consensus.clone(),
            evm_config.clone(),
            engine_payload_validator,
            TreeConfig::default(),
            Box::new(NoopInvalidBlockHook::default()),
        );
        let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel();
        let (tx, _rx) = unbounded_channel();
        // Building the service is the assertion here.
        let _eth_service = EngineService::new(
            consensus,
            chain_spec.clone(),
            client,
            Box::pin(incoming_requests),
            pipeline,
            pipeline_task_spawner,
            provider_factory,
            blockchain_db,
            pruner,
            PayloadBuilderHandle::new(tx),
            engine_validator,
            TreeConfig::default(),
            sync_metrics_tx,
            evm_config,
            MaybePlatformPath::chain_default(chain_spec.chain.clone()),
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/invalid-block-hooks/src/lib.rs | crates/engine/invalid-block-hooks/src/lib.rs | //! Invalid block hook implementations.
mod witness;
pub use witness::InvalidBlockWitnessHook;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/invalid-block-hooks/src/witness.rs | crates/engine/invalid-block-hooks/src/witness.rs | use alloy_consensus::BlockHeader;
use alloy_primitives::{keccak256, Address, B256, U256};
use alloy_rpc_types_debug::ExecutionWitness;
use pretty_assertions::Comparison;
use reth_engine_primitives::InvalidBlockHook;
use reth_evm::{execute::Executor, ConfigureEvm};
use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader};
use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory};
use reth_revm::{database::StateProviderDatabase, db::BundleState, state::AccountInfo};
use reth_rpc_api::DebugApiClient;
use reth_tracing::tracing::warn;
use reth_trie::{updates::TrieUpdates, HashedStorage};
use revm_bytecode::Bytecode;
use revm_database::states::{
reverts::{AccountInfoRevert, RevertToSlot},
AccountStatus, StorageSlot,
};
use serde::Serialize;
use std::{collections::BTreeMap, fmt::Debug, fs::File, io::Write, path::PathBuf};
#[derive(Debug, PartialEq, Eq)]
struct AccountRevertSorted {
pub account: AccountInfoRevert,
pub storage: BTreeMap<U256, RevertToSlot>,
pub previous_status: AccountStatus,
pub wipe_storage: bool,
}
#[derive(Debug, PartialEq, Eq)]
struct BundleAccountSorted {
pub info: Option<AccountInfo>,
pub original_info: Option<AccountInfo>,
/// Contains both original and present state.
/// When extracting changeset we compare if original value is different from present value.
/// If it is different we add it to changeset.
/// If Account was destroyed we ignore original value and compare present state with
/// `U256::ZERO`.
pub storage: BTreeMap<U256, StorageSlot>,
/// Account status.
pub status: AccountStatus,
}
#[derive(Debug, PartialEq, Eq)]
struct BundleStateSorted {
/// Account state
pub state: BTreeMap<Address, BundleAccountSorted>,
/// All created contracts in this block.
pub contracts: BTreeMap<B256, Bytecode>,
/// Changes to revert
///
/// **Note**: Inside vector is *not* sorted by address.
///
/// But it is unique by address.
pub reverts: Vec<Vec<(Address, AccountRevertSorted)>>,
/// The size of the plain state in the bundle state
pub state_size: usize,
/// The size of reverts in the bundle state
pub reverts_size: usize,
}
impl BundleStateSorted {
fn from_bundle_state(bundle_state: &BundleState) -> Self {
let state = bundle_state
.state
.clone()
.into_iter()
.map(|(address, account)| {
(
address,
BundleAccountSorted {
info: account.info,
original_info: account.original_info,
status: account.status,
storage: BTreeMap::from_iter(account.storage),
},
)
})
.collect();
let contracts = BTreeMap::from_iter(bundle_state.contracts.clone());
let reverts = bundle_state
.reverts
.iter()
.map(|block| {
block
.iter()
.map(|(address, account_revert)| {
(
*address,
AccountRevertSorted {
account: account_revert.account.clone(),
previous_status: account_revert.previous_status,
wipe_storage: account_revert.wipe_storage,
storage: BTreeMap::from_iter(account_revert.storage.clone()),
},
)
})
.collect()
})
.collect();
let state_size = bundle_state.state_size;
let reverts_size = bundle_state.reverts_size;
Self { state, contracts, reverts, state_size, reverts_size }
}
}
/// Generates a witness for the given block and saves it to a file.
#[derive(Debug)]
pub struct InvalidBlockWitnessHook<P, E> {
/// The provider to read the historical state and do the EVM execution.
provider: P,
/// The EVM configuration to use for the execution.
evm_config: E,
/// The directory to write the witness to. Additionally, diff files will be written to this
/// directory in case of failed sanity checks.
output_directory: PathBuf,
/// The healthy node client to compare the witness against.
healthy_node_client: Option<jsonrpsee::http_client::HttpClient>,
}
impl<P, E> InvalidBlockWitnessHook<P, E> {
/// Creates a new witness hook.
pub const fn new(
provider: P,
evm_config: E,
output_directory: PathBuf,
healthy_node_client: Option<jsonrpsee::http_client::HttpClient>,
) -> Self {
Self { provider, evm_config, output_directory, healthy_node_client }
}
}
impl<P, E, N> InvalidBlockWitnessHook<P, E>
where
P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static,
E: ConfigureEvm<Primitives = N> + 'static,
N: NodePrimitives,
{
fn on_invalid_block(
&self,
parent_header: &SealedHeader<N::BlockHeader>,
block: &RecoveredBlock<N::Block>,
output: &BlockExecutionOutput<N::Receipt>,
trie_updates: Option<(&TrieUpdates, B256)>,
) -> eyre::Result<()>
where
N: NodePrimitives,
{
// TODO(alexey): unify with `DebugApi::debug_execution_witness`
let mut executor = self.evm_config.batch_executor(StateProviderDatabase::new(
self.provider.state_by_block_hash(parent_header.hash())?,
));
executor.execute_one(block)?;
// Take the bundle state
let mut db = executor.into_state();
let bundle_state = db.take_bundle();
// Initialize a map of preimages.
let mut state_preimages = Vec::default();
// Get codes
let codes = db
.cache
.contracts
.values()
.map(|code| code.original_bytes())
.chain(
// cache state does not have all the contracts, especially when
// a contract is created within the block
// the contract only exists in bundle state, therefore we need
// to include them as well
bundle_state.contracts.values().map(|code| code.original_bytes()),
)
.collect();
// Grab all account proofs for the data accessed during block execution.
//
// Note: We grab *all* accounts in the cache here, as the `BundleState` prunes
// referenced accounts + storage slots.
let mut hashed_state = db.database.hashed_post_state(&bundle_state);
for (address, account) in db.cache.accounts {
let hashed_address = keccak256(address);
hashed_state
.accounts
.insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into()));
let storage = hashed_state
.storages
.entry(hashed_address)
.or_insert_with(|| HashedStorage::new(account.status.was_destroyed()));
if let Some(account) = account.account {
state_preimages.push(alloy_rlp::encode(address).into());
for (slot, value) in account.storage {
let slot = B256::from(slot);
let hashed_slot = keccak256(slot);
storage.storage.insert(hashed_slot, value);
state_preimages.push(alloy_rlp::encode(slot).into());
}
}
}
// Generate an execution witness for the aggregated state of accessed accounts.
// Destruct the cache database to retrieve the state provider.
let state_provider = db.database.into_inner();
let state = state_provider.witness(Default::default(), hashed_state.clone())?;
// Write the witness to the output directory.
let response =
ExecutionWitness { state, codes, keys: state_preimages, ..Default::default() };
let re_executed_witness_path = self.save_file(
format!("{}_{}.witness.re_executed.json", block.number(), block.hash()),
&response,
)?;
if let Some(healthy_node_client) = &self.healthy_node_client {
// Compare the witness against the healthy node.
let healthy_node_witness = futures::executor::block_on(async move {
DebugApiClient::<()>::debug_execution_witness(
healthy_node_client,
block.number().into(),
)
.await
})?;
let healthy_path = self.save_file(
format!("{}_{}.witness.healthy.json", block.number(), block.hash()),
&healthy_node_witness,
)?;
// If the witnesses are different, write the diff to the output directory.
if response != healthy_node_witness {
let filename = format!("{}_{}.witness.diff", block.number(), block.hash());
let diff_path = self.save_diff(filename, &response, &healthy_node_witness)?;
warn!(
target: "engine::invalid_block_hooks::witness",
diff_path = %diff_path.display(),
re_executed_path = %re_executed_witness_path.display(),
healthy_path = %healthy_path.display(),
"Witness mismatch against healthy node"
);
}
}
// The bundle state after re-execution should match the original one.
//
// Reverts now supports order-independent equality, so we can compare directly without
// sorting the reverts vectors.
//
// See: https://github.com/bluealloy/revm/pull/1827
if bundle_state != output.state {
let original_path = self.save_file(
format!("{}_{}.bundle_state.original.json", block.number(), block.hash()),
&output.state,
)?;
let re_executed_path = self.save_file(
format!("{}_{}.bundle_state.re_executed.json", block.number(), block.hash()),
&bundle_state,
)?;
let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash());
// Convert bundle state to sorted struct which has BTreeMap instead of HashMap to
// have deterministic ordering
let bundle_state_sorted = BundleStateSorted::from_bundle_state(&bundle_state);
let output_state_sorted = BundleStateSorted::from_bundle_state(&output.state);
let diff_path = self.save_diff(filename, &bundle_state_sorted, &output_state_sorted)?;
warn!(
target: "engine::invalid_block_hooks::witness",
diff_path = %diff_path.display(),
original_path = %original_path.display(),
re_executed_path = %re_executed_path.display(),
"Bundle state mismatch after re-execution"
);
}
// Calculate the state root and trie updates after re-execution. They should match
// the original ones.
let (re_executed_root, trie_output) =
state_provider.state_root_with_updates(hashed_state)?;
if let Some((original_updates, original_root)) = trie_updates {
if re_executed_root != original_root {
let filename = format!("{}_{}.state_root.diff", block.number(), block.hash());
let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?;
warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution");
}
// If the re-executed state root does not match the _header_ state root, also log that.
if re_executed_root != block.state_root() {
let filename =
format!("{}_{}.header_state_root.diff", block.number(), block.hash());
let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?;
warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root");
}
if &trie_output != original_updates {
// Trie updates are too big to diff, so we just save the original and re-executed
let trie_output_sorted = &trie_output.into_sorted_ref();
let original_updates_sorted = &original_updates.into_sorted_ref();
let original_path = self.save_file(
format!("{}_{}.trie_updates.original.json", block.number(), block.hash()),
original_updates_sorted,
)?;
let re_executed_path = self.save_file(
format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()),
trie_output_sorted,
)?;
warn!(
target: "engine::invalid_block_hooks::witness",
original_path = %original_path.display(),
re_executed_path = %re_executed_path.display(),
"Trie updates mismatch after re-execution"
);
}
}
Ok(())
}
/// Saves the diff of two values into a file with the given name in the output directory.
fn save_diff<T: PartialEq + Debug>(
&self,
filename: String,
original: &T,
new: &T,
) -> eyre::Result<PathBuf> {
let path = self.output_directory.join(filename);
let diff = Comparison::new(original, new);
File::create(&path)?.write_all(diff.to_string().as_bytes())?;
Ok(path)
}
fn save_file<T: Serialize>(&self, filename: String, value: &T) -> eyre::Result<PathBuf> {
let path = self.output_directory.join(filename);
File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?;
Ok(path)
}
}
impl<P, E, N: NodePrimitives> InvalidBlockHook<N> for InvalidBlockWitnessHook<P, E>
where
P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static,
E: ConfigureEvm<Primitives = N> + 'static,
{
fn on_invalid_block(
&self,
parent_header: &SealedHeader<N::BlockHeader>,
block: &RecoveredBlock<N::Block>,
output: &BlockExecutionOutput<N::Receipt>,
trie_updates: Option<(&TrieUpdates, B256)>,
) {
if let Err(err) = self.on_invalid_block(parent_header, block, output, trie_updates) {
warn!(target: "engine::invalid_block_hooks::witness", %err, "Failed to invoke hook");
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/primitives/src/config.rs | crates/engine/primitives/src/config.rs | //! Engine tree configuration.
/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold.
pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 0; // todo(dalton): Maybe feature flag this? We need this so archive nodes can get an accurate
// snapshot. benchmarks dont signal this is of great cost to us
/// How close to the canonical head we persist blocks.
pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0;
/// Default maximum concurrency for proof tasks
pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256;
/// Default number of reserved CPU cores for non-reth processes.
///
/// This will be deducted from the thread count of main reth global threadpool.
pub const DEFAULT_RESERVED_CPU_CORES: usize = 1;
const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = 256;
const DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH: u32 = 256;
const DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE: usize = 4;
const DEFAULT_CROSS_BLOCK_CACHE_SIZE: u64 = 4 * 1024 * 1024 * 1024;
/// Determines if the host has enough parallelism to run the payload processor.
///
/// It requires at least 5 parallel threads:
/// - Engine in main thread that spawns the state root task.
/// - Multiproof task in payload processor
/// - Sparse Trie task in payload processor
/// - Multiproof computation spawned in payload processor
/// - Storage root computation spawned in trie parallel proof
pub fn has_enough_parallelism() -> bool {
#[cfg(feature = "std")]
{
std::thread::available_parallelism().is_ok_and(|num| num.get() >= 5)
}
#[cfg(not(feature = "std"))]
false
}
/// The configuration of the engine tree.
#[derive(Debug, Clone)]
pub struct TreeConfig {
/// Maximum number of blocks to be kept only in memory without triggering
/// persistence.
persistence_threshold: u64,
/// How close to the canonical head we persist blocks. Represents the ideal
/// number of most recent blocks to keep in memory for quick access and reorgs.
///
/// Note: this should be less than or equal to `persistence_threshold`.
memory_block_buffer_target: u64,
/// Number of pending blocks that cannot be executed due to missing parent and
/// are kept in cache.
block_buffer_limit: u32,
/// Number of invalid headers to keep in cache.
max_invalid_header_cache_length: u32,
/// Maximum number of blocks to execute sequentially in a batch.
///
/// This is used as a cutoff to prevent long-running sequential block execution when we receive
/// a batch of downloaded blocks.
max_execute_block_batch_size: usize,
/// Whether to use the legacy state root calculation method instead of the
/// new state root task.
legacy_state_root: bool,
/// Whether to always compare trie updates from the state root task to the trie updates from
/// the regular state root calculation.
always_compare_trie_updates: bool,
/// Whether to disable cross-block caching and parallel prewarming.
disable_caching_and_prewarming: bool,
/// Whether to disable the parallel sparse trie state root algorithm.
disable_parallel_sparse_trie: bool,
/// Whether to enable state provider metrics.
state_provider_metrics: bool,
/// Cross-block cache size in bytes.
cross_block_cache_size: u64,
/// Whether the host has enough parallelism to run state root task.
has_enough_parallelism: bool,
/// Maximum number of concurrent proof tasks
max_proof_task_concurrency: u64,
/// Number of reserved CPU cores for non-reth processes
reserved_cpu_cores: usize,
/// Whether to disable the precompile cache
precompile_cache_disabled: bool,
/// Whether to use state root fallback for testing
state_root_fallback: bool,
/// Whether to always process payload attributes and begin a payload build process
/// even if `forkchoiceState.headBlockHash` is already the canonical head or an ancestor.
///
/// The Engine API specification generally states that client software "MUST NOT begin a
/// payload build process if `forkchoiceState.headBlockHash` references a `VALID`
/// ancestor of the head of canonical chain".
/// See: <https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_forkchoiceupdatedv1> (Rule 2)
///
/// This flag allows overriding that behavior.
/// This is useful for specific chain configurations (e.g., OP Stack where proposers
/// can reorg their own chain), various custom chains, or for development/testing purposes
/// where immediate payload regeneration is desired despite the head not changing or moving to
/// an ancestor.
always_process_payload_attributes_on_canonical_head: bool,
}
impl Default for TreeConfig {
fn default() -> Self {
Self {
persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD,
memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET,
block_buffer_limit: DEFAULT_BLOCK_BUFFER_LIMIT,
max_invalid_header_cache_length: DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH,
max_execute_block_batch_size: DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE,
legacy_state_root: false,
always_compare_trie_updates: false,
disable_caching_and_prewarming: false,
disable_parallel_sparse_trie: false,
state_provider_metrics: false,
cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE,
has_enough_parallelism: has_enough_parallelism(),
max_proof_task_concurrency: DEFAULT_MAX_PROOF_TASK_CONCURRENCY,
reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES,
precompile_cache_disabled: false,
state_root_fallback: false,
always_process_payload_attributes_on_canonical_head: false,
}
}
}
impl TreeConfig {
/// Create engine tree configuration.
#[expect(clippy::too_many_arguments)]
pub const fn new(
persistence_threshold: u64,
memory_block_buffer_target: u64,
block_buffer_limit: u32,
max_invalid_header_cache_length: u32,
max_execute_block_batch_size: usize,
legacy_state_root: bool,
always_compare_trie_updates: bool,
disable_caching_and_prewarming: bool,
disable_parallel_sparse_trie: bool,
state_provider_metrics: bool,
cross_block_cache_size: u64,
has_enough_parallelism: bool,
max_proof_task_concurrency: u64,
reserved_cpu_cores: usize,
precompile_cache_disabled: bool,
state_root_fallback: bool,
always_process_payload_attributes_on_canonical_head: bool,
) -> Self {
Self {
persistence_threshold,
memory_block_buffer_target,
block_buffer_limit,
max_invalid_header_cache_length,
max_execute_block_batch_size,
legacy_state_root,
always_compare_trie_updates,
disable_caching_and_prewarming,
disable_parallel_sparse_trie,
state_provider_metrics,
cross_block_cache_size,
has_enough_parallelism,
max_proof_task_concurrency,
reserved_cpu_cores,
precompile_cache_disabled,
state_root_fallback,
always_process_payload_attributes_on_canonical_head,
}
}
/// Return the persistence threshold.
pub const fn persistence_threshold(&self) -> u64 {
self.persistence_threshold
}
/// Return the memory block buffer target.
pub const fn memory_block_buffer_target(&self) -> u64 {
self.memory_block_buffer_target
}
/// Return the block buffer limit.
pub const fn block_buffer_limit(&self) -> u32 {
self.block_buffer_limit
}
/// Return the maximum invalid cache header length.
pub const fn max_invalid_header_cache_length(&self) -> u32 {
self.max_invalid_header_cache_length
}
/// Return the maximum execute block batch size.
pub const fn max_execute_block_batch_size(&self) -> usize {
self.max_execute_block_batch_size
}
/// Return the maximum proof task concurrency.
pub const fn max_proof_task_concurrency(&self) -> u64 {
self.max_proof_task_concurrency
}
/// Return the number of reserved CPU cores for non-reth processes
pub const fn reserved_cpu_cores(&self) -> usize {
self.reserved_cpu_cores
}
/// Returns whether to use the legacy state root calculation method instead
/// of the new state root task
pub const fn legacy_state_root(&self) -> bool {
self.legacy_state_root
}
/// Returns whether or not state provider metrics are enabled.
pub const fn state_provider_metrics(&self) -> bool {
self.state_provider_metrics
}
/// Returns whether or not the parallel sparse trie is disabled.
pub const fn disable_parallel_sparse_trie(&self) -> bool {
self.disable_parallel_sparse_trie
}
/// Returns whether or not cross-block caching and parallel prewarming should be used.
pub const fn disable_caching_and_prewarming(&self) -> bool {
self.disable_caching_and_prewarming
}
/// Returns whether to always compare trie updates from the state root task to the trie updates
/// from the regular state root calculation.
pub const fn always_compare_trie_updates(&self) -> bool {
self.always_compare_trie_updates
}
/// Returns the cross-block cache size.
pub const fn cross_block_cache_size(&self) -> u64 {
self.cross_block_cache_size
}
/// Returns whether precompile cache is disabled.
pub const fn precompile_cache_disabled(&self) -> bool {
self.precompile_cache_disabled
}
/// Returns whether to use state root fallback.
pub const fn state_root_fallback(&self) -> bool {
self.state_root_fallback
}
/// Sets whether to always process payload attributes when the FCU head is already canonical.
pub const fn with_always_process_payload_attributes_on_canonical_head(
mut self,
always_process_payload_attributes_on_canonical_head: bool,
) -> Self {
self.always_process_payload_attributes_on_canonical_head =
always_process_payload_attributes_on_canonical_head;
self
}
/// Returns true if payload attributes should always be processed even when the FCU head is
/// canonical.
pub const fn always_process_payload_attributes_on_canonical_head(&self) -> bool {
self.always_process_payload_attributes_on_canonical_head
}
/// Setter for persistence threshold.
pub const fn with_persistence_threshold(mut self, persistence_threshold: u64) -> Self {
self.persistence_threshold = persistence_threshold;
self
}
/// Setter for memory block buffer target.
pub const fn with_memory_block_buffer_target(
mut self,
memory_block_buffer_target: u64,
) -> Self {
self.memory_block_buffer_target = memory_block_buffer_target;
self
}
/// Setter for block buffer limit.
pub const fn with_block_buffer_limit(mut self, block_buffer_limit: u32) -> Self {
self.block_buffer_limit = block_buffer_limit;
self
}
/// Setter for maximum invalid header cache length.
pub const fn with_max_invalid_header_cache_length(
mut self,
max_invalid_header_cache_length: u32,
) -> Self {
self.max_invalid_header_cache_length = max_invalid_header_cache_length;
self
}
/// Setter for maximum execute block batch size.
pub const fn with_max_execute_block_batch_size(
mut self,
max_execute_block_batch_size: usize,
) -> Self {
self.max_execute_block_batch_size = max_execute_block_batch_size;
self
}
/// Setter for whether to use the legacy state root calculation method.
pub const fn with_legacy_state_root(mut self, legacy_state_root: bool) -> Self {
self.legacy_state_root = legacy_state_root;
self
}
/// Setter for whether to disable cross-block caching and parallel prewarming.
pub const fn without_caching_and_prewarming(
mut self,
disable_caching_and_prewarming: bool,
) -> Self {
self.disable_caching_and_prewarming = disable_caching_and_prewarming;
self
}
/// Setter for whether to always compare trie updates from the state root task to the trie
/// updates from the regular state root calculation.
pub const fn with_always_compare_trie_updates(
mut self,
always_compare_trie_updates: bool,
) -> Self {
self.always_compare_trie_updates = always_compare_trie_updates;
self
}
/// Setter for cross block cache size.
pub const fn with_cross_block_cache_size(mut self, cross_block_cache_size: u64) -> Self {
self.cross_block_cache_size = cross_block_cache_size;
self
}
/// Setter for has enough parallelism.
pub const fn with_has_enough_parallelism(mut self, has_enough_parallelism: bool) -> Self {
self.has_enough_parallelism = has_enough_parallelism;
self
}
/// Setter for state provider metrics.
pub const fn with_state_provider_metrics(mut self, state_provider_metrics: bool) -> Self {
self.state_provider_metrics = state_provider_metrics;
self
}
/// Setter for using the parallel sparse trie
pub const fn with_disable_parallel_sparse_trie(
mut self,
disable_parallel_sparse_trie: bool,
) -> Self {
self.disable_parallel_sparse_trie = disable_parallel_sparse_trie;
self
}
/// Setter for maximum number of concurrent proof tasks.
pub const fn with_max_proof_task_concurrency(
mut self,
max_proof_task_concurrency: u64,
) -> Self {
self.max_proof_task_concurrency = max_proof_task_concurrency;
self
}
/// Setter for the number of reserved CPU cores for any non-reth processes
pub const fn with_reserved_cpu_cores(mut self, reserved_cpu_cores: usize) -> Self {
self.reserved_cpu_cores = reserved_cpu_cores;
self
}
/// Setter for whether to disable the precompile cache.
pub const fn without_precompile_cache(mut self, precompile_cache_disabled: bool) -> Self {
self.precompile_cache_disabled = precompile_cache_disabled;
self
}
/// Setter for whether to use state root fallback, useful for testing.
pub const fn with_state_root_fallback(mut self, state_root_fallback: bool) -> Self {
self.state_root_fallback = state_root_fallback;
self
}
/// Whether or not to use state root task
pub const fn use_state_root_task(&self) -> bool {
self.has_enough_parallelism && !self.legacy_state_root
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/primitives/src/event.rs | crates/engine/primitives/src/event.rs | //! Events emitted by the beacon consensus engine.
use crate::ForkchoiceStatus;
use alloc::boxed::Box;
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumHash;
use alloy_primitives::B256;
use alloy_rpc_types_engine::ForkchoiceState;
use core::{
fmt::{Display, Formatter, Result},
time::Duration,
};
use reth_chain_state::ExecutedBlockWithTrieUpdates;
use reth_ethereum_primitives::EthPrimitives;
use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader};
/// Type alias for backwards compat
#[deprecated(note = "Use ConsensusEngineEvent instead")]
pub type BeaconConsensusEngineEvent<N> = ConsensusEngineEvent<N>;
/// Events emitted by the consensus engine.
#[derive(Clone, Debug)]
pub enum ConsensusEngineEvent<N: NodePrimitives = EthPrimitives> {
/// The fork choice state was updated, and the current fork choice status
ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus),
/// A block was added to the fork chain.
ForkBlockAdded(ExecutedBlockWithTrieUpdates<N>, Duration),
/// A new block was received from the consensus engine
BlockReceived(BlockNumHash),
/// A block was added to the canonical chain, and the elapsed time validating the block
CanonicalBlockAdded(ExecutedBlockWithTrieUpdates<N>, Duration),
/// A canonical chain was committed, and the elapsed time committing the data
CanonicalChainCommitted(Box<SealedHeader<N::BlockHeader>>, Duration),
/// The consensus engine processed an invalid block.
InvalidBlock(Box<SealedBlock<N::Block>>),
/// The consensus engine is involved in live sync, and has specific progress
LiveSyncProgress(ConsensusEngineLiveSyncProgress),
}
impl<N: NodePrimitives> ConsensusEngineEvent<N> {
/// Returns the canonical header if the event is a
/// [`ConsensusEngineEvent::CanonicalChainCommitted`].
pub const fn canonical_header(&self) -> Option<&SealedHeader<N::BlockHeader>> {
match self {
Self::CanonicalChainCommitted(header, _) => Some(header),
_ => None,
}
}
}
impl<N> Display for ConsensusEngineEvent<N>
where
N: NodePrimitives<BlockHeader: BlockHeader>,
{
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
match self {
Self::ForkchoiceUpdated(state, status) => {
write!(f, "ForkchoiceUpdated({state:?}, {status:?})")
}
Self::ForkBlockAdded(block, duration) => {
write!(f, "ForkBlockAdded({:?}, {duration:?})", block.recovered_block.num_hash())
}
Self::CanonicalBlockAdded(block, duration) => {
write!(
f,
"CanonicalBlockAdded({:?}, {duration:?})",
block.recovered_block.num_hash()
)
}
Self::CanonicalChainCommitted(block, duration) => {
write!(f, "CanonicalChainCommitted({:?}, {duration:?})", block.num_hash())
}
Self::InvalidBlock(block) => {
write!(f, "InvalidBlock({:?})", block.num_hash())
}
Self::LiveSyncProgress(progress) => {
write!(f, "LiveSyncProgress({progress:?})")
}
Self::BlockReceived(num_hash) => {
write!(f, "BlockReceived({num_hash:?})")
}
}
}
}
/// Progress of the consensus engine during live sync.
#[derive(Clone, Debug)]
pub enum ConsensusEngineLiveSyncProgress {
/// The consensus engine is downloading blocks from the network.
DownloadingBlocks {
/// The number of blocks remaining to download.
remaining_blocks: u64,
/// The target block hash and number to download.
target: B256,
},
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/primitives/src/forkchoice.rs | crates/engine/primitives/src/forkchoice.rs | use alloy_primitives::B256;
use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatusEnum};
/// The struct that keeps track of the received forkchoice state and their status.
#[derive(Debug, Clone, Default)]
pub struct ForkchoiceStateTracker {
/// The latest forkchoice state that we received.
///
/// Caution: this can be invalid.
latest: Option<ReceivedForkchoiceState>,
/// Tracks the latest forkchoice state that we received to which we need to sync.
last_syncing: Option<ForkchoiceState>,
/// The latest valid forkchoice state that we received and processed as valid.
last_valid: Option<ForkchoiceState>,
}
impl ForkchoiceStateTracker {
/// Sets the latest forkchoice state that we received.
///
/// If the status is `VALID`, we also update the last valid forkchoice state and set the
/// `sync_target` to `None`, since we're now fully synced.
pub const fn set_latest(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) {
if status.is_valid() {
self.set_valid(state);
} else if status.is_syncing() {
self.last_syncing = Some(state);
}
let received = ReceivedForkchoiceState { state, status };
self.latest = Some(received);
}
const fn set_valid(&mut self, state: ForkchoiceState) {
// we no longer need to sync to this state.
self.last_syncing = None;
self.last_valid = Some(state);
}
/// Returns the [`ForkchoiceStatus`] of the latest received FCU.
///
/// Caution: this can be invalid.
pub(crate) fn latest_status(&self) -> Option<ForkchoiceStatus> {
self.latest.as_ref().map(|s| s.status)
}
/// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`]
#[expect(dead_code)]
pub(crate) fn is_latest_valid(&self) -> bool {
self.latest_status().is_some_and(|s| s.is_valid())
}
/// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`]
#[expect(dead_code)]
pub(crate) fn is_latest_syncing(&self) -> bool {
self.latest_status().is_some_and(|s| s.is_syncing())
}
/// Returns whether the latest received FCU is invalid: [`ForkchoiceStatus::Invalid`]
pub fn is_latest_invalid(&self) -> bool {
self.latest_status().is_some_and(|s| s.is_invalid())
}
/// Returns the last valid head hash.
pub fn last_valid_head(&self) -> Option<B256> {
self.last_valid.as_ref().map(|s| s.head_block_hash)
}
/// Returns the head hash of the latest received FCU to which we need to sync.
#[cfg_attr(not(test), expect(dead_code))]
pub(crate) fn sync_target(&self) -> Option<B256> {
self.last_syncing.as_ref().map(|s| s.head_block_hash)
}
/// Returns the latest received [`ForkchoiceState`].
///
/// Caution: this can be invalid.
pub fn latest_state(&self) -> Option<ForkchoiceState> {
self.latest.as_ref().map(|s| s.state)
}
/// Returns the last valid [`ForkchoiceState`].
pub const fn last_valid_state(&self) -> Option<ForkchoiceState> {
self.last_valid
}
/// Returns the last valid finalized hash.
///
/// This will return [`None`]:
/// - If either there is no valid finalized forkchoice state,
/// - Or the finalized hash for the latest valid forkchoice state is zero.
#[inline]
pub fn last_valid_finalized(&self) -> Option<B256> {
self.last_valid
.filter(|state| !state.finalized_block_hash.is_zero())
.map(|state| state.finalized_block_hash)
}
/// Returns the last received `ForkchoiceState` to which we need to sync.
pub const fn sync_target_state(&self) -> Option<ForkchoiceState> {
self.last_syncing
}
/// Returns the sync target finalized hash.
///
/// This will return [`None`]:
/// - If either there is no sync target forkchoice state,
/// - Or the finalized hash for the sync target forkchoice state is zero.
#[inline]
pub fn sync_target_finalized(&self) -> Option<B256> {
self.last_syncing
.filter(|state| !state.finalized_block_hash.is_zero())
.map(|state| state.finalized_block_hash)
}
    /// Returns true if no forkchoice state has been received yet.
    ///
    /// Only `latest` is checked: it is set on every received FCU regardless of its status
    /// (see the `set_latest` tests below), so `None` implies nothing was received.
    pub const fn is_empty(&self) -> bool {
        self.latest.is_none()
    }
}
/// Represents a forkchoice update and tracks the status we assigned to it.
#[derive(Debug, Clone)]
pub(crate) struct ReceivedForkchoiceState {
    // The forkchoice state as received.
    state: ForkchoiceState,
    // The status this update was tracked with when it was recorded.
    status: ForkchoiceStatus,
}
/// A simplified representation of [`PayloadStatusEnum`] specifically for FCU.
///
/// Note: `Accepted` is collapsed into [`Self::Valid`]; see
/// [`Self::from_payload_status`].
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ForkchoiceStatus {
    /// The forkchoice state is valid.
    Valid,
    /// The forkchoice state is invalid.
    Invalid,
    /// The forkchoice state is unknown.
    Syncing,
}
impl ForkchoiceStatus {
    /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Valid`].
    pub const fn is_valid(&self) -> bool {
        matches!(*self, Self::Valid)
    }
    /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Invalid`].
    pub const fn is_invalid(&self) -> bool {
        matches!(*self, Self::Invalid)
    }
    /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Syncing`].
    pub const fn is_syncing(&self) -> bool {
        matches!(*self, Self::Syncing)
    }
    /// Converts the general purpose [`PayloadStatusEnum`] into a [`ForkchoiceStatus`].
    pub(crate) const fn from_payload_status(status: &PayloadStatusEnum) -> Self {
        match status {
            // `Accepted` is only returned on `newPayload`. It would be a valid state here.
            PayloadStatusEnum::Valid | PayloadStatusEnum::Accepted => Self::Valid,
            PayloadStatusEnum::Invalid { .. } => Self::Invalid,
            PayloadStatusEnum::Syncing => Self::Syncing,
        }
    }
}
impl From<PayloadStatusEnum> for ForkchoiceStatus {
fn from(status: PayloadStatusEnum) -> Self {
Self::from_payload_status(&status)
}
}
/// A helper type representing which of the hashes of a [`ForkchoiceState`] a
/// given hash matched.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ForkchoiceStateHash {
    /// Head hash of the [`ForkchoiceState`].
    Head(B256),
    /// Safe hash of the [`ForkchoiceState`].
    Safe(B256),
    /// Finalized hash of the [`ForkchoiceState`].
    Finalized(B256),
}
impl ForkchoiceStateHash {
    /// Tries to find a matching hash in the given [`ForkchoiceState`].
    ///
    /// Checks head first, then safe, then finalized, so if the same hash occupies
    /// multiple positions the head/safe position wins.
    pub fn find(state: &ForkchoiceState, hash: B256) -> Option<Self> {
        if state.head_block_hash == hash {
            return Some(Self::Head(hash))
        }
        if state.safe_block_hash == hash {
            return Some(Self::Safe(hash))
        }
        if state.finalized_block_hash == hash {
            return Some(Self::Finalized(hash))
        }
        None
    }
    /// Returns true if this is the head hash of the [`ForkchoiceState`]
    pub const fn is_head(&self) -> bool {
        matches!(*self, Self::Head(_))
    }
}
impl AsRef<B256> for ForkchoiceStateHash {
    fn as_ref(&self) -> &B256 {
        // Every variant carries the hash as its only payload.
        match self {
            Self::Head(hash) => hash,
            Self::Safe(hash) => hash,
            Self::Finalized(hash) => hash,
        }
    }
}
/// Unit tests for [`ForkchoiceStateTracker`], [`ForkchoiceStatus`] and
/// [`ForkchoiceStateHash`]. These poke the tracker's private fields directly to
/// pin down exactly which fields `set_latest` updates for each status.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_forkchoice_state_tracker_set_latest_valid() {
        let mut tracker = ForkchoiceStateTracker::default();
        // Latest state is None
        assert!(tracker.latest_status().is_none());
        // Create a valid ForkchoiceState
        let state = ForkchoiceState {
            head_block_hash: B256::from_slice(&[1; 32]),
            safe_block_hash: B256::from_slice(&[2; 32]),
            finalized_block_hash: B256::from_slice(&[3; 32]),
        };
        let status = ForkchoiceStatus::Valid;
        tracker.set_latest(state, status);
        // Assert that the latest state is set
        assert!(tracker.latest.is_some());
        assert_eq!(tracker.latest.as_ref().unwrap().state, state);
        // Assert that last valid state is updated
        assert!(tracker.last_valid.is_some());
        assert_eq!(tracker.last_valid.as_ref().unwrap(), &state);
        // Assert that last syncing state is None
        assert!(tracker.last_syncing.is_none());
        // Test when there is a latest status and it is valid
        assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Valid));
    }
    #[test]
    fn test_forkchoice_state_tracker_set_latest_syncing() {
        let mut tracker = ForkchoiceStateTracker::default();
        // Create a syncing ForkchoiceState
        let state = ForkchoiceState {
            head_block_hash: B256::from_slice(&[1; 32]),
            safe_block_hash: B256::from_slice(&[2; 32]),
            finalized_block_hash: B256::from_slice(&[0; 32]), // Zero to simulate not finalized
        };
        let status = ForkchoiceStatus::Syncing;
        tracker.set_latest(state, status);
        // Assert that the latest state is set
        assert!(tracker.latest.is_some());
        assert_eq!(tracker.latest.as_ref().unwrap().state, state);
        // Assert that last valid state is None since the status is syncing
        assert!(tracker.last_valid.is_none());
        // Assert that last syncing state is updated
        assert!(tracker.last_syncing.is_some());
        assert_eq!(tracker.last_syncing.as_ref().unwrap(), &state);
        // Test when there is a latest status and it is syncing
        assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Syncing));
    }
    #[test]
    fn test_forkchoice_state_tracker_set_latest_invalid() {
        let mut tracker = ForkchoiceStateTracker::default();
        // Create an invalid ForkchoiceState
        let state = ForkchoiceState {
            head_block_hash: B256::from_slice(&[1; 32]),
            safe_block_hash: B256::from_slice(&[2; 32]),
            finalized_block_hash: B256::from_slice(&[3; 32]),
        };
        let status = ForkchoiceStatus::Invalid;
        tracker.set_latest(state, status);
        // Assert that the latest state is set
        assert!(tracker.latest.is_some());
        assert_eq!(tracker.latest.as_ref().unwrap().state, state);
        // Assert that last valid state is None since the status is invalid
        assert!(tracker.last_valid.is_none());
        // Assert that last syncing state is None since the status is invalid
        assert!(tracker.last_syncing.is_none());
        // Test when there is a latest status and it is invalid
        assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Invalid));
    }
    #[test]
    fn test_forkchoice_state_tracker_sync_target() {
        let mut tracker = ForkchoiceStateTracker::default();
        // Test when there is no last syncing state (should return None)
        assert!(tracker.sync_target().is_none());
        // Set a last syncing forkchoice state
        let state = ForkchoiceState {
            head_block_hash: B256::from_slice(&[1; 32]),
            safe_block_hash: B256::from_slice(&[2; 32]),
            finalized_block_hash: B256::from_slice(&[3; 32]),
        };
        tracker.last_syncing = Some(state);
        // Test when the last syncing state is set (should return the head block hash)
        assert_eq!(tracker.sync_target(), Some(B256::from_slice(&[1; 32])));
    }
    #[test]
    fn test_forkchoice_state_tracker_last_valid_finalized() {
        let mut tracker = ForkchoiceStateTracker::default();
        // No valid finalized state (should return None)
        assert!(tracker.last_valid_finalized().is_none());
        // Valid finalized state, but finalized hash is zero (should return None)
        let zero_finalized_state = ForkchoiceState {
            head_block_hash: B256::ZERO,
            safe_block_hash: B256::ZERO,
            finalized_block_hash: B256::ZERO, // Zero finalized hash
        };
        tracker.last_valid = Some(zero_finalized_state);
        assert!(tracker.last_valid_finalized().is_none());
        // Valid finalized state with non-zero finalized hash (should return finalized hash)
        let valid_finalized_state = ForkchoiceState {
            head_block_hash: B256::from_slice(&[1; 32]),
            safe_block_hash: B256::from_slice(&[2; 32]),
            finalized_block_hash: B256::from_slice(&[123; 32]), // Non-zero finalized hash
        };
        tracker.last_valid = Some(valid_finalized_state);
        assert_eq!(tracker.last_valid_finalized(), Some(B256::from_slice(&[123; 32])));
        // Reset the last valid state to None
        tracker.last_valid = None;
        assert!(tracker.last_valid_finalized().is_none());
    }
    #[test]
    fn test_forkchoice_state_tracker_sync_target_finalized() {
        let mut tracker = ForkchoiceStateTracker::default();
        // No sync target state (should return None)
        assert!(tracker.sync_target_finalized().is_none());
        // Sync target state with finalized hash as zero (should return None)
        let zero_finalized_sync_target = ForkchoiceState {
            head_block_hash: B256::from_slice(&[1; 32]),
            safe_block_hash: B256::from_slice(&[2; 32]),
            finalized_block_hash: B256::ZERO, // Zero finalized hash
        };
        tracker.last_syncing = Some(zero_finalized_sync_target);
        assert!(tracker.sync_target_finalized().is_none());
        // Sync target state with non-zero finalized hash (should return the hash)
        let valid_sync_target = ForkchoiceState {
            head_block_hash: B256::from_slice(&[1; 32]),
            safe_block_hash: B256::from_slice(&[2; 32]),
            finalized_block_hash: B256::from_slice(&[22; 32]), // Non-zero finalized hash
        };
        tracker.last_syncing = Some(valid_sync_target);
        assert_eq!(tracker.sync_target_finalized(), Some(B256::from_slice(&[22; 32])));
        // Reset the last sync target state to None
        tracker.last_syncing = None;
        assert!(tracker.sync_target_finalized().is_none());
    }
    #[test]
    fn test_forkchoice_state_tracker_is_empty() {
        let mut forkchoice = ForkchoiceStateTracker::default();
        // Initially, no forkchoice state has been received, so it should be empty.
        assert!(forkchoice.is_empty());
        // After setting a forkchoice state, it should no longer be empty.
        forkchoice.set_latest(ForkchoiceState::default(), ForkchoiceStatus::Valid);
        assert!(!forkchoice.is_empty());
        // Reset the forkchoice latest, it should be empty again.
        forkchoice.latest = None;
        assert!(forkchoice.is_empty());
    }
    #[test]
    fn test_forkchoice_state_hash_find() {
        // Define example hashes
        let head_hash = B256::random();
        let safe_hash = B256::random();
        let finalized_hash = B256::random();
        let non_matching_hash = B256::random();
        // Create a ForkchoiceState with specific hashes
        let state = ForkchoiceState {
            head_block_hash: head_hash,
            safe_block_hash: safe_hash,
            finalized_block_hash: finalized_hash,
        };
        // Test finding the head hash
        assert_eq!(
            ForkchoiceStateHash::find(&state, head_hash),
            Some(ForkchoiceStateHash::Head(head_hash))
        );
        // Test finding the safe hash
        assert_eq!(
            ForkchoiceStateHash::find(&state, safe_hash),
            Some(ForkchoiceStateHash::Safe(safe_hash))
        );
        // Test finding the finalized hash
        assert_eq!(
            ForkchoiceStateHash::find(&state, finalized_hash),
            Some(ForkchoiceStateHash::Finalized(finalized_hash))
        );
        // Test with a hash that doesn't match any of the hashes in ForkchoiceState
        assert_eq!(ForkchoiceStateHash::find(&state, non_matching_hash), None);
    }
}
//! Traits, validation methods, and helper types used to abstract over engine types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use alloy_consensus::BlockHeader;
use reth_errors::ConsensusError;
use reth_payload_primitives::{
EngineApiMessageVersion, EngineObjectValidationError, InvalidPayloadAttributesError,
NewPayloadError, PayloadAttributes, PayloadOrAttributes, PayloadTypes,
};
use reth_primitives_traits::{Block, RecoveredBlock};
use reth_trie_common::HashedPostState;
use serde::{de::DeserializeOwned, Serialize};
// Re-export [`ExecutionPayload`] moved to `reth_payload_primitives`
pub use reth_evm::{ConfigureEngineEvm, ExecutableTxIterator};
pub use reth_payload_primitives::ExecutionPayload;
mod error;
pub use error::*;
mod forkchoice;
pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus};
#[cfg(feature = "std")]
mod message;
#[cfg(feature = "std")]
pub use message::*;
mod event;
pub use event::*;
mod invalid_block_hook;
pub use invalid_block_hook::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook};
pub mod config;
pub use config::*;
/// This type defines the versioned types of the engine API based on the [ethereum engine API](https://github.com/ethereum/execution-apis/tree/main/src/engine).
///
/// This includes the execution payload types and payload attributes that are used to trigger a
/// payload job. Hence this trait is also [`PayloadTypes`].
///
/// Implementations of this type are intended to be stateless and just define the types as
/// associated types.
/// This type is intended for non-ethereum chains that closely mirror the ethereum engine API spec,
/// but may have different payload, for example opstack, but structurally equivalent otherwise (same
/// engine API RPC endpoints for example).
///
/// The `BuiltPayload` bound requires conversion into every versioned envelope type so a
/// built payload can be served for any supported engine API version.
pub trait EngineTypes:
    PayloadTypes<
        BuiltPayload: TryInto<Self::ExecutionPayloadEnvelopeV1>
                          + TryInto<Self::ExecutionPayloadEnvelopeV2>
                          + TryInto<Self::ExecutionPayloadEnvelopeV3>
                          + TryInto<Self::ExecutionPayloadEnvelopeV4>
                          + TryInto<Self::ExecutionPayloadEnvelopeV5>,
    > + DeserializeOwned
    + Serialize
{
    /// Execution Payload V1 envelope type.
    type ExecutionPayloadEnvelopeV1: DeserializeOwned
        + Serialize
        + Clone
        + Unpin
        + Send
        + Sync
        + 'static;
    /// Execution Payload V2 envelope type.
    type ExecutionPayloadEnvelopeV2: DeserializeOwned
        + Serialize
        + Clone
        + Unpin
        + Send
        + Sync
        + 'static;
    /// Execution Payload V3 envelope type.
    type ExecutionPayloadEnvelopeV3: DeserializeOwned
        + Serialize
        + Clone
        + Unpin
        + Send
        + Sync
        + 'static;
    /// Execution Payload V4 envelope type.
    type ExecutionPayloadEnvelopeV4: DeserializeOwned
        + Serialize
        + Clone
        + Unpin
        + Send
        + Sync
        + 'static;
    /// Execution Payload V5 envelope type.
    type ExecutionPayloadEnvelopeV5: DeserializeOwned
        + Serialize
        + Clone
        + Unpin
        + Send
        + Sync
        + 'static;
}
/// Type that validates the payloads processed by the engine API.
pub trait EngineApiValidator<Types: PayloadTypes>: Send + Sync + Unpin + 'static {
    /// Validates the presence or exclusion of fork-specific fields based on the payload attributes
    /// and the message version.
    fn validate_version_specific_fields(
        &self,
        version: EngineApiMessageVersion,
        payload_or_attrs: PayloadOrAttributes<'_, Types::ExecutionData, Types::PayloadAttributes>,
    ) -> Result<(), EngineObjectValidationError>;
    /// Ensures that the payload attributes are valid for the given [`EngineApiMessageVersion`].
    ///
    /// Returns an [`EngineObjectValidationError`] if the attributes are not well formed for
    /// the given version.
    fn ensure_well_formed_attributes(
        &self,
        version: EngineApiMessageVersion,
        attributes: &Types::PayloadAttributes,
    ) -> Result<(), EngineObjectValidationError>;
}
/// Type that validates an [`ExecutionPayload`].
#[auto_impl::auto_impl(&, Arc)]
pub trait PayloadValidator<Types: PayloadTypes>: Send + Sync + Unpin + 'static {
    /// The block type used by the engine.
    type Block: Block;
    /// Ensures that the given payload does not violate any consensus rules that concern the block's
    /// layout.
    ///
    /// This function must convert the payload into the executable block and pre-validate its
    /// fields.
    ///
    /// Implementers should ensure that the checks are done in the order that conforms with the
    /// engine-API specification.
    fn ensure_well_formed_payload(
        &self,
        payload: Types::ExecutionData,
    ) -> Result<RecoveredBlock<Self::Block>, NewPayloadError>;
    /// Verifies payload post-execution w.r.t. hashed state updates.
    ///
    /// The default implementation is a no-op that always succeeds.
    fn validate_block_post_execution_with_hashed_state(
        &self,
        _state_updates: &HashedPostState,
        _block: &RecoveredBlock<Self::Block>,
    ) -> Result<(), ConsensusError> {
        // method not used by l1
        Ok(())
    }
    /// Validates the payload attributes with respect to the header.
    ///
    /// By default, this enforces that the payload attributes timestamp is greater than the
    /// timestamp according to:
    /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than
    /// > timestamp
    /// > of a block referenced by forkchoiceState.headBlockHash.
    ///
    /// See also: <https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md#specification-1>
    fn validate_payload_attributes_against_header(
        &self,
        attr: &Types::PayloadAttributes,
        header: &<Self::Block as Block>::Header,
    ) -> Result<(), InvalidPayloadAttributesError> {
        // Equal timestamps are rejected as well: the new block must be strictly newer.
        if attr.timestamp() <= header.timestamp() {
            return Err(InvalidPayloadAttributesError::InvalidTimestamp);
        }
        Ok(())
    }
}
use alloc::{boxed::Box, fmt, vec::Vec};
use alloy_primitives::B256;
use reth_execution_types::BlockExecutionOutput;
use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader};
use reth_trie_common::updates::TrieUpdates;
/// An invalid block hook.
///
/// Implementations are notified whenever block processing determines a block to be
/// invalid, e.g. for debugging or persisting a witness of the failure.
pub trait InvalidBlockHook<N: NodePrimitives>: Send + Sync {
    /// Invoked when an invalid block is encountered.
    ///
    /// Receives the parent header, the offending block, the execution output produced while
    /// attempting to process it, and, when available, trie updates paired with a `B256`
    /// (presumably the associated state root — confirm at call sites).
    fn on_invalid_block(
        &self,
        parent_header: &SealedHeader<N::BlockHeader>,
        block: &RecoveredBlock<N::Block>,
        output: &BlockExecutionOutput<N::Receipt>,
        trie_updates: Option<(&TrieUpdates, B256)>,
    );
}
// Blanket impl: any `Fn` with the matching signature can be used as a hook directly,
// which allows passing plain closures wherever an `InvalidBlockHook` is expected.
impl<F, N> InvalidBlockHook<N> for F
where
    N: NodePrimitives,
    F: Fn(
            &SealedHeader<N::BlockHeader>,
            &RecoveredBlock<N::Block>,
            &BlockExecutionOutput<N::Receipt>,
            Option<(&TrieUpdates, B256)>,
        ) + Send
        + Sync,
{
    fn on_invalid_block(
        &self,
        parent_header: &SealedHeader<N::BlockHeader>,
        block: &RecoveredBlock<N::Block>,
        output: &BlockExecutionOutput<N::Receipt>,
        trie_updates: Option<(&TrieUpdates, B256)>,
    ) {
        self(parent_header, block, output, trie_updates)
    }
}
/// A no-op [`InvalidBlockHook`] that does nothing.
///
/// Useful as a default when no hook behavior is desired.
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct NoopInvalidBlockHook;
impl<N: NodePrimitives> InvalidBlockHook<N> for NoopInvalidBlockHook {
    // Intentionally empty: all inputs are ignored.
    fn on_invalid_block(
        &self,
        _parent_header: &SealedHeader<N::BlockHeader>,
        _block: &RecoveredBlock<N::Block>,
        _output: &BlockExecutionOutput<N::Receipt>,
        _trie_updates: Option<(&TrieUpdates, B256)>,
    ) {
    }
}
/// Multiple [`InvalidBlockHook`]s that are executed in order.
///
/// Hooks are stored as boxed trait objects so heterogeneous hook types can be combined.
pub struct InvalidBlockHooks<N: NodePrimitives>(pub Vec<Box<dyn InvalidBlockHook<N>>>);
impl<N: NodePrimitives> fmt::Debug for InvalidBlockHooks<N> {
    // The hooks themselves are not `Debug`, so only the number of registered hooks
    // is reported.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("InvalidBlockHooks").field("len", &self.0.len()).finish()
    }
}
impl<N: NodePrimitives> InvalidBlockHook<N> for InvalidBlockHooks<N> {
    /// Invokes every contained hook in insertion order with the same arguments.
    fn on_invalid_block(
        &self,
        parent_header: &SealedHeader<N::BlockHeader>,
        block: &RecoveredBlock<N::Block>,
        output: &BlockExecutionOutput<N::Receipt>,
        trie_updates: Option<(&TrieUpdates, B256)>,
    ) {
        self.0
            .iter()
            .for_each(|hook| hook.on_invalid_block(parent_header, block, output, trie_updates));
    }
}
use alloc::boxed::Box;
use alloy_rpc_types_engine::ForkchoiceUpdateError;
/// Represents all error cases when handling a new payload.
///
/// This represents all possible error cases that must be returned as JSON RPC errors back to the
/// beacon node.
#[derive(Debug, thiserror::Error)]
pub enum BeaconOnNewPayloadError {
    /// Thrown when the engine task is unavailable/stopped.
    #[error("beacon consensus engine task stopped")]
    EngineUnavailable,
    /// An internal error occurred, not necessarily related to the payload.
    ///
    /// The error message is forwarded verbatim via `#[error(transparent)]`.
    #[error(transparent)]
    Internal(Box<dyn core::error::Error + Send + Sync>),
}
impl BeaconOnNewPayloadError {
    /// Create a new internal error.
    pub fn internal<E: core::error::Error + Send + Sync + 'static>(e: E) -> Self {
        // Box the concrete error into the type-erased internal variant.
        let boxed: Box<dyn core::error::Error + Send + Sync> = Box::new(e);
        Self::Internal(boxed)
    }
}
/// Represents error cases for an applied forkchoice update.
///
/// This represents all possible error cases, that must be returned as JSON RPC errors back to the
/// beacon node.
#[derive(Debug, thiserror::Error)]
pub enum BeaconForkChoiceUpdateError {
    /// Thrown when a forkchoice update resulted in an error.
    #[error("forkchoice update error: {0}")]
    ForkchoiceUpdateError(#[from] ForkchoiceUpdateError),
    /// Thrown when the engine task is unavailable/stopped.
    #[error("beacon consensus engine task stopped")]
    EngineUnavailable,
    /// An internal error occurred, not necessarily related to the update.
    ///
    /// The error message is forwarded verbatim via `#[error(transparent)]`.
    #[error(transparent)]
    Internal(Box<dyn core::error::Error + Send + Sync>),
}
impl BeaconForkChoiceUpdateError {
    /// Create a new internal error.
    pub fn internal<E: core::error::Error + Send + Sync + 'static>(e: E) -> Self {
        // Box the concrete error into the type-erased internal variant.
        let boxed: Box<dyn core::error::Error + Send + Sync> = Box::new(e);
        Self::Internal(boxed)
    }
}
use crate::{
error::BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, ExecutionPayload, ForkchoiceStatus,
};
use alloy_rpc_types_engine::{
ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId,
PayloadStatus, PayloadStatusEnum,
};
use core::{
fmt::{self, Display},
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use futures::{future::Either, FutureExt, TryFutureExt};
use reth_errors::RethResult;
use reth_payload_builder_primitives::PayloadBuilderError;
use reth_payload_primitives::{EngineApiMessageVersion, PayloadTypes};
use tokio::sync::{mpsc::UnboundedSender, oneshot};
/// Type alias for backwards compat
///
/// Kept so downstream code written against the old name continues to compile; new code
/// should use [`ConsensusEngineHandle`] directly.
#[deprecated(note = "Use ConsensusEngineHandle instead")]
pub type BeaconConsensusEngineHandle<Payload> = ConsensusEngineHandle<Payload>;
/// Represents the outcome of forkchoice update.
///
/// This is a future that resolves to [`ForkChoiceUpdateResult`]
#[must_use = "futures do nothing unless you `.await` or poll them"]
#[derive(Debug)]
pub struct OnForkChoiceUpdated {
    /// Represents the status of the forkchoice update.
    ///
    /// Note: This is separate from the response `fut`, because we still can return an error
    /// depending on the payload attributes, even if the forkchoice update itself is valid.
    forkchoice_status: ForkchoiceStatus,
    /// Returns the result of the forkchoice update.
    ///
    /// Either an immediately-available result (`Left`) or a pending payload id that
    /// resolves once the payload job has been initiated (`Right`).
    fut: Either<futures::future::Ready<ForkChoiceUpdateResult>, PendingPayloadId>,
}
// === impl OnForkChoiceUpdated ===
impl OnForkChoiceUpdated {
/// Returns the determined status of the received `ForkchoiceState`.
pub const fn forkchoice_status(&self) -> ForkchoiceStatus {
self.forkchoice_status
}
/// Creates a new instance of `OnForkChoiceUpdated` for the `SYNCING` state
pub fn syncing() -> Self {
let status = PayloadStatus::from_status(PayloadStatusEnum::Syncing);
Self {
forkchoice_status: ForkchoiceStatus::from_payload_status(&status.status),
fut: Either::Left(futures::future::ready(Ok(ForkchoiceUpdated::new(status)))),
}
}
/// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update succeeded and no
/// payload attributes were provided.
pub fn valid(status: PayloadStatus) -> Self {
Self {
forkchoice_status: ForkchoiceStatus::from_payload_status(&status.status),
fut: Either::Left(futures::future::ready(Ok(ForkchoiceUpdated::new(status)))),
}
}
/// Creates a new instance of `OnForkChoiceUpdated` with the given payload status, if the
/// forkchoice update failed due to an invalid payload.
pub fn with_invalid(status: PayloadStatus) -> Self {
Self {
forkchoice_status: ForkchoiceStatus::from_payload_status(&status.status),
fut: Either::Left(futures::future::ready(Ok(ForkchoiceUpdated::new(status)))),
}
}
/// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update failed because the
/// given state is considered invalid
pub fn invalid_state() -> Self {
Self {
forkchoice_status: ForkchoiceStatus::Invalid,
fut: Either::Left(futures::future::ready(Err(ForkchoiceUpdateError::InvalidState))),
}
}
/// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update was successful but
/// payload attributes were invalid.
pub fn invalid_payload_attributes() -> Self {
Self {
// This is valid because this is only reachable if the state and payload is valid
forkchoice_status: ForkchoiceStatus::Valid,
fut: Either::Left(futures::future::ready(Err(
ForkchoiceUpdateError::UpdatedInvalidPayloadAttributes,
))),
}
}
/// If the forkchoice update was successful and no payload attributes were provided, this method
pub const fn updated_with_pending_payload_id(
payload_status: PayloadStatus,
pending_payload_id: oneshot::Receiver<Result<PayloadId, PayloadBuilderError>>,
) -> Self {
Self {
forkchoice_status: ForkchoiceStatus::from_payload_status(&payload_status.status),
fut: Either::Right(PendingPayloadId {
payload_status: Some(payload_status),
pending_payload_id,
}),
}
}
}
impl Future for OnForkChoiceUpdated {
    type Output = ForkChoiceUpdateResult;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Delegate to whichever side of the `Either` this instance holds.
        let this = self.get_mut();
        this.fut.poll_unpin(cx)
    }
}
/// A future that returns the payload id of a yet to be initiated payload job after a successful
/// forkchoice update
#[derive(Debug)]
struct PendingPayloadId {
    /// Payload status to return alongside the id; taken (`Option::take`) on completion,
    /// so the future must not be polled again afterwards.
    payload_status: Option<PayloadStatus>,
    /// Receives the payload id (or a build error) from the payload builder.
    pending_payload_id: oneshot::Receiver<Result<PayloadId, PayloadBuilderError>>,
}
impl Future for PendingPayloadId {
    type Output = ForkChoiceUpdateResult;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        let recv = ready!(this.pending_payload_id.poll_unpin(cx));
        if let Ok(Ok(payload_id)) = recv {
            let payload_status = this.payload_status.take().expect("Polled after completion");
            Poll::Ready(Ok(ForkchoiceUpdated { payload_status, payload_id: Some(payload_id) }))
        } else {
            // Channel dropped or the payload build job could not be initiated.
            Poll::Ready(Err(ForkchoiceUpdateError::UpdatedInvalidPayloadAttributes))
        }
    }
}
/// A message for the beacon engine from other components of the node (engine RPC API invoked by the
/// consensus layer).
#[derive(Debug)]
pub enum BeaconEngineMessage<Payload: PayloadTypes> {
    /// Message with new payload.
    NewPayload {
        /// The execution payload received by Engine API.
        payload: Payload::ExecutionData,
        /// The sender for returning payload status result.
        tx: oneshot::Sender<Result<PayloadStatus, BeaconOnNewPayloadError>>,
    },
    /// Message with updated forkchoice state.
    ForkchoiceUpdated {
        /// The updated forkchoice state.
        state: ForkchoiceState,
        /// The payload attributes for block building.
        ///
        /// `None` when the update only moves the head without starting a payload job.
        payload_attrs: Option<Payload::PayloadAttributes>,
        /// The Engine API Version.
        version: EngineApiMessageVersion,
        /// The sender for returning forkchoice updated result.
        tx: oneshot::Sender<RethResult<OnForkChoiceUpdated>>,
    },
}
impl<Payload: PayloadTypes> Display for BeaconEngineMessage<Payload> {
    // Compact, log-friendly rendering that deliberately avoids dumping full payloads.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::NewPayload { payload, .. } => {
                write!(
                    f,
                    "NewPayload(parent: {}, number: {}, hash: {})",
                    payload.parent_hash(),
                    payload.block_number(),
                    payload.block_hash()
                )
            }
            Self::ForkchoiceUpdated { state, payload_attrs, .. } => {
                // we don't want to print the entire payload attributes, because for OP this
                // includes all txs
                write!(
                    f,
                    "ForkchoiceUpdated {{ state: {state:?}, has_payload_attributes: {} }}",
                    payload_attrs.is_some()
                )
            }
        }
    }
}
/// A cloneable sender type that can be used to send engine API messages.
///
/// This type mirrors consensus related functions of the engine API.
#[derive(Debug, Clone)]
pub struct ConsensusEngineHandle<Payload>
where
    Payload: PayloadTypes,
{
    /// Unbounded channel into the engine task; cloning the handle clones this sender.
    to_engine: UnboundedSender<BeaconEngineMessage<Payload>>,
}
impl<Payload> ConsensusEngineHandle<Payload>
where
    Payload: PayloadTypes,
{
    /// Creates a new beacon consensus engine handle.
    pub const fn new(to_engine: UnboundedSender<BeaconEngineMessage<Payload>>) -> Self {
        Self { to_engine }
    }
    /// Sends a new payload message to the beacon consensus engine and waits for a response.
    ///
    /// Returns [`BeaconOnNewPayloadError::EngineUnavailable`] if the engine task dropped the
    /// response channel before answering.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/3d627c95a4d3510a8187dd02e0250ecb4331d27e/src/engine/shanghai.md#engine_newpayloadv2>
    pub async fn new_payload(
        &self,
        payload: Payload::ExecutionData,
    ) -> Result<PayloadStatus, BeaconOnNewPayloadError> {
        let (tx, rx) = oneshot::channel();
        // A send failure is intentionally ignored here: it surfaces as a recv error below.
        let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, tx });
        rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)?
    }
    /// Sends a forkchoice update message to the beacon consensus engine and waits for a response.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/3d627c95a4d3510a8187dd02e0250ecb4331d27e/src/engine/shanghai.md#engine_forkchoiceupdatedv2>
    pub async fn fork_choice_updated(
        &self,
        state: ForkchoiceState,
        payload_attrs: Option<Payload::PayloadAttributes>,
        version: EngineApiMessageVersion,
    ) -> Result<ForkchoiceUpdated, BeaconForkChoiceUpdateError> {
        // Three failure layers: closed channel -> `EngineUnavailable`, engine-side error ->
        // `internal`, and the resolved update future may itself yield a forkchoice error.
        Ok(self
            .send_fork_choice_updated(state, payload_attrs, version)
            .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable)
            .await?
            .map_err(BeaconForkChoiceUpdateError::internal)?
            .await?)
    }
    /// Sends a forkchoice update message to the beacon consensus engine and returns the receiver to
    /// wait for a response.
    fn send_fork_choice_updated(
        &self,
        state: ForkchoiceState,
        payload_attrs: Option<Payload::PayloadAttributes>,
        version: EngineApiMessageVersion,
    ) -> oneshot::Receiver<RethResult<OnForkChoiceUpdated>> {
        let (tx, rx) = oneshot::channel();
        let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated {
            state,
            payload_attrs,
            tx,
            version,
        });
        rx
    }
}
//! Contains the implementation of the mining mode for the local engine.
use alloy_consensus::BlockHeader;
use alloy_primitives::{TxHash, B256};
use alloy_rpc_types_engine::ForkchoiceState;
use eyre::OptionExt;
use futures_util::{stream::Fuse, StreamExt};
use reth_engine_primitives::ConsensusEngineHandle;
use reth_payload_builder::PayloadBuilderHandle;
use reth_payload_primitives::{
BuiltPayload, EngineApiMessageVersion, PayloadAttributesBuilder, PayloadKind, PayloadTypes,
};
use reth_provider::BlockReader;
use reth_transaction_pool::TransactionPool;
use std::{
collections::VecDeque,
future::Future,
pin::Pin,
task::{Context, Poll},
time::{Duration, UNIX_EPOCH},
};
use tokio::time::Interval;
use tokio_stream::wrappers::ReceiverStream;
use tracing::error;
/// A mining mode for the local dev engine.
#[derive(Debug)]
pub enum MiningMode<Pool: TransactionPool + Unpin> {
    /// In this mode a block is built as soon as
    /// a valid transaction reaches the pool.
    /// If `max_transactions` is set, a block is built when that many transactions have
    /// accumulated.
    Instant {
        /// The transaction pool.
        pool: Pool,
        /// Stream of transaction notifications.
        ///
        /// Fused so polling after exhaustion is safe.
        rx: Fuse<ReceiverStream<TxHash>>,
        /// Maximum number of transactions to accumulate before mining a block.
        /// If None, mine immediately when any transaction arrives.
        max_transactions: Option<usize>,
        /// Counter for accumulated transactions (only used when `max_transactions` is set).
        accumulated: usize,
    },
    /// In this mode a block is built at a fixed interval.
    Interval(Interval),
}
impl<Pool: TransactionPool + Unpin> MiningMode<Pool> {
    /// Constructor for a [`MiningMode::Instant`]
    pub fn instant(pool: Pool, max_transactions: Option<usize>) -> Self {
        // Subscribe to pending-transaction notifications before storing the pool.
        let listener = pool.pending_transactions_listener();
        let rx = ReceiverStream::new(listener).fuse();
        Self::Instant { pool, rx, max_transactions, accumulated: 0 }
    }
    /// Constructor for a [`MiningMode::Interval`]
    pub fn interval(duration: Duration) -> Self {
        // Delay the first tick by one full period so the first block is not mined instantly.
        let first_tick = tokio::time::Instant::now() + duration;
        Self::Interval(tokio::time::interval_at(first_tick, duration))
    }
}
impl<Pool: TransactionPool + Unpin> Future for MiningMode<Pool> {
    type Output = ();
    /// Resolves once a block should be built according to the configured mode.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        match this {
            Self::Instant { pool, rx, max_transactions, accumulated } => {
                // Poll for new transaction notifications
                while let Poll::Ready(Some(_)) = rx.poll_next_unpin(cx) {
                    // Skip notifications while the pool reports no pending transactions
                    // (nothing to mine yet).
                    if pool.pending_and_queued_txn_count().0 == 0 {
                        continue;
                    }
                    if let Some(max_tx) = max_transactions {
                        *accumulated += 1;
                        // If we've reached the max transactions threshold, mine a block
                        if *accumulated >= *max_tx {
                            *accumulated = 0; // Reset counter for next block
                            return Poll::Ready(());
                        }
                    } else {
                        // If no max_transactions is set, mine immediately
                        return Poll::Ready(());
                    }
                }
                Poll::Pending
            }
            Self::Interval(interval) => {
                // Ready each time the fixed interval elapses.
                if interval.poll_tick(cx).is_ready() {
                    return Poll::Ready(())
                }
                Poll::Pending
            }
        }
    }
}
/// Local miner advancing the chain
#[derive(Debug)]
pub struct LocalMiner<T: PayloadTypes, B, Pool: TransactionPool + Unpin> {
    /// The payload attribute builder for the engine
    payload_attributes_builder: B,
    /// Sender for events to engine.
    to_engine: ConsensusEngineHandle<T>,
    /// The mining mode for the engine
    mode: MiningMode<Pool>,
    /// The payload builder for the engine
    payload_builder: PayloadBuilderHandle<T>,
    /// Timestamp for the next block.
    /// NOTE: this is in MILLISECONDS when timestamp-in-seconds feature is disabled.
    /// Different from upstream reth, which holds this in seconds
    last_timestamp: u64,
    /// Stores latest mined blocks.
    // Capped at 64 entries (enforced in `advance`); used to derive the
    // head/safe/finalized hashes for forkchoice updates.
    last_block_hashes: VecDeque<B256>,
}
impl<T, B, Pool> LocalMiner<T, B, Pool>
where
    T: PayloadTypes,
    B: PayloadAttributesBuilder<<T as PayloadTypes>::PayloadAttributes>,
    Pool: TransactionPool + Unpin,
{
    /// Spawns a new [`LocalMiner`] with the given parameters.
    ///
    /// # Panics
    /// Panics if the provider cannot return the best block number or the matching
    /// sealed header (i.e. an uninitialized or corrupt database).
    pub fn new(
        provider: impl BlockReader,
        payload_attributes_builder: B,
        to_engine: ConsensusEngineHandle<T>,
        mode: MiningMode<Pool>,
        payload_builder: PayloadBuilderHandle<T>,
    ) -> Self {
        // NOTE: header block timestamp should be in milliseconds here
        let latest_header =
            provider.sealed_header(provider.best_block_number().unwrap()).unwrap().unwrap();
        Self {
            payload_attributes_builder,
            to_engine,
            mode,
            payload_builder,
            last_timestamp: latest_header.timestamp(),
            last_block_hashes: VecDeque::from([latest_header.hash()]),
        }
    }
    /// Runs the [`LocalMiner`] in a loop, polling the miner and building payloads.
    ///
    /// Never returns; errors from individual iterations are logged and the loop
    /// continues.
    pub async fn run(mut self) {
        let mut fcu_interval = tokio::time::interval(Duration::from_secs(1));
        loop {
            tokio::select! {
                // Wait for the interval or the pool to receive a transaction
                _ = &mut self.mode => {
                    if let Err(e) = self.advance().await {
                        error!(target: "engine::local", "Error advancing the chain: {:?}", e);
                    }
                }
                // send FCU once in a while
                _ = fcu_interval.tick() => {
                    if let Err(e) = self.update_forkchoice_state().await {
                        error!(target: "engine::local", "Error updating fork choice: {:?}", e);
                    }
                }
            }
        }
    }
    /// Returns current forkchoice state.
    ///
    /// Head is the most recently mined block; safe lags the head by roughly 32
    /// blocks and finalized by roughly 64, both clamped (via `saturating_sub`) to
    /// the oldest tracked hash while fewer blocks exist.
    fn forkchoice_state(&self) -> ForkchoiceState {
        ForkchoiceState {
            head_block_hash: *self.last_block_hashes.back().expect("at least 1 block exists"),
            safe_block_hash: *self
                .last_block_hashes
                .get(self.last_block_hashes.len().saturating_sub(32))
                .expect("at least 1 block exists"),
            finalized_block_hash: *self
                .last_block_hashes
                .get(self.last_block_hashes.len().saturating_sub(64))
                .expect("at least 1 block exists"),
        }
    }
    /// Sends a FCU to the engine.
    ///
    /// # Errors
    /// Returns an error if the engine channel fails or the engine reports the
    /// forkchoice update as invalid.
    async fn update_forkchoice_state(&self) -> eyre::Result<()> {
        let res = self
            .to_engine
            .fork_choice_updated(self.forkchoice_state(), None, EngineApiMessageVersion::default())
            .await?;
        if !res.is_valid() {
            eyre::bail!("Invalid fork choice update")
        }
        Ok(())
    }
    /// Generates payload attributes for a new block, passes them to FCU and inserts built payload
    /// through newPayload.
    async fn advance(&mut self) -> eyre::Result<()> {
        // Next timestamp: strictly greater than the previous block's and never
        // behind wall-clock time. Units (seconds vs milliseconds) depend on the
        // `timestamp-in-seconds` feature; the step size (+1 vs +1000) matches.
        #[cfg(feature = "timestamp-in-seconds")]
        let timestamp = std::cmp::max(
            self.last_timestamp + 1,
            std::time::SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .expect("cannot be earlier than UNIX_EPOCH")
                .as_secs(),
        );
        #[cfg(not(feature = "timestamp-in-seconds"))]
        let timestamp = std::cmp::max(
            self.last_timestamp + 1000,
            std::time::SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .expect("cannot be earlier than UNIX_EPOCH")
                .as_millis() as u64,
        );
        // Kick off payload building via forkchoiceUpdated with attributes.
        let res = self
            .to_engine
            .fork_choice_updated(
                self.forkchoice_state(),
                Some(self.payload_attributes_builder.build(timestamp)),
                EngineApiMessageVersion::default(),
            )
            .await?;
        if !res.is_valid() {
            eyre::bail!("Invalid payload status")
        }
        let payload_id = res.payload_id.ok_or_eyre("No payload id")?;
        // Block until the in-progress payload job has produced a payload.
        let Some(Ok(payload)) =
            self.payload_builder.resolve_kind(payload_id, PayloadKind::WaitForPending).await
        else {
            eyre::bail!("No payload")
        };
        let block = payload.block();
        tracing::debug!("local_miner: advance: block: {:?}", block.body());
        // Submit the built block back to the engine via newPayload.
        let payload = T::block_to_payload(payload.block().clone());
        let res = self.to_engine.new_payload(payload).await?;
        if !res.is_valid() {
            eyre::bail!("Invalid payload")
        }
        // Only commit local bookkeeping once the engine accepted the block.
        self.last_timestamp = timestamp;
        self.last_block_hashes.push_back(block.hash());
        // ensure we keep at most 64 blocks
        if self.last_block_hashes.len() > 64 {
            self.last_block_hashes.pop_front();
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/local/src/lib.rs | crates/engine/local/src/lib.rs | //! A local engine service that can be used to drive a dev chain.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod miner;
pub mod payload;
pub use miner::{LocalMiner, MiningMode};
pub use payload::LocalPayloadAttributesBuilder;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/local/src/service.rs | crates/engine/local/src/service.rs | //! Provides a local dev service engine that can be used to run a dev chain.
//!
//! [`LocalEngineService`] polls the payload builder based on a mining mode
//! which can be set to `Instant` or `Interval`. The `Instant` mode will
//! constantly poll the payload builder and initiate block building
//! with a single transaction. The `Interval` mode will initiate block
//! building at a fixed interval.
use core::fmt;
use std::{
fmt::{Debug, Formatter},
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use crate::miner::{LocalMiner, MiningMode};
use futures_util::{Stream, StreamExt};
use reth_chainspec::EthChainSpec;
use reth_consensus::{ConsensusError, FullConsensus};
use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineValidator};
use reth_engine_service::service::EngineMessageStream;
use reth_engine_tree::{
backup::BackupHandle,
chain::{ChainEvent, HandlerEvent},
engine::{
EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineRequestHandler, FromEngine,
RequestHandlerEvent,
},
persistence::PersistenceHandle,
tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig},
};
use reth_evm::ConfigureEvm;
use reth_node_types::BlockTy;
use reth_payload_builder::PayloadBuilderHandle;
use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes};
use reth_provider::{
providers::{BlockchainProvider, ProviderNodeTypes},
ChainSpecProvider, ProviderFactory,
};
use reth_prune::PrunerWithFactory;
use reth_stages_api::MetricEventsSender;
use tokio::sync::mpsc::UnboundedSender;
use tracing::error;
// seismic imports not used by upstream
use reth_node_core::dirs::{ChainPath, DataDirPath};
/// Provides a local dev service engine that can be used to drive the
/// chain forward.
///
/// This service both produces and consumes [`BeaconEngineMessage`]s. This is done to allow
/// modifications of the stream
pub struct LocalEngineService<N>
where
    N: ProviderNodeTypes,
{
    /// Processes requests.
    ///
    /// This type is responsible for processing incoming requests.
    handler: EngineApiRequestHandler<EngineApiRequest<N::Payload, N::Primitives>, N::Primitives>,
    /// Receiver for incoming requests (from the engine API endpoint) that need to be processed.
    // Drained in the `Stream` impl and forwarded to `handler`.
    incoming_requests: EngineMessageStream<N::Payload>,
}
impl<N> LocalEngineService<N>
where
    N: ProviderNodeTypes,
{
    /// Constructor for [`LocalEngineService`].
    ///
    /// Wires together the persistence service, backup service, engine API tree
    /// handler, and spawns the local miner; the returned service only retains the
    /// request handler and the incoming-request stream.
    #[expect(clippy::too_many_arguments)]
    pub fn new<B, V, C>(
        consensus: Arc<dyn FullConsensus<N::Primitives, Error = ConsensusError>>,
        provider: ProviderFactory<N>,
        blockchain_db: BlockchainProvider<N>,
        pruner: PrunerWithFactory<ProviderFactory<N>>,
        payload_builder: PayloadBuilderHandle<N::Payload>,
        payload_validator: V,
        tree_config: TreeConfig,
        invalid_block_hook: Box<dyn InvalidBlockHook<N::Primitives>>,
        sync_metrics_tx: MetricEventsSender,
        to_engine: UnboundedSender<BeaconEngineMessage<N::Payload>>,
        from_engine: EngineMessageStream<N::Payload>,
        mode: MiningMode,
        payload_attributes_builder: B,
        evm_config: C,
        data_dir: ChainPath<DataDirPath>,
    ) -> Self
    where
        B: PayloadAttributesBuilder<<N::Payload as PayloadTypes>::PayloadAttributes>,
        V: EngineValidator<N::Payload, Block = BlockTy<N>>,
        C: ConfigureEvm<Primitives = N::Primitives> + 'static,
    {
        let chain_spec = provider.chain_spec();
        // Pick the engine API flavor from the chain spec so OP-stack chains get
        // the OP-specific request handling.
        let engine_kind =
            if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum };
        // Background writer/pruner thread for database and static files.
        let persistence_handle =
            PersistenceHandle::<N::Primitives>::spawn_service(provider, pruner, sync_metrics_tx);
        let canonical_in_memory_state = blockchain_db.canonical_in_memory_state();
        // Seismic-specific: background backup service rooted at the data dir.
        let backup_handle = BackupHandle::spawn_service(data_dir);
        let (to_tree_tx, from_tree) = EngineApiTreeHandler::<N::Primitives, _, _, _, _>::spawn_new(
            blockchain_db.clone(),
            consensus,
            payload_validator,
            persistence_handle,
            payload_builder.clone(),
            canonical_in_memory_state,
            tree_config,
            invalid_block_hook,
            engine_kind,
            evm_config,
            backup_handle,
        );
        let handler = EngineApiRequestHandler::new(to_tree_tx, from_tree);
        // NOTE(review): `LocalMiner::spawn_new` and the non-generic `MiningMode`
        // here do not match the `LocalMiner::new`/`MiningMode<Pool>` API seen in
        // the miner module elsewhere in this tree — confirm the two files are
        // from the same revision.
        LocalMiner::spawn_new(
            blockchain_db,
            payload_attributes_builder,
            to_engine,
            mode,
            payload_builder,
        );
        Self { handler, incoming_requests: from_engine }
    }
}
impl<N> Stream for LocalEngineService<N>
where
    N: ProviderNodeTypes,
{
    type Item = ChainEvent<BeaconConsensusEngineEvent<N::Primitives>>;

    // Drains handler events first, then forwards any queued engine API requests.
    // Backfill/download requests are fatal here: a local dev engine has no
    // network to satisfy them.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        if let Poll::Ready(ev) = this.handler.poll(cx) {
            return match ev {
                RequestHandlerEvent::HandlerEvent(ev) => match ev {
                    HandlerEvent::BackfillAction(_) => {
                        error!(target: "engine::local", "received backfill request in local engine");
                        Poll::Ready(Some(ChainEvent::FatalError))
                    }
                    HandlerEvent::Event(ev) => Poll::Ready(Some(ChainEvent::Handler(ev))),
                    HandlerEvent::FatalError => Poll::Ready(Some(ChainEvent::FatalError)),
                },
                RequestHandlerEvent::Download(_) => {
                    error!(target: "engine::local", "received download request in local engine");
                    Poll::Ready(Some(ChainEvent::FatalError))
                }
            }
        }
        // forward incoming requests to the handler
        while let Poll::Ready(Some(req)) = this.incoming_requests.poll_next_unpin(cx) {
            this.handler.on_event(FromEngine::Request(req.into()));
        }
        Poll::Pending
    }
}
impl<N: ProviderNodeTypes> Debug for LocalEngineService<N> {
    /// Renders as `LocalEngineService { .. }` without exposing internal state.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("LocalEngineService");
        dbg.finish_non_exhaustive()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/local/src/payload.rs | crates/engine/local/src/payload.rs | //! The implementation of the [`PayloadAttributesBuilder`] for the
//! [`LocalMiner`](super::LocalMiner).
use alloy_primitives::{Address, B256};
use reth_chainspec::EthereumHardforks;
use reth_ethereum_engine_primitives::EthPayloadAttributes;
use reth_payload_primitives::PayloadAttributesBuilder;
use std::sync::Arc;
/// The attributes builder for local Ethereum payload.
#[derive(Debug)]
#[non_exhaustive]
pub struct LocalPayloadAttributesBuilder<ChainSpec> {
    /// The chainspec
    // Consulted for hardfork activation (Shanghai/Cancun) when building attributes.
    pub chain_spec: Arc<ChainSpec>,
}
impl<ChainSpec> LocalPayloadAttributesBuilder<ChainSpec> {
    /// Creates a new instance of the builder.
    pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
        Self { chain_spec }
    }
}
impl<ChainSpec> PayloadAttributesBuilder<EthPayloadAttributes>
    for LocalPayloadAttributesBuilder<ChainSpec>
where
    ChainSpec: Send + Sync + EthereumHardforks + 'static,
{
    /// Builds payload attributes for the given `timestamp`.
    ///
    /// `timestamp` arrives in milliseconds unless the `timestamp-in-seconds`
    /// feature is enabled (then it is already seconds), and is stored verbatim in
    /// the attributes. Hardfork activation checks always operate on seconds, so
    /// the value is converted first when needed.
    fn build(&self, timestamp: u64) -> EthPayloadAttributes {
        // Hardfork schedules are expressed in seconds.
        let timestamp_seconds = if cfg!(feature = "timestamp-in-seconds") {
            timestamp
        } else {
            timestamp / 1000
        };
        let shanghai_active = self.chain_spec.is_shanghai_active_at_timestamp(timestamp_seconds);
        let cancun_active = self.chain_spec.is_cancun_active_at_timestamp(timestamp_seconds);
        EthPayloadAttributes {
            timestamp,
            prev_randao: B256::random(),
            suggested_fee_recipient: Address::random(),
            // Empty withdrawals list once Shanghai is active, else absent.
            withdrawals: if shanghai_active { Some(Default::default()) } else { None },
            // Random beacon root once Cancun is active, else absent.
            parent_beacon_block_root: if cancun_active { Some(B256::random()) } else { None },
        }
    }
}
#[cfg(feature = "op")]
impl<ChainSpec> PayloadAttributesBuilder<op_alloy_rpc_types_engine::OpPayloadAttributes>
for LocalPayloadAttributesBuilder<ChainSpec>
where
ChainSpec: Send + Sync + EthereumHardforks + 'static,
{
fn build(&self, timestamp: u64) -> op_alloy_rpc_types_engine::OpPayloadAttributes {
op_alloy_rpc_types_engine::OpPayloadAttributes {
payload_attributes: self.build(timestamp),
// Add dummy system transaction
transactions: Some(vec![
reth_optimism_chainspec::constants::TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056
.into(),
]),
no_tx_pool: None,
gas_limit: None,
eip_1559_params: None,
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/engine.rs | crates/engine/tree/src/engine.rs | //! An engine API handler for the chain.
use crate::{
backfill::BackfillAction,
chain::{ChainHandler, FromOrchestrator, HandlerEvent},
download::{BlockDownloader, DownloadAction, DownloadOutcome},
};
use alloy_primitives::B256;
use futures::{Stream, StreamExt};
use reth_chain_state::ExecutedBlockWithTrieUpdates;
use reth_engine_primitives::{BeaconEngineMessage, ConsensusEngineEvent};
use reth_ethereum_primitives::EthPrimitives;
use reth_payload_primitives::PayloadTypes;
use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock};
use std::{
collections::HashSet,
fmt::Display,
sync::mpsc::Sender,
task::{ready, Context, Poll},
};
use tokio::sync::mpsc::UnboundedReceiver;
/// A [`ChainHandler`] that advances the chain based on incoming requests (CL engine API).
///
/// This is a general purpose request handler with network access.
/// This type listens for incoming messages and processes them via the configured request handler.
///
/// ## Overview
///
/// This type is an orchestrator for incoming messages and responsible for delegating requests
/// received from the CL to the handler.
///
/// It is responsible for handling the following:
/// - Delegating incoming requests to the [`EngineRequestHandler`].
/// - Advancing the [`EngineRequestHandler`] by polling it and emitting events.
/// - Downloading blocks on demand from the network if requested by the [`EngineApiRequestHandler`].
///
/// The core logic is part of the [`EngineRequestHandler`], which is responsible for processing the
/// incoming requests.
#[derive(Debug)]
pub struct EngineHandler<T, S, D> {
    /// Processes requests.
    ///
    /// This type is responsible for processing incoming requests.
    handler: T,
    /// Receiver for incoming requests (from the engine API endpoint) that need to be processed.
    incoming_requests: S,
    /// A downloader to download blocks on demand.
    // Driven from `ChainHandler::poll`; downloaded blocks are fed back into `handler`.
    downloader: D,
}
impl<T, S, D> EngineHandler<T, S, D> {
    /// Creates a new [`EngineHandler`] with the given handler and downloader and incoming stream of
    /// requests.
    // The `EngineRequestHandler` bound constrains only construction; the struct
    // itself stays bound-free.
    pub const fn new(handler: T, downloader: D, incoming_requests: S) -> Self
    where
        T: EngineRequestHandler,
    {
        Self { handler, incoming_requests, downloader }
    }

    /// Returns a mutable reference to the request handler.
    pub const fn handler_mut(&mut self) -> &mut T {
        &mut self.handler
    }
}
impl<T, S, D> ChainHandler for EngineHandler<T, S, D>
where
    T: EngineRequestHandler<Block = D::Block>,
    S: Stream + Send + Sync + Unpin + 'static,
    <S as Stream>::Item: Into<T::Request>,
    D: BlockDownloader,
{
    type Event = T::Event;

    fn on_event(&mut self, event: FromOrchestrator) {
        // delegate event to the handler
        self.handler.on_event(event.into());
    }

    // Priority order per loop iteration: (1) drain handler events, (2) forward
    // one incoming request, (3) advance the downloader. Only returns Pending
    // when all three sources are pending.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<HandlerEvent<Self::Event>> {
        loop {
            // drain the handler first
            while let Poll::Ready(ev) = self.handler.poll(cx) {
                match ev {
                    RequestHandlerEvent::HandlerEvent(ev) => {
                        return match ev {
                            HandlerEvent::BackfillAction(target) => {
                                // bubble up backfill sync request
                                // In-flight downloads are dropped because backfill
                                // supersedes live sync.
                                self.downloader.on_action(DownloadAction::Clear);
                                Poll::Ready(HandlerEvent::BackfillAction(target))
                            }
                            HandlerEvent::Event(ev) => {
                                // bubble up the event
                                Poll::Ready(HandlerEvent::Event(ev))
                            }
                            HandlerEvent::FatalError => Poll::Ready(HandlerEvent::FatalError),
                        }
                    }
                    RequestHandlerEvent::Download(req) => {
                        // delegate download request to the downloader
                        self.downloader.on_action(DownloadAction::Download(req));
                    }
                }
            }
            // pop the next incoming request
            if let Poll::Ready(Some(req)) = self.incoming_requests.poll_next_unpin(cx) {
                // and delegate the request to the handler
                self.handler.on_event(FromEngine::Request(req.into()));
                // skip downloading in this iteration to allow the handler to process the request
                continue
            }
            // advance the downloader
            if let Poll::Ready(outcome) = self.downloader.poll(cx) {
                if let DownloadOutcome::Blocks(blocks) = outcome {
                    // delegate the downloaded blocks to the handler
                    self.handler.on_event(FromEngine::DownloadedBlocks(blocks));
                }
                continue
            }
            return Poll::Pending
        }
    }
}
/// A type that processes incoming requests (e.g. requests from the consensus layer, engine API,
/// such as newPayload).
///
/// ## Control flow
///
/// Requests and certain updates, such as a change in backfill sync status, are delegated to this
/// type via [`EngineRequestHandler::on_event`]. This type is responsible for processing the
/// incoming requests and advancing the chain and emit events when it is polled.
pub trait EngineRequestHandler: Send + Sync {
    /// Event type this handler can emit
    type Event: Send;
    /// The request type this handler can process.
    type Request;
    /// Type of the block sent in [`FromEngine::DownloadedBlocks`] variant.
    type Block: Block;

    /// Informs the handler about an event from the [`EngineHandler`].
    fn on_event(&mut self, event: FromEngine<Self::Request, Self::Block>);

    /// Advances the handler.
    // Implementations should register the waker when returning `Poll::Pending`.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<RequestHandlerEvent<Self::Event>>;
}
/// An [`EngineRequestHandler`] that processes engine API requests by delegating to an execution
/// task.
///
/// This type is responsible for advancing the chain during live sync (following the tip of the
/// chain).
///
/// It advances the chain based on received engine API requests by delegating them to the tree
/// executor.
///
/// There are two types of requests that can be processed:
///
/// - `on_new_payload`: Executes the payload and inserts it into the tree. These are allowed to be
///   processed concurrently.
/// - `on_forkchoice_updated`: Updates the fork choice based on the new head. These require write
///   access to the database and are skipped if the handler can't acquire exclusive access to the
///   database.
///
/// In case required blocks are missing, the handler will request them from the network, by emitting
/// a download request upstream.
#[derive(Debug)]
pub struct EngineApiRequestHandler<Request, N: NodePrimitives> {
    /// channel to send messages to the tree to execute the payload.
    // Sync std channel: sends never block the async context.
    to_tree: Sender<FromEngine<Request, N::Block>>,
    /// channel to receive messages from the tree.
    from_tree: UnboundedReceiver<EngineApiEvent<N>>,
}
impl<Request, N: NodePrimitives> EngineApiRequestHandler<Request, N> {
    /// Creates a new `EngineApiRequestHandler`.
    ///
    /// The two channels are expected to be the counterparts of a spawned tree
    /// task (e.g. `EngineApiTreeHandler::spawn_new`).
    pub const fn new(
        to_tree: Sender<FromEngine<Request, N::Block>>,
        from_tree: UnboundedReceiver<EngineApiEvent<N>>,
    ) -> Self {
        Self { to_tree, from_tree }
    }
}
impl<Request, N: NodePrimitives> EngineRequestHandler for EngineApiRequestHandler<Request, N>
where
    Request: Send,
{
    type Event = ConsensusEngineEvent<N>;
    type Request = Request;
    type Block = N::Block;

    fn on_event(&mut self, event: FromEngine<Self::Request, Self::Block>) {
        // delegate to the tree
        // A send error means the tree task is gone; it is intentionally ignored
        // here because `poll` will surface the closed channel as a fatal error.
        let _ = self.to_tree.send(event);
    }

    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<RequestHandlerEvent<Self::Event>> {
        // A closed channel means the tree task terminated: fatal.
        let Some(ev) = ready!(self.from_tree.poll_recv(cx)) else {
            return Poll::Ready(RequestHandlerEvent::HandlerEvent(HandlerEvent::FatalError))
        };
        // Map tree events 1:1 onto handler events.
        let ev = match ev {
            EngineApiEvent::BeaconConsensus(ev) => {
                RequestHandlerEvent::HandlerEvent(HandlerEvent::Event(ev))
            }
            EngineApiEvent::BackfillAction(action) => {
                RequestHandlerEvent::HandlerEvent(HandlerEvent::BackfillAction(action))
            }
            EngineApiEvent::Download(action) => RequestHandlerEvent::Download(action),
        };
        Poll::Ready(ev)
    }
}
/// The type for specifying the kind of engine api.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum EngineApiKind {
    /// The chain contains Ethereum configuration.
    #[default]
    Ethereum,
    /// The chain contains Optimism configuration.
    OpStack,
}
impl EngineApiKind {
    /// Returns true if this is the ethereum variant
    pub const fn is_ethereum(&self) -> bool {
        matches!(self, Self::Ethereum)
    }

    /// Returns true if this is the optimism variant
    // Fixed copy-paste doc: previously claimed "ethereum variant".
    pub const fn is_opstack(&self) -> bool {
        matches!(self, Self::OpStack)
    }
}
/// The request variants that the engine API handler can receive.
#[derive(Debug)]
pub enum EngineApiRequest<T: PayloadTypes, N: NodePrimitives> {
    /// A request received from the consensus engine.
    Beacon(BeaconEngineMessage<T>),
    /// Request to insert an already executed block, e.g. via payload building.
    // Skips re-execution: the block carries its own state/trie updates.
    InsertExecutedBlock(ExecutedBlockWithTrieUpdates<N>),
}
impl<T: PayloadTypes, N: NodePrimitives> Display for EngineApiRequest<T, N> {
    /// Human-readable rendering used in logs.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            // Beacon messages already implement `Display`; delegate directly.
            Self::Beacon(msg) => Display::fmt(msg, f),
            Self::InsertExecutedBlock(block) => {
                let num_hash = block.recovered_block().num_hash();
                write!(f, "InsertExecutedBlock({num_hash:?})")
            }
        }
    }
}
// Allows engine messages to be passed wherever an `EngineApiRequest` is expected.
impl<T: PayloadTypes, N: NodePrimitives> From<BeaconEngineMessage<T>> for EngineApiRequest<T, N> {
    fn from(msg: BeaconEngineMessage<T>) -> Self {
        Self::Beacon(msg)
    }
}
// Wraps a request in the `FromEngine::Request` variant for delivery to the tree.
impl<T: PayloadTypes, N: NodePrimitives> From<EngineApiRequest<T, N>>
    for FromEngine<EngineApiRequest<T, N>, N::Block>
{
    fn from(req: EngineApiRequest<T, N>) -> Self {
        Self::Request(req)
    }
}
/// Events emitted by the engine API handler.
#[derive(Debug)]
pub enum EngineApiEvent<N: NodePrimitives = EthPrimitives> {
    /// Event from the consensus engine.
    // TODO(mattsse): find a more appropriate name for this variant, consider phasing it out.
    BeaconConsensus(ConsensusEngineEvent<N>),
    /// Backfill action is needed.
    BackfillAction(BackfillAction),
    /// Block download is needed.
    Download(DownloadRequest),
}
impl<N: NodePrimitives> EngineApiEvent<N> {
    /// Whether this event requests a backfill sync.
    pub const fn is_backfill_action(&self) -> bool {
        match self {
            Self::BackfillAction(_) => true,
            _ => false,
        }
    }
}
// Lifts consensus engine events into the handler event type.
impl<N: NodePrimitives> From<ConsensusEngineEvent<N>> for EngineApiEvent<N> {
    fn from(event: ConsensusEngineEvent<N>) -> Self {
        Self::BeaconConsensus(event)
    }
}
/// Events received from the engine.
// Generic over the request type so both the beacon and local engines can reuse it.
#[derive(Debug)]
pub enum FromEngine<Req, B: Block> {
    /// Event from the top level orchestrator.
    Event(FromOrchestrator),
    /// Request from the engine.
    Request(Req),
    /// Downloaded blocks from the network.
    DownloadedBlocks(Vec<RecoveredBlock<B>>),
}
impl<Req: Display, B: Block> Display for FromEngine<Req, B> {
    /// Compact log rendering; downloaded blocks are summarized by count only.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            // `FromOrchestrator` is rendered via `Debug` inside the Display output.
            Self::Event(ev) => write!(f, "Event({ev:?})"),
            Self::Request(req) => write!(f, "Request({req})"),
            Self::DownloadedBlocks(blocks) => {
                write!(f, "DownloadedBlocks({} blocks)", blocks.len())
            }
        }
    }
}
// Lifts orchestrator events into the engine event type.
impl<Req, B: Block> From<FromOrchestrator> for FromEngine<Req, B> {
    fn from(event: FromOrchestrator) -> Self {
        Self::Event(event)
    }
}
/// Requests produced by a [`EngineRequestHandler`].
#[derive(Debug)]
pub enum RequestHandlerEvent<T> {
    /// An event emitted by the handler.
    HandlerEvent(HandlerEvent<T>),
    /// Request to download blocks.
    // Satisfied by the `EngineHandler`'s downloader, not the handler itself.
    Download(DownloadRequest),
}
/// A request to download blocks from the network.
#[derive(Debug)]
pub enum DownloadRequest {
    /// Download the given set of blocks.
    BlockSet(HashSet<B256>),
    /// Download the given range of blocks.
    // Identified by the hash of the highest block and the number of blocks to fetch.
    BlockRange(B256, u64),
}
impl DownloadRequest {
    /// Convenience constructor for downloading exactly one block by hash.
    pub fn single_block(hash: B256) -> Self {
        let mut set = HashSet::with_capacity(1);
        set.insert(hash);
        Self::BlockSet(set)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/lib.rs | crates/engine/tree/src/lib.rs | //! This crate includes the core components for advancing a reth chain.
//!
//! ## Functionality
//!
//! The components in this crate are involved in:
//! * Handling and reacting to incoming consensus events ([`EngineHandler`](engine::EngineHandler))
//! * Advancing the chain ([`ChainOrchestrator`](chain::ChainOrchestrator))
//! * Keeping track of the chain structure in-memory ([`TreeState`](tree::state::TreeState))
//! * Performing backfill sync and handling its progress ([`BackfillSync`](backfill::BackfillSync))
//! * Downloading blocks ([`BlockDownloader`](download::BlockDownloader)), and
//! * Persisting blocks and performing pruning
//! ([`PersistenceService`](persistence::PersistenceService))
//!
//! ## Design and motivation
//!
//! The node must keep up with the state of the chain and validate new updates to the chain state.
//!
//! In order to respond to consensus messages and advance the chain quickly, validation code must
//! avoid database write operations and perform as much work as possible in-memory. This requirement
//! is what informs the architecture of the components this crate.
//!
//! ## Chain synchronization
//!
//! When the node receives a block with an unknown parent, or cannot find a certain block hash, it
//! needs to download and validate the part of the chain that is missing.
//!
//! This can happen during a live sync when the node receives a forkchoice update from the consensus
//! layer which causes the node to have to walk back from the received head, downloading the block's
//! parents until it reaches a known block.
//!
//! This can also technically happen when a finalized block is fetched, before checking distance,
//! but this is a very unlikely case.
//!
//! There are two mutually-exclusive ways the node can bring itself in sync with the chain:
//! * Backfill sync: downloading and validating large ranges of blocks in a structured manner,
//! performing different parts of the validation process in sequence.
//! * Live sync: By responding to new blocks from a connected consensus layer and downloading any
//! missing blocks on the fly.
//!
//! To determine which sync type to use, the node checks how many blocks it needs to execute to
//! catch up to the tip of the chain. If this range is large, backfill sync will be used. Otherwise,
//! live sync will be used.
//!
//! The backfill sync is driven by components which implement the
//! [`BackfillSync`](backfill::BackfillSync) trait, like [`PipelineSync`](backfill::PipelineSync).
//!
//! ## Handling consensus messages
//!
//! Consensus message handling is performed by three main components:
//! 1. The [`EngineHandler`](engine::EngineHandler), which takes incoming consensus messages and
//! manages any requested backfill or download work.
//! 2. The [`EngineApiRequestHandler`](engine::EngineApiRequestHandler), which processes messages
//! from the [`EngineHandler`](engine::EngineHandler) and delegates them to the
//! [`EngineApiTreeHandler`](tree::EngineApiTreeHandler).
//! 3. The [`EngineApiTreeHandler`](tree::EngineApiTreeHandler), which processes incoming tree
//! events, such as new payload events, sending back requests for any needed backfill or download
//! work.
//!
//! ## Chain representation
//!
//! The chain is represented by the [`TreeState`](tree::state::TreeState) data structure, which
//! keeps tracks of blocks by hash and number, as well as keeping track of parent-child
//! relationships between blocks. The hash and number of the current head of the canonical chain is
//! also tracked in the [`TreeState`](tree::state::TreeState).
//!
//! ## Persistence model
//!
//! Because the node minimizes database writes in the critical path for handling consensus messages,
//! it must perform database writes in the background.
//!
//! Performing writes in the background has two advantages:
//! 1. As mentioned, writes are not in the critical path of request processing.
//! 2. Writes can be larger and done at a lower frequency, allowing for more efficient writes.
//!
//! This is achieved by spawning a separate thread which is sent different commands corresponding to
//! different types of writes, for example a command to write a list of transactions, or remove a
//! specific range of blocks.
//!
//! The persistence service must also respond to these commands, to ensure that any in-memory state
//! that is on-disk can be cleaned up, conserving memory and allowing us to add new blocks
//! indefinitely.
//!
//! ## Feature Flags
//!
//! - `test-utils`: Export utilities for testing
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
/// Support for backfill sync mode.
pub mod backfill;
/// The background backup service
pub mod backup;
/// The type that drives the chain forward.
pub mod chain;
/// Support for downloading blocks on demand for live sync.
pub mod download;
/// Engine Api chain handler support.
pub mod engine;
/// Metrics support.
pub mod metrics;
/// The background writer service, coordinating write operations on static files and the database.
pub mod persistence;
/// Support for interacting with the blockchain tree.
pub mod tree;
/// Test utilities.
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/download.rs | crates/engine/tree/src/download.rs | //! Handler that can download blocks on demand (e.g. from the network).
use crate::{engine::DownloadRequest, metrics::BlockDownloaderMetrics};
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use futures::FutureExt;
use reth_consensus::{Consensus, ConsensusError};
use reth_network_p2p::{
full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient},
BlockClient,
};
use reth_primitives_traits::{Block, RecoveredBlock, SealedBlock};
use std::{
cmp::{Ordering, Reverse},
collections::{binary_heap::PeekMut, BinaryHeap, HashSet, VecDeque},
fmt::Debug,
sync::Arc,
task::{Context, Poll},
};
use tracing::trace;
/// A trait that can download blocks on demand.
pub trait BlockDownloader: Send + Sync {
    /// Type of the block being downloaded.
    type Block: Block;

    /// Handle an action.
    fn on_action(&mut self, action: DownloadAction);

    /// Advance in progress requests if any
    // Implementations should register the waker when returning `Poll::Pending`.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<DownloadOutcome<Self::Block>>;
}
/// Actions that can be performed by the block downloader.
#[derive(Debug)]
pub enum DownloadAction {
    /// Stop downloading blocks.
    ///
    /// Drops all inflight requests and any blocks buffered but not yet delivered.
    Clear,
    /// Download given blocks
    Download(DownloadRequest),
}
/// Outcome of downloaded blocks.
#[derive(Debug)]
pub enum DownloadOutcome<B: Block> {
    /// Downloaded blocks.
    ///
    /// Delivered in ascending block-number order with duplicate hashes removed.
    Blocks(Vec<RecoveredBlock<B>>),
    /// New download started.
    NewDownloadStarted {
        /// How many blocks are pending in this download.
        remaining_blocks: u64,
        /// The hash of the highest block of this download.
        target: B256,
    },
}
/// Basic [`BlockDownloader`].
#[expect(missing_debug_implementations)]
pub struct BasicBlockDownloader<Client, B: Block>
where
    Client: BlockClient + 'static,
{
    /// A downloader that can download full blocks from the network.
    full_block_client: FullBlockClient<Client>,
    /// In-flight full block requests in progress.
    inflight_full_block_requests: Vec<FetchFullBlockFuture<Client>>,
    /// In-flight full block _range_ requests in progress.
    inflight_block_range_requests: Vec<FetchFullBlockRangeFuture<Client>>,
    /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for
    /// ordering. This means the blocks will be popped from the heap with ascending block numbers.
    set_buffered_blocks: BinaryHeap<Reverse<OrderedRecoveredBlock<B>>>,
    /// Engine download metrics.
    metrics: BlockDownloaderMetrics,
    /// Pending events to be emitted.
    ///
    /// Drained one per `poll` call, before inflight requests are advanced.
    pending_events: VecDeque<DownloadOutcome<B>>,
}
impl<Client, B> BasicBlockDownloader<Client, B>
where
    Client: BlockClient<Block = B> + 'static,
    B: Block,
{
    /// Create a new instance
    ///
    /// Wraps the network `client` in a [`FullBlockClient`] that validates downloaded blocks
    /// with the given `consensus` implementation. All request/buffer containers start empty.
    pub fn new(client: Client, consensus: Arc<dyn Consensus<B, Error = ConsensusError>>) -> Self {
        Self {
            full_block_client: FullBlockClient::new(client, consensus),
            inflight_full_block_requests: Vec::new(),
            inflight_block_range_requests: Vec::new(),
            set_buffered_blocks: BinaryHeap::new(),
            metrics: BlockDownloaderMetrics::default(),
            pending_events: Default::default(),
        }
    }
    /// Clears the stored inflight requests.
    ///
    /// Also drops any blocks already buffered but not yet delivered, and resets the download
    /// metrics to reflect that nothing is in flight anymore.
    fn clear(&mut self) {
        self.inflight_full_block_requests.clear();
        self.inflight_block_range_requests.clear();
        self.set_buffered_blocks.clear();
        self.update_block_download_metrics();
    }
    /// Processes a download request by dispatching to the set- or range-variant handler.
    fn download(&mut self, request: DownloadRequest) {
        match request {
            DownloadRequest::BlockSet(hashes) => self.download_block_set(hashes),
            DownloadRequest::BlockRange(hash, count) => self.download_block_range(hash, count),
        }
    }
    /// Processes a block set download request.
    ///
    /// Starts one single-block download per hash; hashes already inflight are skipped by
    /// [`Self::download_full_block`].
    fn download_block_set(&mut self, hashes: HashSet<B256>) {
        for hash in hashes {
            self.download_full_block(hash);
        }
    }
    /// Processes a block range download request.
    ///
    /// A range of length 1 degenerates to a single full-block request; otherwise a range
    /// request anchored at `hash` covering `count` blocks is issued.
    fn download_block_range(&mut self, hash: B256, count: u64) {
        if count == 1 {
            self.download_full_block(hash);
        } else {
            trace!(
                target: "engine::download",
                ?hash,
                ?count,
                "start downloading full block range."
            );
            // NOTE(review): unlike single-block downloads, there is no dedup check against
            // already-inflight range requests here — presumably callers do not re-request the
            // same range; confirm before relying on it.
            let request = self.full_block_client.get_full_block_range(hash, count);
            self.push_pending_event(DownloadOutcome::NewDownloadStarted {
                remaining_blocks: request.count(),
                target: request.start_hash(),
            });
            self.inflight_block_range_requests.push(request);
            self.update_block_download_metrics();
        }
    }
    /// Starts requesting a full block from the network.
    ///
    /// Returns `true` if the request was started, `false` if there's already a request for the
    /// given hash.
    fn download_full_block(&mut self, hash: B256) -> bool {
        if self.is_inflight_request(hash) {
            return false
        }
        self.push_pending_event(DownloadOutcome::NewDownloadStarted {
            remaining_blocks: 1,
            target: hash,
        });
        trace!(
            target: "engine::download",
            ?hash,
            "Start downloading full block"
        );
        let request = self.full_block_client.get_full_block(hash);
        self.inflight_full_block_requests.push(request);
        self.update_block_download_metrics();
        true
    }
    /// Returns true if there's already a request for the given hash.
    ///
    /// Only single-block requests are considered; inflight range requests are not checked.
    fn is_inflight_request(&self, hash: B256) -> bool {
        self.inflight_full_block_requests.iter().any(|req| *req.hash() == hash)
    }
    /// Sets the metrics for the active downloads
    fn update_block_download_metrics(&self) {
        // One unit per single-block request plus the remaining count of every range request.
        let blocks = self.inflight_full_block_requests.len() +
            self.inflight_block_range_requests.iter().map(|r| r.count() as usize).sum::<usize>();
        self.metrics.active_block_downloads.set(blocks as f64);
    }
    /// Adds a pending event to the FIFO queue.
    fn push_pending_event(&mut self, pending_event: DownloadOutcome<B>) {
        self.pending_events.push_back(pending_event);
    }
    /// Removes a pending event from the FIFO queue.
    fn pop_pending_event(&mut self) -> Option<DownloadOutcome<B>> {
        self.pending_events.pop_front()
    }
}
impl<Client, B> BlockDownloader for BasicBlockDownloader<Client, B>
where
    Client: BlockClient<Block = B>,
    B: Block,
{
    type Block = B;
    /// Handles incoming download actions.
    fn on_action(&mut self, action: DownloadAction) {
        match action {
            DownloadAction::Clear => self.clear(),
            DownloadAction::Download(request) => self.download(request),
        }
    }
    /// Advances the download process.
    ///
    /// Emits queued `NewDownloadStarted` events first (one per call), then polls all inflight
    /// requests, buffering completed blocks, and finally drains the buffer — deduplicated and
    /// ascending by block number — as a single [`DownloadOutcome::Blocks`].
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<DownloadOutcome<B>> {
        if let Some(pending_event) = self.pop_pending_event() {
            return Poll::Ready(pending_event);
        }
        // advance all full block requests
        //
        // Iterating in reverse with `swap_remove` lets us drop completed futures without
        // shifting the remainder of the vec; still-pending futures are pushed back.
        for idx in (0..self.inflight_full_block_requests.len()).rev() {
            let mut request = self.inflight_full_block_requests.swap_remove(idx);
            if let Poll::Ready(block) = request.poll_unpin(cx) {
                trace!(target: "engine::download", block=?block.num_hash(), "Received single full block, buffering");
                self.set_buffered_blocks.push(Reverse(block.into()));
            } else {
                // still pending
                self.inflight_full_block_requests.push(request);
            }
        }
        // advance all full block range requests
        for idx in (0..self.inflight_block_range_requests.len()).rev() {
            let mut request = self.inflight_block_range_requests.swap_remove(idx);
            if let Poll::Ready(blocks) = request.poll_unpin(cx) {
                trace!(target: "engine::download", len=?blocks.len(), first=?blocks.first().map(|b| b.num_hash()), last=?blocks.last().map(|b| b.num_hash()), "Received full block range, buffering");
                self.set_buffered_blocks.extend(
                    blocks
                        .into_iter()
                        .map(|b| {
                            let senders = b.senders().unwrap_or_default();
                            OrderedRecoveredBlock(RecoveredBlock::new_sealed(b, senders))
                        })
                        .map(Reverse),
                );
            } else {
                // still pending
                self.inflight_block_range_requests.push(request);
            }
        }
        self.update_block_download_metrics();
        if self.set_buffered_blocks.is_empty() {
            return Poll::Pending;
        }
        // drain all unique element of the block buffer if there are any
        let mut downloaded_blocks: Vec<RecoveredBlock<B>> =
            Vec::with_capacity(self.set_buffered_blocks.len());
        while let Some(block) = self.set_buffered_blocks.pop() {
            // peek ahead and pop duplicates
            //
            // The heap is a min-heap on block number (via `Reverse`), so any duplicate of the
            // popped block sits at the top. NOTE(review): dedup compares hashes but ordering
            // uses only block numbers, so distinct blocks sharing a number are both kept.
            while let Some(peek) = self.set_buffered_blocks.peek_mut() {
                if peek.0 .0.hash() == block.0 .0.hash() {
                    PeekMut::pop(peek);
                } else {
                    break
                }
            }
            downloaded_blocks.push(block.0.into());
        }
        Poll::Ready(DownloadOutcome::Blocks(downloaded_blocks))
    }
}
/// A wrapper type around [`RecoveredBlock`] that implements the [Ord]
/// trait by block number.
///
/// Lets buffered blocks live in a `BinaryHeap` ordered by height.
#[derive(Debug, Clone, PartialEq, Eq)]
struct OrderedRecoveredBlock<B: Block>(RecoveredBlock<B>);
impl<B: Block> PartialOrd for OrderedRecoveredBlock<B> {
    /// Consistent with [`Ord`]: delegates to the total ordering by block number.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
impl<B: Block> Ord for OrderedRecoveredBlock<B> {
    /// Total order by block number only; blocks with equal numbers compare equal here even if
    /// their hashes differ.
    fn cmp(&self, other: &Self) -> Ordering {
        let (lhs, rhs) = (self.0.number(), other.0.number());
        lhs.cmp(&rhs)
    }
}
impl<B: Block> From<SealedBlock<B>> for OrderedRecoveredBlock<B> {
    /// Recovers the senders of `block` (falling back to an empty list on failure) and wraps
    /// the resulting recovered block.
    fn from(block: SealedBlock<B>) -> Self {
        let recovered_senders = block.senders().unwrap_or_default();
        Self(RecoveredBlock::new_sealed(block, recovered_senders))
    }
}
impl<B: Block> From<OrderedRecoveredBlock<B>> for RecoveredBlock<B> {
fn from(value: OrderedRecoveredBlock<B>) -> Self {
value.0
}
}
/// A [`BlockDownloader`] that does nothing.
///
/// Useful where a downloader is required by the API but no live downloads should happen;
/// it never produces blocks.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct NoopBlockDownloader<B>(core::marker::PhantomData<B>);
impl<B: Block> BlockDownloader for NoopBlockDownloader<B> {
    type Block = B;
    /// Ignores all actions.
    fn on_action(&mut self, _event: DownloadAction) {}
    /// Always pending: this downloader never yields an outcome.
    fn poll(&mut self, _cx: &mut Context<'_>) -> Poll<DownloadOutcome<B>> {
        Poll::Pending
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::insert_headers_into_client;
    use alloy_consensus::Header;
    use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M;
    use assert_matches::assert_matches;
    use reth_chainspec::{ChainSpecBuilder, MAINNET};
    use reth_ethereum_consensus::EthBeaconConsensus;
    use reth_network_p2p::test_utils::TestFullBlockClient;
    use reth_primitives_traits::SealedHeader;
    use std::{future::poll_fn, sync::Arc};
    /// Bundles a downloader under test together with the mock client backing it.
    struct TestHarness {
        block_downloader:
            BasicBlockDownloader<TestFullBlockClient, reth_ethereum_primitives::Block>,
        client: TestFullBlockClient,
    }
    impl TestHarness {
        /// Builds a downloader over a mock client pre-populated with `total_blocks` headers,
        /// using a mainnet chain spec with Paris activated.
        fn new(total_blocks: usize) -> Self {
            let chain_spec = Arc::new(
                ChainSpecBuilder::default()
                    .chain(MAINNET.chain)
                    .genesis(MAINNET.genesis.clone())
                    .paris_activated()
                    .build(),
            );
            let client = TestFullBlockClient::default();
            let header = Header {
                base_fee_per_gas: Some(7),
                gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
                ..Default::default()
            };
            let header = SealedHeader::seal_slow(header);
            insert_headers_into_client(&client, header, 0..total_blocks);
            let consensus = Arc::new(EthBeaconConsensus::new(chain_spec));
            let block_downloader = BasicBlockDownloader::new(client.clone(), consensus);
            Self { block_downloader, client }
        }
    }
    /// A range request yields a `NewDownloadStarted` event followed by all blocks in ascending
    /// order.
    #[tokio::test]
    async fn block_downloader_range_request() {
        const TOTAL_BLOCKS: usize = 10;
        let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS);
        let tip = client.highest_block().expect("there should be blocks here");
        // send block range download request
        block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockRange(
            tip.hash(),
            tip.number,
        )));
        // ensure we have one in flight range request
        assert_eq!(block_downloader.inflight_block_range_requests.len(), 1);
        // ensure the range request is made correctly
        let first_req = block_downloader.inflight_block_range_requests.first().unwrap();
        assert_eq!(first_req.start_hash(), tip.hash());
        assert_eq!(first_req.count(), tip.number);
        // poll downloader
        let sync_future = poll_fn(|cx| block_downloader.poll(cx));
        let next_ready = sync_future.await;
        assert_matches!(next_ready, DownloadOutcome::NewDownloadStarted { remaining_blocks, .. } => {
            assert_eq!(remaining_blocks, TOTAL_BLOCKS as u64);
        });
        let sync_future = poll_fn(|cx| block_downloader.poll(cx));
        let next_ready = sync_future.await;
        assert_matches!(next_ready, DownloadOutcome::Blocks(blocks) => {
            // ensure all blocks were obtained
            assert_eq!(blocks.len(), TOTAL_BLOCKS);
            // ensure they are in ascending order
            for num in 1..=TOTAL_BLOCKS {
                assert_eq!(blocks[num-1].number(), num as u64);
            }
        });
    }
    /// A set request starts one single-block download per hash and delivers all blocks in one
    /// batch, ascending by number.
    #[tokio::test]
    async fn block_downloader_set_request() {
        const TOTAL_BLOCKS: usize = 2;
        let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS);
        let tip = client.highest_block().expect("there should be blocks here");
        // send block set download request
        block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockSet(
            HashSet::from([tip.hash(), tip.parent_hash]),
        )));
        // ensure we have TOTAL_BLOCKS in flight full block request
        assert_eq!(block_downloader.inflight_full_block_requests.len(), TOTAL_BLOCKS);
        // poll downloader
        for _ in 0..TOTAL_BLOCKS {
            let sync_future = poll_fn(|cx| block_downloader.poll(cx));
            let next_ready = sync_future.await;
            assert_matches!(next_ready, DownloadOutcome::NewDownloadStarted { remaining_blocks, .. } => {
                assert_eq!(remaining_blocks, 1);
            });
        }
        let sync_future = poll_fn(|cx| block_downloader.poll(cx));
        let next_ready = sync_future.await;
        assert_matches!(next_ready, DownloadOutcome::Blocks(blocks) => {
            // ensure all blocks were obtained
            assert_eq!(blocks.len(), TOTAL_BLOCKS);
            // ensure they are in ascending order
            for num in 1..=TOTAL_BLOCKS {
                assert_eq!(blocks[num-1].number(), num as u64);
            }
        });
    }
    /// `Clear` drops every inflight request of both kinds.
    #[tokio::test]
    async fn block_downloader_clear_request() {
        const TOTAL_BLOCKS: usize = 10;
        let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS);
        let tip = client.highest_block().expect("there should be blocks here");
        // send block range download request
        block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockRange(
            tip.hash(),
            tip.number,
        )));
        // send block set download request
        let download_set = HashSet::from([tip.hash(), tip.parent_hash]);
        block_downloader
            .on_action(DownloadAction::Download(DownloadRequest::BlockSet(download_set.clone())));
        // ensure we have one in flight range request
        assert_eq!(block_downloader.inflight_block_range_requests.len(), 1);
        // ensure the range request is made correctly
        let first_req = block_downloader.inflight_block_range_requests.first().unwrap();
        assert_eq!(first_req.start_hash(), tip.hash());
        assert_eq!(first_req.count(), tip.number);
        // ensure we have download_set.len() in flight full block request
        assert_eq!(block_downloader.inflight_full_block_requests.len(), download_set.len());
        // send clear request
        block_downloader.on_action(DownloadAction::Clear);
        // ensure we have no in flight range request
        assert_eq!(block_downloader.inflight_block_range_requests.len(), 0);
        // ensure we have no in flight full block request
        assert_eq!(block_downloader.inflight_full_block_requests.len(), 0);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/persistence.rs | crates/engine/tree/src/persistence.rs | use crate::metrics::PersistenceMetrics;
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumHash;
use reth_chain_state::ExecutedBlockWithTrieUpdates;
use reth_errors::ProviderError;
use reth_ethereum_primitives::EthPrimitives;
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader,
ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory,
};
use reth_prune::{PrunerError, PrunerOutput, PrunerWithFactory};
use reth_stages_api::{MetricEvent, MetricEventsSender};
use std::{
sync::mpsc::{Receiver, SendError, Sender},
time::Instant,
};
use thiserror::Error;
use tokio::sync::oneshot;
use tracing::{debug, error};
/// Writes parts of reth's in memory tree state to the database and static files.
///
/// This is meant to be a spawned service that listens for various incoming persistence operations,
/// performing those actions on disk, and returning the result in a channel.
///
/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs
/// blocking I/O operations in an endless loop.
#[derive(Debug)]
pub struct PersistenceService<N>
where
    N: ProviderNodeTypes,
{
    /// The provider factory to use
    provider: ProviderFactory<N>,
    /// Incoming requests
    ///
    /// The service runs until every sender to this channel has been dropped.
    incoming: Receiver<PersistenceAction<N::Primitives>>,
    /// The pruner
    pruner: PrunerWithFactory<ProviderFactory<N>>,
    /// metrics
    metrics: PersistenceMetrics,
    /// Sender for sync metrics - we only submit sync metrics for persisted blocks
    sync_metrics_tx: MetricEventsSender,
}
impl<N> PersistenceService<N>
where
    N: ProviderNodeTypes,
{
    /// Create a new persistence service from its collaborators; metrics start at their
    /// defaults.
    pub fn new(
        provider: ProviderFactory<N>,
        incoming: Receiver<PersistenceAction<N::Primitives>>,
        pruner: PrunerWithFactory<ProviderFactory<N>>,
        sync_metrics_tx: MetricEventsSender,
    ) -> Self {
        let metrics = PersistenceMetrics::default();
        Self { provider, incoming, pruner, metrics, sync_metrics_tx }
    }
    /// Prunes block data before the given block hash according to the configured prune
    /// configuration, recording how long the run took.
    fn prune_before(&mut self, block_num: u64) -> Result<PrunerOutput, PrunerError> {
        debug!(target: "engine::persistence", ?block_num, "Running pruner");
        let started_at = Instant::now();
        // TODO: doing this properly depends on pruner segment changes
        let outcome = self.pruner.run(block_num);
        let elapsed = started_at.elapsed();
        self.metrics.prune_before_duration_seconds.record(elapsed);
        outcome
    }
}
impl<N> PersistenceService<N>
where
    N: ProviderNodeTypes,
{
    /// This is the main loop, that will listen to database events and perform the requested
    /// database actions
    ///
    /// Runs until all senders to `incoming` are dropped. Each action is handled synchronously;
    /// results are reported through the oneshot sender embedded in the action, and send errors
    /// are ignored because the requester may have given up on the result.
    pub fn run(mut self) -> Result<(), PersistenceError> {
        // If the receiver errors then senders have disconnected, so the loop should then end.
        while let Ok(action) = self.incoming.recv() {
            match action {
                PersistenceAction::RemoveBlocksAbove(new_tip_num, sender) => {
                    let result = self.on_remove_blocks_above(new_tip_num)?;
                    // send new sync metrics based on removed blocks
                    let _ =
                        self.sync_metrics_tx.send(MetricEvent::SyncHeight { height: new_tip_num });
                    // we ignore the error because the caller may or may not care about the result
                    let _ = sender.send(result);
                }
                PersistenceAction::SaveBlocks(blocks, sender) => {
                    let result = self.on_save_blocks(blocks)?;
                    let result_number = result.map(|r| r.number);
                    // we ignore the error because the caller may or may not care about the result
                    let _ = sender.send(result);
                    if let Some(block_number) = result_number {
                        // send new sync metrics based on saved blocks
                        let _ = self
                            .sync_metrics_tx
                            .send(MetricEvent::SyncHeight { height: block_number });
                        // pruning happens only after the save has been acknowledged above
                        if self.pruner.is_pruning_needed(block_number) {
                            // We log `PrunerOutput` inside the `Pruner`
                            let _ = self.prune_before(block_number)?;
                        }
                    }
                }
                PersistenceAction::SaveFinalizedBlock(finalized_block) => {
                    let provider = self.provider.database_provider_rw()?;
                    provider.save_finalized_block_number(finalized_block)?;
                    provider.commit()?;
                }
                PersistenceAction::SaveSafeBlock(safe_block) => {
                    let provider = self.provider.database_provider_rw()?;
                    provider.save_safe_block_number(safe_block)?;
                    provider.commit()?;
                }
            }
        }
        Ok(())
    }
    /// Removes all block data above `new_tip_num` from the database and static files.
    ///
    /// Returns the num/hash of the new tip, or `None` when no hash is stored for
    /// `new_tip_num`.
    fn on_remove_blocks_above(
        &self,
        new_tip_num: u64,
    ) -> Result<Option<BlockNumHash>, PersistenceError> {
        debug!(target: "engine::persistence", ?new_tip_num, "Removing blocks");
        let start_time = Instant::now();
        let provider_rw = self.provider.database_provider_rw()?;
        let sf_provider = self.provider.static_file_provider();
        // look the tip hash up before removal so we can report the resulting tip
        let new_tip_hash = provider_rw.block_hash(new_tip_num)?;
        UnifiedStorageWriter::from(&provider_rw, &sf_provider).remove_blocks_above(new_tip_num)?;
        UnifiedStorageWriter::commit_unwind(provider_rw)?;
        debug!(target: "engine::persistence", ?new_tip_num, ?new_tip_hash, "Removed blocks from disk");
        self.metrics.remove_blocks_above_duration_seconds.record(start_time.elapsed());
        Ok(new_tip_hash.map(|hash| BlockNumHash { hash, number: new_tip_num }))
    }
    /// Writes the given blocks (expected in ascending-number order) to static files and the
    /// database as one commit.
    ///
    /// Returns the num/hash of the highest saved block, or `None` when `blocks` is empty.
    fn on_save_blocks(
        &self,
        blocks: Vec<ExecutedBlockWithTrieUpdates<N::Primitives>>,
    ) -> Result<Option<BlockNumHash>, PersistenceError> {
        debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.recovered_block.num_hash()), last=?blocks.last().map(|b| b.recovered_block.num_hash()), "Saving range of blocks");
        let start_time = Instant::now();
        let last_block_hash_num = blocks.last().map(|block| BlockNumHash {
            hash: block.recovered_block().hash(),
            number: block.recovered_block().header().number(),
        });
        // skip the write path entirely when there is nothing to persist
        if last_block_hash_num.is_some() {
            let provider_rw = self.provider.database_provider_rw()?;
            let static_file_provider = self.provider.static_file_provider();
            UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(blocks)?;
            UnifiedStorageWriter::commit(provider_rw)?;
        }
        self.metrics.save_blocks_duration_seconds.record(start_time.elapsed());
        Ok(last_block_hash_num)
    }
}
/// One of the errors that can happen when using the persistence service.
#[derive(Debug, Error)]
pub enum PersistenceError {
    /// A pruner error
    #[error(transparent)]
    PrunerError(#[from] PrunerError),
    /// A provider error
    #[error(transparent)]
    ProviderError(#[from] ProviderError),
}
/// A signal to the persistence service that part of the tree state can be persisted.
#[derive(Debug)]
pub enum PersistenceAction<N: NodePrimitives = EthPrimitives> {
    /// The section of tree state that should be persisted. These blocks are expected in order of
    /// increasing block number.
    ///
    /// First, header, transaction, and receipt-related data should be written to static files.
    /// Then the execution history-related data will be written to the database.
    ///
    /// The num/hash of the highest persisted block (or `None` for an empty batch) is returned
    /// through the sender.
    SaveBlocks(Vec<ExecutedBlockWithTrieUpdates<N>>, oneshot::Sender<Option<BlockNumHash>>),
    /// Removes block data above the given block number from the database.
    ///
    /// This will first update checkpoints from the database, then remove actual block data from
    /// static files.
    ///
    /// The num/hash of the new tip is returned through the sender.
    RemoveBlocksAbove(u64, oneshot::Sender<Option<BlockNumHash>>),
    /// Update the persisted finalized block on disk
    SaveFinalizedBlock(u64),
    /// Update the persisted safe block on disk
    SaveSafeBlock(u64),
}
/// A handle to the persistence service
///
/// Cheap to clone; all clones feed the same service through the shared channel.
#[derive(Debug, Clone)]
pub struct PersistenceHandle<N: NodePrimitives = EthPrimitives> {
    /// The channel used to communicate with the persistence service
    sender: Sender<PersistenceAction<N>>,
}
impl<T: NodePrimitives> PersistenceHandle<T> {
    /// Create a new [`PersistenceHandle`] from a [`Sender<PersistenceAction>`].
    pub const fn new(sender: Sender<PersistenceAction<T>>) -> Self {
        Self { sender }
    }
    /// Create a new [`PersistenceHandle`], and spawn the persistence service.
    ///
    /// The service runs on a dedicated OS thread (it performs blocking I/O) and exits when the
    /// returned handle and all its clones are dropped.
    pub fn spawn_service<N>(
        provider_factory: ProviderFactory<N>,
        pruner: PrunerWithFactory<ProviderFactory<N>>,
        sync_metrics_tx: MetricEventsSender,
    ) -> PersistenceHandle<N::Primitives>
    where
        N: ProviderNodeTypes,
    {
        // create the initial channels
        let (db_service_tx, db_service_rx) = std::sync::mpsc::channel();
        // construct persistence handle
        let persistence_handle = PersistenceHandle::new(db_service_tx);
        // spawn the persistence service
        let db_service =
            PersistenceService::new(provider_factory, db_service_rx, pruner, sync_metrics_tx);
        std::thread::Builder::new()
            .name("Persistence Service".to_string())
            .spawn(|| {
                if let Err(err) = db_service.run() {
                    error!(target: "engine::persistence", ?err, "Persistence service failed");
                }
            })
            .unwrap();
        persistence_handle
    }
    /// Sends a specific [`PersistenceAction`] in the contained channel. The caller is responsible
    /// for creating any channels for the given action.
    pub fn send_action(
        &self,
        action: PersistenceAction<T>,
    ) -> Result<(), SendError<PersistenceAction<T>>> {
        self.sender.send(action)
    }
    /// Tells the persistence service to save a certain list of finalized blocks. The blocks are
    /// assumed to be ordered by block number.
    ///
    /// This returns the latest hash that has been saved, allowing removal of that block and any
    /// previous blocks from in-memory data structures. This value is returned in the receiver end
    /// of the sender argument.
    ///
    /// If there are no blocks to persist, then `None` is sent in the sender.
    pub fn save_blocks(
        &self,
        blocks: Vec<ExecutedBlockWithTrieUpdates<T>>,
        tx: oneshot::Sender<Option<BlockNumHash>>,
    ) -> Result<(), SendError<PersistenceAction<T>>> {
        self.send_action(PersistenceAction::SaveBlocks(blocks, tx))
    }
    /// Persists the finalized block number on disk.
    pub fn save_finalized_block_number(
        &self,
        finalized_block: u64,
    ) -> Result<(), SendError<PersistenceAction<T>>> {
        self.send_action(PersistenceAction::SaveFinalizedBlock(finalized_block))
    }
    /// Persists the safe block number on disk.
    pub fn save_safe_block_number(
        &self,
        safe_block: u64,
    ) -> Result<(), SendError<PersistenceAction<T>>> {
        self.send_action(PersistenceAction::SaveSafeBlock(safe_block))
    }
    /// Tells the persistence service to remove blocks above a certain block number. The removed
    /// blocks are returned by the service.
    ///
    /// When the operation completes, the new tip hash is returned in the receiver end of the sender
    /// argument.
    pub fn remove_blocks_above(
        &self,
        block_num: u64,
        tx: oneshot::Sender<Option<BlockNumHash>>,
    ) -> Result<(), SendError<PersistenceAction<T>>> {
        self.send_action(PersistenceAction::RemoveBlocksAbove(block_num, tx))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::B256;
    use reth_chain_state::test_utils::TestBlockBuilder;
    use reth_exex_types::FinishedExExHeight;
    use reth_provider::test_utils::create_test_provider_factory;
    use reth_prune::Pruner;
    use tokio::sync::mpsc::unbounded_channel;
    /// Spawns a persistence service over an in-memory test provider with a no-op pruner setup.
    fn default_persistence_handle() -> PersistenceHandle<EthPrimitives> {
        let provider = create_test_provider_factory();
        let (_finished_exex_height_tx, finished_exex_height_rx) =
            tokio::sync::watch::channel(FinishedExExHeight::NoExExs);
        let pruner =
            Pruner::new_with_factory(provider.clone(), vec![], 5, 0, None, finished_exex_height_rx);
        let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel();
        PersistenceHandle::<EthPrimitives>::spawn_service(provider, pruner, sync_metrics_tx)
    }
    /// An empty batch reports `None` instead of writing anything.
    #[tokio::test]
    async fn test_save_blocks_empty() {
        reth_tracing::init_test_tracing();
        let persistence_handle = default_persistence_handle();
        let blocks = vec![];
        let (tx, rx) = oneshot::channel();
        persistence_handle.save_blocks(blocks, tx).unwrap();
        let hash = rx.await.unwrap();
        assert_eq!(hash, None);
    }
    /// Saving a single block echoes its hash back through the response channel.
    #[tokio::test]
    async fn test_save_blocks_single_block() {
        reth_tracing::init_test_tracing();
        let persistence_handle = default_persistence_handle();
        let block_number = 0;
        let mut test_block_builder = TestBlockBuilder::eth();
        let executed =
            test_block_builder.get_executed_block_with_number(block_number, B256::random());
        let block_hash = executed.recovered_block().hash();
        let blocks = vec![executed];
        let (tx, rx) = oneshot::channel();
        persistence_handle.save_blocks(blocks, tx).unwrap();
        // bounded wait so a wedged service fails the test instead of hanging it
        let BlockNumHash { hash: actual_hash, number: _ } =
            tokio::time::timeout(std::time::Duration::from_secs(10), rx)
                .await
                .expect("test timed out")
                .expect("channel closed unexpectedly")
                .expect("no hash returned");
        assert_eq!(block_hash, actual_hash);
    }
    /// A multi-block batch reports the hash of its highest block.
    #[tokio::test]
    async fn test_save_blocks_multiple_blocks() {
        reth_tracing::init_test_tracing();
        let persistence_handle = default_persistence_handle();
        let mut test_block_builder = TestBlockBuilder::eth();
        let blocks = test_block_builder.get_executed_blocks(0..5).collect::<Vec<_>>();
        let last_hash = blocks.last().unwrap().recovered_block().hash();
        let (tx, rx) = oneshot::channel();
        persistence_handle.save_blocks(blocks, tx).unwrap();
        let BlockNumHash { hash: actual_hash, number: _ } = rx.await.unwrap().unwrap();
        assert_eq!(last_hash, actual_hash);
    }
    /// Consecutive batches each report the hash of their own highest block.
    #[tokio::test]
    async fn test_save_blocks_multiple_calls() {
        reth_tracing::init_test_tracing();
        let persistence_handle = default_persistence_handle();
        let ranges = [0..1, 1..2, 2..4, 4..5];
        let mut test_block_builder = TestBlockBuilder::eth();
        for range in ranges {
            let blocks = test_block_builder.get_executed_blocks(range).collect::<Vec<_>>();
            let last_hash = blocks.last().unwrap().recovered_block().hash();
            let (tx, rx) = oneshot::channel();
            persistence_handle.save_blocks(blocks, tx).unwrap();
            let BlockNumHash { hash: actual_hash, number: _ } = rx.await.unwrap().unwrap();
            assert_eq!(last_hash, actual_hash);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/chain.rs | crates/engine/tree/src/chain.rs | use crate::backfill::{BackfillAction, BackfillEvent, BackfillSync};
use futures::Stream;
use reth_stages_api::{ControlFlow, PipelineTarget};
use std::{
fmt::{Display, Formatter, Result},
pin::Pin,
task::{Context, Poll},
};
use tracing::*;
/// The type that drives the chain forward.
///
/// A state machine that orchestrates the components responsible for advancing the chain
///
///
/// ## Control flow
///
/// The [`ChainOrchestrator`] is responsible for controlling the backfill sync and additional hooks.
/// It polls the given `handler`, which is responsible for advancing the chain; how it does so is up
/// to the handler. However, due to database restrictions (e.g. exclusive write access), the
/// following invariants apply:
/// - If the handler requests a backfill run (e.g. [`BackfillAction::Start`]), the handler must
///   ensure that while the backfill sync is running, no other write access is granted.
/// - At any time the [`ChainOrchestrator`] can request exclusive write access to the database
///   (e.g. if pruning is required), but will not do so until the handler has acknowledged the
///   request for write access.
///
/// The [`ChainOrchestrator`] polls the [`ChainHandler`] to advance the chain and handles the
/// emitted events. Requests and events are passed to the [`ChainHandler`] via
/// [`ChainHandler::on_event`].
#[must_use = "Stream does nothing unless polled"]
#[derive(Debug)]
pub struct ChainOrchestrator<T, P>
where
    T: ChainHandler,
    P: BackfillSync,
{
    /// The handler for advancing the chain.
    handler: T,
    /// Controls backfill sync.
    backfill_sync: P,
}
impl<T, P> ChainOrchestrator<T, P>
where
    T: ChainHandler + Unpin,
    P: BackfillSync + Unpin,
{
    /// Creates a new [`ChainOrchestrator`] with the given handler and backfill sync.
    pub const fn new(handler: T, backfill_sync: P) -> Self {
        Self { handler, backfill_sync }
    }
    /// Returns the handler
    pub const fn handler(&self) -> &T {
        &self.handler
    }
    /// Returns a mutable reference to the handler
    pub const fn handler_mut(&mut self) -> &mut T {
        &mut self.handler
    }
    /// Triggers a backfill sync for the __valid__ given target.
    ///
    /// CAUTION: This function should be used with care and with a valid target.
    pub fn start_backfill_sync(&mut self, target: impl Into<PipelineTarget>) {
        self.backfill_sync.on_action(BackfillAction::Start(target.into()));
    }
    /// Internal function used to advance the chain.
    ///
    /// Polls the `ChainOrchestrator` for the next event.
    ///
    /// Backfill-sync events take priority over handler events on every loop iteration; the
    /// loop exits only when the handler has no more events (`Pending`) or an event is ready to
    /// bubble up.
    #[tracing::instrument(level = "debug", name = "ChainOrchestrator::poll", skip(self, cx))]
    fn poll_next_event(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<ChainEvent<T::Event>> {
        let this = self.get_mut();
        // This loop polls the components
        //
        // 1. Polls the backfill sync to completion, if active.
        // 2. Advances the chain by polling the handler.
        'outer: loop {
            // try to poll the backfill sync to completion, if active
            match this.backfill_sync.poll(cx) {
                Poll::Ready(backfill_sync_event) => match backfill_sync_event {
                    BackfillEvent::Started(_) => {
                        // notify handler that backfill sync started
                        this.handler.on_event(FromOrchestrator::BackfillSyncStarted);
                        return Poll::Ready(ChainEvent::BackfillSyncStarted);
                    }
                    BackfillEvent::Finished(res) => {
                        return match res {
                            Ok(ctrl) => {
                                tracing::debug!(?ctrl, "backfill sync finished");
                                // notify handler that backfill sync finished
                                this.handler.on_event(FromOrchestrator::BackfillSyncFinished(ctrl));
                                Poll::Ready(ChainEvent::BackfillSyncFinished)
                            }
                            Err(err) => {
                                tracing::error!( %err, "backfill sync failed");
                                Poll::Ready(ChainEvent::FatalError)
                            }
                        }
                    }
                    BackfillEvent::TaskDropped(err) => {
                        tracing::error!( %err, "backfill sync task dropped");
                        return Poll::Ready(ChainEvent::FatalError);
                    }
                },
                Poll::Pending => {}
            }
            // poll the handler for the next event
            match this.handler.poll(cx) {
                Poll::Ready(handler_event) => {
                    match handler_event {
                        HandlerEvent::BackfillAction(action) => {
                            // forward action to backfill_sync; the next loop iteration will
                            // poll the backfill sync again with this action applied
                            this.backfill_sync.on_action(action);
                        }
                        HandlerEvent::Event(ev) => {
                            // bubble up the event
                            return Poll::Ready(ChainEvent::Handler(ev));
                        }
                        HandlerEvent::FatalError => {
                            error!(target: "engine::tree", "Fatal error");
                            return Poll::Ready(ChainEvent::FatalError)
                        }
                    }
                }
                Poll::Pending => {
                    // no more events to process
                    break 'outer
                }
            }
        }
        Poll::Pending
    }
}
impl<T, P> Stream for ChainOrchestrator<T, P>
where
    T: ChainHandler + Unpin,
    P: BackfillSync + Unpin,
{
    type Item = ChainEvent<T::Event>;
    /// Delegates to [`Self::poll_next_event`]; the orchestrator never terminates, so a ready
    /// poll always yields `Some`.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.as_mut().poll_next_event(cx) {
            Poll::Ready(event) => Poll::Ready(Some(event)),
            Poll::Pending => Poll::Pending,
        }
    }
}
/// Event emitted by the [`ChainOrchestrator`]
///
/// These are meant to be used for observability and debugging purposes.
#[derive(Debug)]
pub enum ChainEvent<T> {
    /// Backfill sync started
    BackfillSyncStarted,
    /// Backfill sync finished
    BackfillSyncFinished,
    /// Fatal error
    FatalError,
    /// Event emitted by the handler
    Handler(T),
}
impl<T: Display> Display for ChainEvent<T> {
    /// Renders the variant name; handler events include the inner event's display form.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        match self {
            Self::Handler(event) => write!(f, "Handler({event})"),
            Self::BackfillSyncStarted => f.write_str("BackfillSyncStarted"),
            Self::BackfillSyncFinished => f.write_str("BackfillSyncFinished"),
            Self::FatalError => f.write_str("FatalError"),
        }
    }
}
/// A trait that advances the chain by handling actions.
///
/// This is intended to implement the chain consensus logic, for example the `engine` API.
///
/// ## Control flow
///
/// The [`ChainOrchestrator`] is responsible for advancing this handler through
/// [`ChainHandler::poll`] and handling the emitted events, for example
/// [`HandlerEvent::BackfillAction`] to start a backfill sync. Events from the [`ChainOrchestrator`]
/// are passed to the handler via [`ChainHandler::on_event`], e.g.
/// [`FromOrchestrator::BackfillSyncStarted`] once the backfill sync started or finished.
pub trait ChainHandler: Send + Sync {
    /// Event generated by this handler that orchestrator can bubble up;
    type Event: Send;
    /// Informs the handler about an event from the [`ChainOrchestrator`].
    fn on_event(&mut self, event: FromOrchestrator);
    /// Polls for actions that [`ChainOrchestrator`] should handle.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<HandlerEvent<Self::Event>>;
}
/// Events/Requests that the [`ChainHandler`] can emit to the [`ChainOrchestrator`].
#[derive(Clone, Debug)]
pub enum HandlerEvent<T> {
    /// Request an action to backfill sync; the orchestrator forwards it to its
    /// [`BackfillSync`] implementation.
    BackfillAction(BackfillAction),
    /// Other event emitted by the handler; bubbled up by the orchestrator as
    /// [`ChainEvent::Handler`].
    Event(T),
    /// Fatal error; surfaced by the orchestrator as [`ChainEvent::FatalError`].
    FatalError,
}
/// Internal events issued by the [`ChainOrchestrator`].
#[derive(Clone, Debug)]
pub enum FromOrchestrator {
    /// Invoked when backfill sync finished, carrying the pipeline's final [`ControlFlow`].
    BackfillSyncFinished(ControlFlow),
    /// Invoked when backfill sync started
    BackfillSyncStarted,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/backfill.rs | crates/engine/tree/src/backfill.rs | //! It is expected that the node has two sync modes:
//!
//! - Backfill sync: Sync to a certain block height in stages, e.g. download data from p2p then
//! execute that range.
//! - Live sync: In this mode the node is keeping up with the latest tip and listens for new
//! requests from the consensus client.
//!
//! These modes are mutually exclusive and the node can only be in one mode at a time.
use futures::FutureExt;
use reth_provider::providers::ProviderNodeTypes;
use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult};
use reth_tasks::TaskSpawner;
use std::task::{ready, Context, Poll};
use tokio::sync::oneshot;
use tracing::trace;
/// Represents the state of the backfill synchronization process.
#[derive(Debug, PartialEq, Eq, Default)]
pub enum BackfillSyncState {
    /// The node is not performing any backfill synchronization.
    /// This is the initial or default state.
    #[default]
    Idle,
    /// A backfill synchronization has been requested or planned, but processing has not started
    /// yet.
    Pending,
    /// The node is actively engaged in backfill synchronization.
    Active,
}
impl BackfillSyncState {
    /// Returns true if no backfill run is in progress or planned.
    pub const fn is_idle(&self) -> bool {
        match self {
            Self::Idle => true,
            Self::Pending | Self::Active => false,
        }
    }
    /// Returns true if a backfill run is planned but not yet started.
    pub const fn is_pending(&self) -> bool {
        match self {
            Self::Pending => true,
            Self::Idle | Self::Active => false,
        }
    }
    /// Returns true if a backfill run is currently executing.
    pub const fn is_active(&self) -> bool {
        match self {
            Self::Active => true,
            Self::Idle | Self::Pending => false,
        }
    }
}
/// Backfill sync mode functionality.
pub trait BackfillSync: Send + Sync {
    /// Performs a backfill action, e.g. queuing a new sync target.
    fn on_action(&mut self, action: BackfillAction);
    /// Polls the pipeline for completion.
    ///
    /// Implementations are expected to return `Poll::Pending` while no backfill run is
    /// starting or finishing.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<BackfillEvent>;
}
/// The backfill actions that can be performed.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BackfillAction {
    /// Start backfilling with the given target.
    ///
    /// Note: zero-hash targets are rejected by [`PipelineSync`] as a precaution.
    Start(PipelineTarget),
}
/// The events that can be emitted on backfill sync.
#[derive(Debug)]
pub enum BackfillEvent {
    /// Backfill sync started for the given target.
    Started(PipelineTarget),
    /// Backfill sync finished.
    ///
    /// If this is returned, backfill sync is idle.
    Finished(Result<ControlFlow, PipelineError>),
    /// Sync task was dropped after it was started, unable to receive it because
    /// channel closed. This would indicate a panicked task.
    TaskDropped(String),
}
/// Pipeline sync.
///
/// Drives a [`Pipeline`] as the backfill mechanism: targets are queued via
/// [`BackfillSync::on_action`] and each run is executed on a spawned blocking task.
#[derive(Debug)]
pub struct PipelineSync<N: ProviderNodeTypes> {
    /// The type that can spawn the pipeline task.
    pipeline_task_spawner: Box<dyn TaskSpawner>,
    /// The current state of the pipeline.
    /// The pipeline is used for large ranges.
    pipeline_state: PipelineState<N>,
    /// Pending target block for the pipeline to sync
    pending_pipeline_target: Option<PipelineTarget>,
}
impl<N: ProviderNodeTypes> PipelineSync<N> {
    /// Create a new instance.
    pub fn new(pipeline: Pipeline<N>, pipeline_task_spawner: Box<dyn TaskSpawner>) -> Self {
        Self {
            pipeline_task_spawner,
            pipeline_state: PipelineState::Idle(Some(Box::new(pipeline))),
            pending_pipeline_target: None,
        }
    }
    /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`.
    #[expect(dead_code)]
    const fn is_pipeline_sync_pending(&self) -> bool {
        self.pending_pipeline_target.is_some() && self.pipeline_state.is_idle()
    }
    /// Returns `true` if the pipeline is idle.
    const fn is_pipeline_idle(&self) -> bool {
        self.pipeline_state.is_idle()
    }
    /// Returns `true` if the pipeline is active.
    const fn is_pipeline_active(&self) -> bool {
        !self.is_pipeline_idle()
    }
    /// Sets a new target to sync the pipeline to.
    ///
    /// But ensures the target is not the zero hash.
    fn set_pipeline_sync_target(&mut self, target: PipelineTarget) {
        if target.sync_target().is_some_and(|target| target.is_zero()) {
            trace!(
                target: "consensus::engine::sync",
                "Pipeline target cannot be zero hash."
            );
            // precaution to never sync to the zero hash
            return
        }
        self.pending_pipeline_target = Some(target);
    }
    /// This will spawn the pipeline if it is idle and a target is set or if the pipeline is set to
    /// run continuously.
    ///
    /// Returns [`BackfillEvent::Started`] if a run was spawned, `None` otherwise.
    fn try_spawn_pipeline(&mut self) -> Option<BackfillEvent> {
        match &mut self.pipeline_state {
            PipelineState::Idle(pipeline) => {
                // nothing to spawn unless a target was queued via `set_pipeline_sync_target`
                let target = self.pending_pipeline_target.take()?;
                let (tx, rx) = oneshot::channel();
                // the pipeline is moved into the spawned task; it is handed back through
                // `tx` together with the run result when the run completes
                let pipeline = pipeline.take().expect("exists");
                self.pipeline_task_spawner.spawn_critical_blocking(
                    "pipeline task",
                    Box::pin(async move {
                        let result = pipeline.run_as_fut(Some(target)).await;
                        let _ = tx.send(result);
                    }),
                );
                self.pipeline_state = PipelineState::Running(rx);
                Some(BackfillEvent::Started(target))
            }
            PipelineState::Running(_) => None,
        }
    }
    /// Advances the pipeline state.
    ///
    /// This checks for the result in the channel, or returns pending if the pipeline is idle.
    fn poll_pipeline(&mut self, cx: &mut Context<'_>) -> Poll<BackfillEvent> {
        let res = match self.pipeline_state {
            PipelineState::Idle(_) => return Poll::Pending,
            PipelineState::Running(ref mut fut) => {
                // propagate `Pending` until the spawned run sends its result
                ready!(fut.poll_unpin(cx))
            }
        };
        let ev = match res {
            Ok((pipeline, result)) => {
                // reclaim the pipeline so it can be reused for the next run
                self.pipeline_state = PipelineState::Idle(Some(Box::new(pipeline)));
                BackfillEvent::Finished(result)
            }
            Err(why) => {
                // failed to receive the pipeline
                BackfillEvent::TaskDropped(why.to_string())
            }
        };
        Poll::Ready(ev)
    }
}
impl<N: ProviderNodeTypes> BackfillSync for PipelineSync<N> {
    /// Queues the requested target; it is picked up on the next `poll`.
    fn on_action(&mut self, event: BackfillAction) {
        // `BackfillAction` has a single variant, so this pattern is irrefutable.
        let BackfillAction::Start(target) = event;
        self.set_pipeline_sync_target(target);
    }

    /// Spawns a queued run if possible, otherwise drives the in-flight run.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<BackfillEvent> {
        // A queued target takes precedence: spawn the pipeline and report the start event.
        if let Some(started) = self.try_spawn_pipeline() {
            return Poll::Ready(started)
        }
        // Drive the running pipeline; `poll_pipeline` already yields `Pending` when the
        // pipeline is idle, so no separate idleness check is needed here.
        self.poll_pipeline(cx)
    }
}
/// The possible pipeline states within the sync controller.
///
/// [`PipelineState::Idle`] means that the pipeline is currently idle.
/// [`PipelineState::Running`] means that the pipeline is currently running.
///
/// NOTE: The differentiation between these two states is important, because when the pipeline is
/// running, it acquires the write lock over the database. This means that we cannot forward to the
/// blockchain tree any messages that would result in database writes, since it would result in a
/// deadlock.
#[derive(Debug)]
enum PipelineState<N: ProviderNodeTypes> {
    /// Pipeline is idle.
    ///
    /// The `Option` is only `None` transiently while the pipeline is being moved into a
    /// spawned run (see [`PipelineSync::try_spawn_pipeline`]).
    Idle(Option<Box<Pipeline<N>>>),
    /// Pipeline is running and waiting for a response
    ///
    /// The receiver resolves with the pipeline instance and the run result.
    Running(oneshot::Receiver<PipelineWithResult<N>>),
}
impl<N: ProviderNodeTypes> PipelineState<N> {
    /// Returns `true` if the pipeline is not currently running.
    const fn is_idle(&self) -> bool {
        match self {
            Self::Idle(_) => true,
            Self::Running(_) => false,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder};
    use alloy_consensus::Header;
    use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M;
    use alloy_primitives::{BlockNumber, B256};
    use assert_matches::assert_matches;
    use futures::poll;
    use reth_chainspec::{ChainSpecBuilder, MAINNET};
    use reth_network_p2p::test_utils::TestFullBlockClient;
    use reth_primitives_traits::SealedHeader;
    use reth_provider::test_utils::MockNodeTypesWithDB;
    use reth_stages::ExecOutput;
    use reth_stages_api::StageCheckpoint;
    use reth_tasks::TokioTaskExecutor;
    use std::{collections::VecDeque, future::poll_fn, sync::Arc};
    /// Fixture holding the [`PipelineSync`] under test and the hash of the highest
    /// header inserted into the test block client.
    struct TestHarness {
        pipeline_sync: PipelineSync<MockNodeTypesWithDB>,
        tip: B256,
    }
    impl TestHarness {
        /// Builds a harness with `total_blocks` headers and a test pipeline that
        /// reports completion at block `pipeline_done_after`.
        fn new(total_blocks: usize, pipeline_done_after: u64) -> Self {
            let chain_spec = Arc::new(
                ChainSpecBuilder::default()
                    .chain(MAINNET.chain)
                    .genesis(MAINNET.genesis.clone())
                    .paris_activated()
                    .build(),
            );
            // force the pipeline to be "done" after `pipeline_done_after` blocks
            let pipeline = TestPipelineBuilder::new()
                .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput {
                    checkpoint: StageCheckpoint::new(BlockNumber::from(pipeline_done_after)),
                    done: true,
                })]))
                .build(chain_spec);
            let pipeline_sync = PipelineSync::new(pipeline, Box::<TokioTaskExecutor>::default());
            let client = TestFullBlockClient::default();
            let header = Header {
                base_fee_per_gas: Some(7),
                gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
                ..Default::default()
            };
            let header = SealedHeader::seal_slow(header);
            insert_headers_into_client(&client, header, 0..total_blocks);
            // the backfill target is the hash of the highest inserted header
            let tip = client.highest_block().expect("there should be blocks here").hash();
            Self { pipeline_sync, tip }
        }
    }
    #[tokio::test]
    async fn pipeline_started_and_finished() {
        const TOTAL_BLOCKS: usize = 10;
        const PIPELINE_DONE_AFTER: u64 = 5;
        let TestHarness { mut pipeline_sync, tip } =
            TestHarness::new(TOTAL_BLOCKS, PIPELINE_DONE_AFTER);
        let sync_future = poll_fn(|cx| pipeline_sync.poll(cx));
        let next_event = poll!(sync_future);
        // sync target not set, pipeline not started
        assert_matches!(next_event, Poll::Pending);
        pipeline_sync.on_action(BackfillAction::Start(PipelineTarget::Sync(tip)));
        let sync_future = poll_fn(|cx| pipeline_sync.poll(cx));
        let next_event = poll!(sync_future);
        // sync target set, pipeline started
        assert_matches!(next_event, Poll::Ready(BackfillEvent::Started(target)) => {
            assert_eq!(target.sync_target().unwrap(), tip);
        });
        // the next event should be the pipeline finishing in a good state
        let sync_future = poll_fn(|cx| pipeline_sync.poll(cx));
        let next_ready = sync_future.await;
        assert_matches!(next_ready, BackfillEvent::Finished(result) => {
            assert_matches!(result, Ok(control_flow) => assert_eq!(control_flow, ControlFlow::Continue { block_number: PIPELINE_DONE_AFTER }));
        });
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/test_utils.rs | crates/engine/tree/src/test_utils.rs | use alloy_primitives::B256;
use reth_chainspec::ChainSpec;
use reth_ethereum_primitives::BlockBody;
use reth_network_p2p::test_utils::TestFullBlockClient;
use reth_primitives_traits::SealedHeader;
use reth_provider::{
test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB},
ExecutionOutcome,
};
use reth_prune_types::PruneModes;
use reth_stages::{test_utils::TestStages, ExecOutput, StageError};
use reth_stages_api::Pipeline;
use reth_static_file::StaticFileProducer;
use std::{collections::VecDeque, ops::Range, sync::Arc};
use tokio::sync::watch;
/// Test pipeline builder.
#[derive(Default, Debug)]
pub struct TestPipelineBuilder {
    /// Canned results the test stages return from successive pipeline executions, in order.
    pipeline_exec_outputs: VecDeque<Result<ExecOutput, StageError>>,
    /// Pre-baked execution outcomes for the test consensus engine.
    executor_results: Vec<ExecutionOutcome>,
}
impl TestPipelineBuilder {
    /// Create a new [`TestPipelineBuilder`].
    pub const fn new() -> Self {
        Self { pipeline_exec_outputs: VecDeque::new(), executor_results: Vec::new() }
    }
    /// Set the pipeline execution outputs to use for the test consensus engine.
    pub fn with_pipeline_exec_outputs(
        mut self,
        pipeline_exec_outputs: VecDeque<Result<ExecOutput, StageError>>,
    ) -> Self {
        self.pipeline_exec_outputs = pipeline_exec_outputs;
        self
    }
    /// Set the executor results to use for the test consensus engine.
    pub fn with_executor_results(mut self, executor_results: Vec<ExecutionOutcome>) -> Self {
        self.executor_results = executor_results;
        self
    }
    /// Builds the pipeline.
    ///
    /// Also initializes test tracing so pipeline logs show up in test output.
    pub fn build(self, chain_spec: Arc<ChainSpec>) -> Pipeline<MockNodeTypesWithDB> {
        reth_tracing::init_test_tracing();
        // Setup pipeline
        // The tip receiver is intentionally dropped; tests drive runs via explicit targets.
        let (tip_tx, _tip_rx) = watch::channel(B256::default());
        let pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
            .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default()))
            .with_tip_sender(tip_tx);
        let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec);
        let static_file_producer =
            StaticFileProducer::new(provider_factory.clone(), PruneModes::default());
        pipeline.build(provider_factory, static_file_producer)
    }
}
/// Starting from the given genesis header, inserts one child header (with an empty
/// body) into the test full block client for every element of `range`.
///
/// Each header links to the previous one via `parent_hash` and bumps `number` and
/// `timestamp` by one.
pub fn insert_headers_into_client(
    client: &TestFullBlockClient,
    genesis_header: SealedHeader,
    range: Range<usize>,
) {
    let empty_body = BlockBody::default();
    let mut parent = genesis_header;
    for _ in range {
        // derive the next child header from the previous sealed header
        let (mut child, parent_hash) = parent.split();
        child.parent_hash = parent_hash;
        child.number += 1;
        child.timestamp += 1;
        parent = SealedHeader::seal_slow(child);
        client.insert(parent.clone(), empty_body.clone());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/backup.rs | crates/engine/tree/src/backup.rs | //! reth's database backup functionality
use alloy_eips::BlockNumHash;
use reth_errors::ProviderError;
use reth_node_core::dirs::{ChainPath, DataDirPath};
use std::{
path::PathBuf,
sync::mpsc::{Receiver, Sender},
time::Instant,
};
use thiserror::Error;
use tokio::sync::oneshot;
use tracing::*;
/// Configuration for the backup service
///
/// NOTE(review): not referenced anywhere in this module — confirm it is consumed by
/// callers elsewhere; [`BackupService`] currently derives its paths from the data dir.
#[derive(Debug, Clone)]
pub struct BackupConfig {
    /// Source directory to backup
    pub source_dir: PathBuf,
    /// Destination directory for backups
    pub dest_dir: PathBuf,
}
/// Service that handles database backups based on block events
#[derive(Debug)]
pub struct BackupService {
    /// Incoming backup requests
    ///
    /// Drained by [`BackupService::run`] until all senders are dropped.
    incoming: Receiver<BackupAction>,
    /// The data directory for the engine tree.
    ///
    /// The whole directory is copied to a sibling `<data_dir>_backup` directory
    /// on each request.
    data_dir: ChainPath<DataDirPath>,
}
/// A signal to the backup service that a backup should be performed.
#[derive(Debug)]
pub enum BackupAction {
    /// Perform a backup at the given block number.
    ///
    /// The oneshot sender is answered with `Some(block)` on success and `None` when
    /// the backup failed.
    BackupAtBlock(BlockNumHash, oneshot::Sender<Option<BlockNumHash>>),
}
impl BackupService {
/// Create a new backup service
pub fn new(incoming: Receiver<BackupAction>, data_dir: ChainPath<DataDirPath>) -> Self {
Self { incoming, data_dir }
}
/// Main loop that processes backup actions
pub fn run(self) -> Result<(), ProviderError> {
debug!(target: "engine::backup", service=?self, "Backup service starting to run");
while let Ok(action) = self.incoming.recv() {
debug!(target: "engine::backup", action=?action, "Backup service received action");
match action {
BackupAction::BackupAtBlock(block_number, sender) => {
let result = self.perform_backup(block_number);
if let Err(e) = result {
error!(target: "engine::backup", ?e, "Backup failed");
let _ = sender.send(None);
} else {
let _ = sender.send(Some(block_number));
}
}
}
}
Ok(())
}
/// Perform the actual backup operation
fn perform_backup(&self, block_number: BlockNumHash) -> Result<(), ProviderError> {
debug!(target: "engine::backup", ?block_number, "Starting backup");
let backup_path = PathBuf::from(format!("{}_backup", self.data_dir.data_dir().display(),));
// Perform the actual backup using the provider
BackupService::backup_dir(&PathBuf::from(self.data_dir.data_dir()), &backup_path)?;
info!(
target: "engine::backup",
?block_number,
"Backup completed successfully"
);
Ok(())
}
/// Recursively copies the source directory to the destination directory.
///
/// This function uses asynchronous file operations to perform the backup.
///
/// # Arguments
///
/// * `source` - The source directory to backup.
/// * `destination` - The destination directory where the backup will be stored.
///
/// # Returns
///
/// * `Ok(())` if the backup is successful.
/// * `Err(anyhow::Error)` if an error occurs during the backup.
pub fn backup_dir(source: &PathBuf, destination: &PathBuf) -> Result<(), ProviderError> {
debug!(target: "engine::backup", ?source, ?destination);
let source_path = source.as_path();
let destination_path = destination.as_path();
// Retrieve the metadata of the source path
let metadata = std::fs::metadata(source_path)
.expect(&format!("Failed to access source path: {} ", source_path.display(),));
// If the source is a directory, create the destination directory if it does not exist
if metadata.is_dir() {
if !destination_path.exists() {
std::fs::create_dir_all(destination_path)
.expect(&format!("Failed to create destination directory"));
}
// Stack to manage recursive copying
let mut entries_stack =
vec![(source_path.to_path_buf(), destination_path.to_path_buf())];
while let Some((current_src, current_dst)) = entries_stack.pop() {
let mut entries = std::fs::read_dir(¤t_src)
.expect(&format!("Failed to read directory {}", current_src.display(),));
while let Some(entry) =
entries.next().transpose().expect(&format!("Failed to get diredctory entry"))
{
let entry_path = entry.path();
let entry_name = entry.file_name();
let dst_path = current_dst.join(&entry_name);
let entry_metadata =
entry.metadata().expect(&format!("Failed to get diredctory entry"));
if entry_metadata.is_dir() {
if !dst_path.exists() {
std::fs::create_dir_all(&dst_path).expect(&format!(
"Failed to create directory {}",
dst_path.display(),
));
}
entries_stack.push((entry_path, dst_path));
} else {
std::fs::copy(&entry_path, &dst_path).expect(&format!(
"Failed to copy file from {} to {}",
entry_path.display(),
dst_path.display(),
));
}
}
}
} else {
// If the source is a file, copy it directly, creating parent directories if necessary
if let Some(parent) = destination_path.parent() {
if !parent.exists() {
std::fs::create_dir_all(parent)
.expect(
&format!("Failed to create parent directory {}", parent.display(),),
);
}
}
std::fs::copy(source_path, destination_path).expect(&format!(
"Failed to copy file from {} to {}",
source_path.display(),
destination_path.display(),
));
}
Ok(())
}
}
/// Errors that can occur during backup operations
///
/// NOTE(review): not constructed anywhere in this module — confirm it is used by
/// callers, or consider wiring it into [`BackupService::backup_dir`] in place of panics.
#[derive(Debug, Error)]
pub enum BackupError {
    /// IO error
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// Provider error
    #[error(transparent)]
    Provider(#[from] reth_provider::ProviderError),
}
/// Handle to interact with the backup service
#[derive(Debug)]
pub struct BackupHandle {
    /// The sender for backup actions
    pub sender: Sender<BackupAction>,
    /// The receiver from backup service
    ///
    /// `Some` while a backup is in flight, paired with the instant the backup started.
    pub rx: Option<(oneshot::Receiver<Option<BlockNumHash>>, Instant)>,
    /// The latest backup block number
    pub latest_backup_block: BlockNumHash,
}
impl BackupHandle {
    /// Create a new backup handle with no backup in flight.
    pub fn new(sender: Sender<BackupAction>) -> Self {
        Self { sender, rx: None, latest_backup_block: BlockNumHash::default() }
    }

    /// Spawns a new [`BackupService`] on a dedicated OS thread and returns a handle
    /// connected to it.
    ///
    /// # Panics
    ///
    /// Panics if the OS thread cannot be spawned.
    pub fn spawn_service(data_dir: ChainPath<DataDirPath>) -> BackupHandle {
        let (tx, rx) = std::sync::mpsc::channel();
        let handle = BackupHandle::new(tx);
        let service = BackupService::new(rx, data_dir);
        std::thread::Builder::new()
            .name("Backup Service".to_string())
            .spawn(move || {
                if let Err(err) = service.run() {
                    error!(target: "engine::backup", ?err, "Backup service failed");
                }
            })
            // was a bare `unwrap()`: give thread-spawn failure a diagnosable message
            .expect("failed to spawn backup service thread");
        handle
    }

    /// Checks if a backup is currently in progress.
    pub fn in_progress(&self) -> bool {
        self.rx.is_some()
    }

    /// Sets state for a started backup task, recording the result receiver and start time.
    pub(crate) fn start(&mut self, rx: oneshot::Receiver<Option<BlockNumHash>>) {
        self.rx = Some((rx, Instant::now()));
    }

    /// Sets state for a finished backup task, updating the latest backed-up block.
    pub fn finish(&mut self, block_number: BlockNumHash) {
        self.latest_backup_block = block_number;
        self.rx = None;
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/metrics.rs | crates/engine/tree/src/metrics.rs | use reth_metrics::{
metrics::{Gauge, Histogram},
Metrics,
};
/// Metrics for the `BasicBlockDownloader`.
///
/// Exposed under the `consensus.engine.beacon` metrics scope.
#[derive(Metrics)]
#[metrics(scope = "consensus.engine.beacon")]
pub(crate) struct BlockDownloaderMetrics {
    /// How many blocks are currently being downloaded.
    pub(crate) active_block_downloads: Gauge,
}
/// Metrics for the `PersistenceService`
///
/// Exposed under the `consensus.engine.persistence` metrics scope; all histograms
/// record durations in seconds.
#[derive(Metrics)]
#[metrics(scope = "consensus.engine.persistence")]
pub(crate) struct PersistenceMetrics {
    /// How long it took for blocks to be removed
    pub(crate) remove_blocks_above_duration_seconds: Histogram,
    /// How long it took for blocks to be saved
    pub(crate) save_blocks_duration_seconds: Histogram,
    /// How long it took for blocks to be pruned
    pub(crate) prune_before_duration_seconds: Histogram,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/tree/trie_updates.rs | crates/engine/tree/src/tree/trie_updates.rs | use alloy_primitives::{map::HashMap, B256};
use reth_db::DatabaseError;
use reth_trie::{
trie_cursor::{TrieCursor, TrieCursorFactory},
updates::{StorageTrieUpdates, TrieUpdates},
BranchNodeCompact, Nibbles,
};
use std::collections::BTreeSet;
use tracing::warn;
/// The same trie entry as observed from three sources, used when logging mismatches.
#[derive(Debug)]
struct EntryDiff<T> {
    /// Value produced by the state root task.
    task: T,
    /// Value produced by the regular state root calculation.
    regular: T,
    /// Value observed in the database (the reference for one-sided updates).
    database: T,
}
/// Accumulated differences between the state root task's trie updates and the regular
/// state root calculation's trie updates.
#[derive(Debug, Default)]
struct TrieUpdatesDiff {
    /// Account branch nodes that differ between the two calculations, keyed by path.
    account_nodes: HashMap<Nibbles, EntryDiff<Option<BranchNodeCompact>>>,
    /// Account trie paths whose removal status differs.
    removed_nodes: HashMap<Nibbles, EntryDiff<bool>>,
    /// Per-storage-trie differences, keyed by the storage trie's `B256` key.
    storage_tries: HashMap<B256, StorageTrieUpdatesDiff>,
}
impl TrieUpdatesDiff {
    /// Returns `true` when any account-level or storage-level difference was recorded.
    fn has_differences(&self) -> bool {
        !self.account_nodes.is_empty() ||
            !self.removed_nodes.is_empty() ||
            !self.storage_tries.is_empty()
    }

    /// Emits one warning per recorded difference, consuming the diff.
    pub(super) fn log_differences(self) {
        if !self.has_differences() {
            return
        }
        for (path, EntryDiff { task, regular, database }) in self.account_nodes {
            warn!(target: "engine::tree", ?path, ?task, ?regular, ?database, "Difference in account trie updates");
        }
        for (path, diff) in self.removed_nodes {
            let EntryDiff {
                task: task_removed,
                regular: regular_removed,
                database: database_not_exists,
            } = diff;
            warn!(target: "engine::tree", ?path, ?task_removed, ?regular_removed, ?database_not_exists, "Difference in removed account trie nodes");
        }
        for (address, storage_diff) in self.storage_tries {
            storage_diff.log_differences(address);
        }
    }
}
/// Accumulated differences for a single storage trie.
#[derive(Debug, Default)]
struct StorageTrieUpdatesDiff {
    /// Set when the two calculations disagree on whether the storage trie was deleted.
    is_deleted: Option<EntryDiff<bool>>,
    /// Storage branch nodes that differ between the two calculations, keyed by path.
    storage_nodes: HashMap<Nibbles, EntryDiff<Option<BranchNodeCompact>>>,
    /// Storage trie paths whose removal status differs.
    removed_nodes: HashMap<Nibbles, EntryDiff<bool>>,
}
impl StorageTrieUpdatesDiff {
    /// Returns `true` when the deletion flag or any node-level difference was recorded.
    fn has_differences(&self) -> bool {
        self.is_deleted.is_some() ||
            !self.storage_nodes.is_empty() ||
            !self.removed_nodes.is_empty()
    }

    /// Emits one warning per recorded difference for the storage trie keyed by `address`.
    fn log_differences(&self, address: B256) {
        if let Some(deleted) = &self.is_deleted {
            let (task_deleted, regular_deleted, database_not_exists) =
                (deleted.task, deleted.regular, deleted.database);
            warn!(target: "engine::tree", ?address, ?task_deleted, ?regular_deleted, ?database_not_exists, "Difference in storage trie deletion");
        }
        for (path, EntryDiff { task, regular, database }) in &self.storage_nodes {
            warn!(target: "engine::tree", ?address, ?path, ?task, ?regular, ?database, "Difference in storage trie updates");
        }
        for (path, diff) in &self.removed_nodes {
            let (task_removed, regular_removed, database_not_exists) =
                (diff.task, diff.regular, diff.database);
            warn!(target: "engine::tree", ?address, ?path, ?task_removed, ?regular_removed, ?database_not_exists, "Difference in removed storage trie nodes");
        }
    }
}
/// Compares the trie updates from state root task, regular state root calculation and database,
/// and logs the differences if there's any.
pub(super) fn compare_trie_updates(
    trie_cursor_factory: impl TrieCursorFactory,
    task: TrieUpdates,
    regular: TrieUpdates,
) -> Result<(), DatabaseError> {
    // normalize both sides first: removals shadowed by updates are dropped
    let mut task = adjust_trie_updates(task);
    let mut regular = adjust_trie_updates(regular);
    let mut diff = TrieUpdatesDiff::default();
    // compare account nodes
    let mut account_trie_cursor = trie_cursor_factory.account_trie_cursor()?;
    // iterate the union of keys from both sides (BTreeSet: deterministic order)
    for key in task
        .account_nodes
        .keys()
        .chain(regular.account_nodes.keys())
        .copied()
        .collect::<BTreeSet<_>>()
    {
        let (task, regular) = (task.account_nodes.remove(&key), regular.account_nodes.remove(&key));
        // the database value is the reference when only one side updated the node
        let database = account_trie_cursor.seek_exact(key)?.map(|x| x.1);
        if !branch_nodes_equal(task.as_ref(), regular.as_ref(), database.as_ref())? {
            diff.account_nodes.insert(key, EntryDiff { task, regular, database });
        }
    }
    // compare removed nodes
    let mut account_trie_cursor = trie_cursor_factory.account_trie_cursor()?;
    for key in task
        .removed_nodes
        .iter()
        .chain(regular.removed_nodes.iter())
        .copied()
        .collect::<BTreeSet<_>>()
    {
        let (task_removed, regular_removed) =
            (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key));
        let database_not_exists = account_trie_cursor.seek_exact(key)?.is_none();
        // If the deletion is a no-op, meaning that the entry is not in the
        // database, do not add it to the diff.
        if task_removed != regular_removed && !database_not_exists {
            diff.removed_nodes.insert(
                key,
                EntryDiff {
                    task: task_removed,
                    regular: regular_removed,
                    database: database_not_exists,
                },
            );
        }
    }
    // compare storage tries
    for key in task
        .storage_tries
        .keys()
        .chain(regular.storage_tries.keys())
        .copied()
        .collect::<BTreeSet<_>>()
    {
        let (mut task, mut regular) =
            (task.storage_tries.remove(&key), regular.storage_tries.remove(&key));
        // only descend into a storage trie when the two sides differ at all
        if task != regular {
            #[expect(clippy::or_fun_call)]
            let storage_diff = compare_storage_trie_updates(
                || trie_cursor_factory.storage_trie_cursor(key),
                // Compare non-existent storage tries as empty.
                task.as_mut().unwrap_or(&mut Default::default()),
                regular.as_mut().unwrap_or(&mut Default::default()),
            )?;
            if storage_diff.has_differences() {
                diff.storage_tries.insert(key, storage_diff);
            }
        }
    }
    // log differences
    diff.log_differences();
    Ok(())
}
/// Compares the storage trie updates from the state root task and the regular calculation
/// for a single storage trie, using fresh cursors from `trie_cursor` as the database
/// reference. Returns the accumulated differences.
fn compare_storage_trie_updates<C: TrieCursor>(
    trie_cursor: impl Fn() -> Result<C, DatabaseError>,
    task: &mut StorageTrieUpdates,
    regular: &mut StorageTrieUpdates,
) -> Result<StorageTrieUpdatesDiff, DatabaseError> {
    // the storage trie "exists" in the database if its cursor yields at least one entry
    let database_not_exists = trie_cursor()?.next()?.is_none();
    let mut diff = StorageTrieUpdatesDiff {
        // If the deletion is a no-op, meaning that the entry is not in the
        // database, do not add it to the diff.
        is_deleted: (task.is_deleted != regular.is_deleted && !database_not_exists).then_some(
            EntryDiff {
                task: task.is_deleted,
                regular: regular.is_deleted,
                database: database_not_exists,
            },
        ),
        ..Default::default()
    };
    // compare storage nodes
    let mut storage_trie_cursor = trie_cursor()?;
    for key in task
        .storage_nodes
        .keys()
        .chain(regular.storage_nodes.keys())
        .copied()
        .collect::<BTreeSet<_>>()
    {
        let (task, regular) = (task.storage_nodes.remove(&key), regular.storage_nodes.remove(&key));
        let database = storage_trie_cursor.seek_exact(key)?.map(|x| x.1);
        if !branch_nodes_equal(task.as_ref(), regular.as_ref(), database.as_ref())? {
            diff.storage_nodes.insert(key, EntryDiff { task, regular, database });
        }
    }
    // compare removed nodes
    // a fresh cursor for the removal pass (the previous one has been advanced)
    let mut storage_trie_cursor = trie_cursor()?;
    for key in
        task.removed_nodes.iter().chain(regular.removed_nodes.iter()).collect::<BTreeSet<_>>()
    {
        let (task_removed, regular_removed) =
            (task.removed_nodes.contains(key), regular.removed_nodes.contains(key));
        if task_removed == regular_removed {
            continue;
        }
        let database_not_exists = storage_trie_cursor.seek_exact(*key)?.map(|x| x.1).is_none();
        // If the deletion is a no-op, meaning that the entry is not in the
        // database, do not add it to the diff.
        if !database_not_exists {
            diff.removed_nodes.insert(
                *key,
                EntryDiff {
                    task: task_removed,
                    regular: regular_removed,
                    database: database_not_exists,
                },
            );
        }
    }
    Ok(diff)
}
/// Filters the removed nodes of both account trie updates and storage trie updates, so that they
/// don't include those nodes that were also updated.
fn adjust_trie_updates(trie_updates: TrieUpdates) -> TrieUpdates {
    TrieUpdates {
        // keep only removals whose path was not simultaneously updated
        removed_nodes: trie_updates
            .removed_nodes
            .into_iter()
            .filter(|key| !trie_updates.account_nodes.contains_key(key))
            .collect(),
        storage_tries: trie_updates
            .storage_tries
            .into_iter()
            .map(|(address, updates)| {
                (
                    address,
                    StorageTrieUpdates {
                        // the same filtering, applied per storage trie
                        removed_nodes: updates
                            .removed_nodes
                            .into_iter()
                            .filter(|key| !updates.storage_nodes.contains_key(key))
                            .collect(),
                        ..updates
                    },
                )
            })
            .collect(),
        ..trie_updates
    }
}
/// Compares the branch nodes from state root task and regular state root calculation.
///
/// If one of the branch nodes is [`None`], it means it's not updated and the other is compared to
/// the branch node from the database.
///
/// Returns `true` if they are equal.
fn branch_nodes_equal(
    task: Option<&BranchNodeCompact>,
    regular: Option<&BranchNodeCompact>,
    database: Option<&BranchNodeCompact>,
) -> Result<bool, DatabaseError> {
    let equal = match (task, regular) {
        // both sides updated the node: compare them field by field
        (Some(task), Some(regular)) => {
            task.state_mask == regular.state_mask &&
                task.tree_mask == regular.tree_mask &&
                task.hash_mask == regular.hash_mask &&
                task.hashes == regular.hashes &&
                task.root_hash == regular.root_hash
        }
        // neither side updated the node
        (None, None) => true,
        // exactly one side updated the node: compare it against the database value
        _ => task.or(regular) == database,
    };
    Ok(equal)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/tree/tests.rs | crates/engine/tree/src/tree/tests.rs | use super::*;
use crate::{backup::BackupHandle, persistence::PersistenceAction};
use alloy_consensus::Header;
use alloy_primitives::{
map::{HashMap, HashSet},
Bytes, B256,
};
use alloy_rlp::Decodable;
use alloy_rpc_types_engine::{ExecutionData, ExecutionPayloadSidecar, ExecutionPayloadV1};
use assert_matches::assert_matches;
use reth_chain_state::{test_utils::TestBlockBuilder, BlockState};
use reth_chainspec::{ChainSpec, HOLESKY, MAINNET};
use reth_engine_primitives::{EngineApiValidator, ForkchoiceStatus, NoopInvalidBlockHook};
use reth_ethereum_consensus::EthBeaconConsensus;
use reth_ethereum_engine_primitives::EthEngineTypes;
use reth_ethereum_primitives::{Block, EthPrimitives};
use reth_evm_ethereum::MockEvmConfig;
use reth_primitives_traits::Block as _;
use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome};
use reth_trie::HashedPostState;
use std::{
collections::BTreeMap,
str::FromStr,
sync::mpsc::{channel, Sender},
};
use tokio::sync::oneshot;
/// Mock engine validator for tests
///
/// A zero-sized stand-in: payload validation only requires the payload to decode into a
/// structurally valid block (see the `PayloadValidator` impl below); no consensus rules
/// are checked.
#[derive(Debug, Clone)]
struct MockEngineValidator;
impl reth_engine_primitives::PayloadValidator<EthEngineTypes> for MockEngineValidator {
    type Block = Block;

    fn ensure_well_formed_payload(
        &self,
        payload: ExecutionData,
    ) -> Result<
        reth_primitives_traits::RecoveredBlock<Self::Block>,
        reth_payload_primitives::NewPayloadError,
    > {
        // Tests only need a structurally valid block: decode the execution payload into a
        // block, then seal and recover it. No consensus-level checks are performed.
        let decoded = reth_ethereum_primitives::Block::try_from(payload.payload)
            .map_err(|e| reth_payload_primitives::NewPayloadError::Other(format!("{e:?}").into()))?;
        decoded
            .seal_slow()
            .try_recover()
            .map_err(|e| reth_payload_primitives::NewPayloadError::Other(e.into()))
    }
}
impl EngineApiValidator<EthEngineTypes> for MockEngineValidator {
    /// Accepts every payload/attributes combination regardless of engine API version.
    fn validate_version_specific_fields(
        &self,
        _version: reth_payload_primitives::EngineApiMessageVersion,
        _payload_or_attrs: reth_payload_primitives::PayloadOrAttributes<
            '_,
            alloy_rpc_types_engine::ExecutionData,
            alloy_rpc_types_engine::PayloadAttributes,
        >,
    ) -> Result<(), reth_payload_primitives::EngineObjectValidationError> {
        // Mock implementation - always valid
        Ok(())
    }
    /// Accepts any payload attributes regardless of engine API version.
    fn ensure_well_formed_attributes(
        &self,
        _version: reth_payload_primitives::EngineApiMessageVersion,
        _attributes: &alloy_rpc_types_engine::PayloadAttributes,
    ) -> Result<(), reth_payload_primitives::EngineObjectValidationError> {
        // Mock implementation - always valid
        Ok(())
    }
}
/// This is a test channel that allows you to `release` any value that is in the channel.
///
/// If nothing has been sent, then the next value will be immediately sent.
///
/// The channel sits between a producer and a consumer: values sent by the producer are
/// buffered in `rx` and only forwarded to `tx` when the associated [`TestChannelHandle`]
/// signals a release (see `intercept_loop`).
struct TestChannel<T> {
    /// If an item is sent to this channel, an item will be released in the wrapped channel
    release: Receiver<()>,
    /// The sender channel (forwards released values to the consumer)
    tx: Sender<T>,
    /// The receiver channel (buffers values coming from the producer)
    rx: Receiver<T>,
}
impl<T: Send + 'static> TestChannel<T> {
    /// Creates a new test channel
    fn spawn_channel() -> (Sender<T>, Receiver<T>, TestChannelHandle) {
        // Producer-facing channel: callers push values into `upstream_tx`.
        let (upstream_tx, upstream_rx) = channel();
        // Consumer-facing channel: released values are forwarded to `forward_rx`.
        let (forward_tx, forward_rx) = channel();
        // Control channel the handle uses to release one buffered value at a time.
        let (release_tx, release_rx) = channel();
        let handle = TestChannelHandle::new(release_tx);
        let interceptor = Self { release: release_rx, tx: forward_tx, rx: upstream_rx };
        // Run the forwarding loop on its own thread.
        std::thread::spawn(move || interceptor.intercept_loop());
        (upstream_tx, forward_rx, handle)
    }

    /// Runs the intercept loop, waiting for the handle to release a value
    fn intercept_loop(&self) {
        // Each release signal forwards exactly one buffered value; exit once either
        // the control channel or the buffered channel disconnects.
        while self.release.recv().is_ok() {
            match self.rx.recv() {
                Ok(value) => {
                    let _ = self.tx.send(value);
                }
                Err(_) => return,
            }
        }
    }
}
/// Control handle for a [`TestChannel`]: each call to `release` lets one buffered value
/// through the wrapped channel.
struct TestChannelHandle {
    /// The sender to use for releasing values
    release: Sender<()>,
}
impl TestChannelHandle {
    /// Returns a [`TestChannelHandle`]
    const fn new(release: Sender<()>) -> Self {
        Self { release }
    }

    /// Signals to the channel task that a value should be released
    #[expect(dead_code)]
    fn release(&self) {
        // Ignore send errors: if the intercept loop already exited there is
        // nothing left to release.
        self.release.send(()).ok();
    }
}
/// Test harness wiring an [`EngineApiTreeHandler`] to mock providers, validators and
/// channels so engine-tree behavior can be driven and observed from tests.
struct TestHarness {
    /// The tree handler under test.
    tree: EngineApiTreeHandler<
        EthPrimitives,
        MockEthProvider,
        EthEngineTypes,
        BasicEngineValidator<MockEthProvider, MockEvmConfig, MockEngineValidator>,
        MockEvmConfig,
    >,
    /// Sender used to inject engine messages into the tree.
    to_tree_tx: Sender<FromEngine<EngineApiRequest<EthEngineTypes, EthPrimitives>, Block>>,
    /// Receiver for events emitted by the tree.
    from_tree_rx: UnboundedReceiver<EngineApiEvent>,
    /// Blocks installed via `with_blocks`, kept for assertions.
    blocks: Vec<ExecutedBlockWithTrieUpdates>,
    /// Receiver for persistence actions issued by the tree.
    action_rx: Receiver<PersistenceAction>,
    /// Builder used to generate test blocks.
    block_builder: TestBlockBuilder,
    /// Mock provider backing the tree.
    provider: MockEthProvider,
}
impl TestHarness {
    /// Creates a harness with a plain persistence channel for the given chain spec.
    fn new(chain_spec: Arc<ChainSpec>) -> Self {
        let (action_tx, action_rx) = channel();
        Self::with_persistence_channel(chain_spec, action_tx, action_rx)
    }
    /// Creates a harness whose persistence channel is a [`TestChannel`], returning the
    /// handle that releases buffered persistence actions one at a time.
    #[expect(dead_code)]
    fn with_test_channel(chain_spec: Arc<ChainSpec>) -> (Self, TestChannelHandle) {
        let (action_tx, action_rx, handle) = TestChannel::spawn_channel();
        (Self::with_persistence_channel(chain_spec, action_tx, action_rx), handle)
    }
    /// Builds the full harness: mock provider, consensus, validator, payload builder and
    /// the [`EngineApiTreeHandler`] itself, all rooted at the chain spec's genesis header.
    fn with_persistence_channel(
        chain_spec: Arc<ChainSpec>,
        action_tx: Sender<PersistenceAction>,
        action_rx: Receiver<PersistenceAction>,
    ) -> Self {
        let persistence_handle = PersistenceHandle::new(action_tx);
        let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone()));
        let provider = MockEthProvider::default();
        let payload_validator = MockEngineValidator;
        let (from_tree_tx, from_tree_rx) = unbounded_channel();
        // Seal the genesis header and use it as the initial canonical head.
        let header = chain_spec.genesis_header().clone();
        let header = SealedHeader::seal_slow(header);
        let engine_api_tree_state =
            EngineApiTreeState::new(10, 10, header.num_hash(), EngineApiKind::Ethereum);
        let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None, None);
        // The payload service side is not exercised; keep the receiver alive unused.
        let (to_payload_service, _payload_command_rx) = unbounded_channel();
        let payload_builder = PayloadBuilderHandle::new(to_payload_service);
        let evm_config = MockEvmConfig::default();
        let engine_validator = BasicEngineValidator::new(
            provider.clone(),
            consensus.clone(),
            evm_config.clone(),
            payload_validator,
            TreeConfig::default(),
            Box::new(NoopInvalidBlockHook::default()),
        );
        let (backup_tx, _backup_rx) = channel();
        let backup_handle = BackupHandle::new(backup_tx);
        let tree = EngineApiTreeHandler::new(
            provider.clone(),
            consensus,
            engine_validator,
            from_tree_tx,
            engine_api_tree_state,
            canonical_in_memory_state,
            persistence_handle,
            PersistenceState::default(),
            payload_builder,
            // always assume enough parallelism for tests
            TreeConfig::default().with_legacy_state_root(false).with_has_enough_parallelism(true),
            EngineApiKind::Ethereum,
            evm_config,
            backup_handle,
        );
        let block_builder = TestBlockBuilder::default().with_chain_spec((*chain_spec).clone());
        Self {
            to_tree_tx: tree.incoming_tx.clone(),
            tree,
            from_tree_rx,
            blocks: vec![],
            action_rx,
            block_builder,
            provider,
        }
    }
    /// Installs the given executed blocks as the canonical chain: populates the tree
    /// state, the in-memory canonical state, and the mock provider. The last block
    /// becomes the canonical head.
    fn with_blocks(mut self, blocks: Vec<ExecutedBlockWithTrieUpdates>) -> Self {
        let mut blocks_by_hash = HashMap::default();
        let mut blocks_by_number = BTreeMap::new();
        let mut state_by_hash = HashMap::default();
        let mut hash_by_number = BTreeMap::new();
        let mut parent_to_child: HashMap<B256, HashSet<B256>> = HashMap::default();
        let mut parent_hash = B256::ZERO;
        for block in &blocks {
            let sealed_block = block.recovered_block();
            let hash = sealed_block.hash();
            let number = sealed_block.number;
            blocks_by_hash.insert(hash, block.clone());
            blocks_by_number.entry(number).or_insert_with(Vec::new).push(block.clone());
            state_by_hash.insert(hash, Arc::new(BlockState::new(block.clone())));
            hash_by_number.insert(number, hash);
            parent_to_child.entry(parent_hash).or_default().insert(hash);
            parent_hash = hash;
        }
        self.tree.state.tree_state = TreeState {
            blocks_by_hash,
            blocks_by_number,
            current_canonical_head: blocks.last().unwrap().recovered_block().num_hash(),
            parent_to_child,
            persisted_trie_updates: HashMap::default(),
            engine_kind: EngineApiKind::Ethereum,
        };
        let last_executed_block = blocks.last().unwrap().clone();
        let pending = Some(BlockState::new(last_executed_block));
        self.tree.canonical_in_memory_state =
            CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending, None, None);
        self.blocks = blocks.clone();
        let recovered_blocks =
            blocks.iter().map(|b| b.recovered_block().clone()).collect::<Vec<_>>();
        self.persist_blocks(recovered_blocks);
        self
    }
    /// Sets the backfill sync state of the tree.
    const fn with_backfill_state(mut self, state: BackfillSyncState) -> Self {
        self.tree.backfill_sync_state = state;
        self
    }
    /// Sends a forkchoice update to `block_hash` and asserts both the response status and
    /// the emitted `ForkchoiceUpdated` event match `fcu_status`.
    async fn fcu_to(&mut self, block_hash: B256, fcu_status: impl Into<ForkchoiceStatus>) {
        let fcu_status = fcu_status.into();
        self.send_fcu(block_hash, fcu_status).await;
        self.check_fcu(block_hash, fcu_status).await;
    }
    /// Sends a `ForkchoiceUpdated` message for `block_hash` and asserts the payload status
    /// in the response matches `fcu_status`.
    async fn send_fcu(&mut self, block_hash: B256, fcu_status: impl Into<ForkchoiceStatus>) {
        let fcu_state = self.fcu_state(block_hash);
        let (tx, rx) = oneshot::channel();
        self.tree
            .on_engine_message(FromEngine::Request(
                BeaconEngineMessage::ForkchoiceUpdated {
                    state: fcu_state,
                    payload_attrs: None,
                    tx,
                    version: EngineApiMessageVersion::default(),
                }
                .into(),
            ))
            .unwrap();
        let response = rx.await.unwrap().unwrap().await.unwrap();
        match fcu_status.into() {
            ForkchoiceStatus::Valid => assert!(response.payload_status.is_valid()),
            ForkchoiceStatus::Syncing => assert!(response.payload_status.is_syncing()),
            ForkchoiceStatus::Invalid => assert!(response.payload_status.is_invalid()),
        }
    }
    /// Asserts that the next event emitted by the tree is a `ForkchoiceUpdated` event for
    /// `block_hash` with status `fcu_status`.
    async fn check_fcu(&mut self, block_hash: B256, fcu_status: impl Into<ForkchoiceStatus>) {
        let fcu_state = self.fcu_state(block_hash);
        // check for ForkchoiceUpdated event
        let event = self.from_tree_rx.recv().await.unwrap();
        match event {
            EngineApiEvent::BeaconConsensus(ConsensusEngineEvent::ForkchoiceUpdated(
                state,
                status,
            )) => {
                assert_eq!(state, fcu_state);
                assert_eq!(status, fcu_status.into());
            }
            _ => panic!("Unexpected event: {event:#?}"),
        }
    }
    /// Builds a [`ForkchoiceState`] with head, safe and finalized all set to `block_hash`.
    const fn fcu_state(&self, block_hash: B256) -> ForkchoiceState {
        ForkchoiceState {
            head_block_hash: block_hash,
            safe_block_hash: block_hash,
            finalized_block_hash: block_hash,
        }
    }
    /// Registers the given blocks (bodies and headers) with the mock provider.
    fn persist_blocks(&self, blocks: Vec<RecoveredBlock<reth_ethereum_primitives::Block>>) {
        let mut block_data: Vec<(B256, Block)> = Vec::with_capacity(blocks.len());
        let mut headers_data: Vec<(B256, Header)> = Vec::with_capacity(blocks.len());
        for block in &blocks {
            block_data.push((block.hash(), block.clone_block()));
            headers_data.push((block.hash(), block.header().clone()));
        }
        self.provider.extend_blocks(block_data);
        self.provider.extend_headers(headers_data);
    }
}
/// Ensures downloaded blocks are processed in batches capped at
/// `max_execute_block_batch_size`: after handling one batch, the remainder is re-queued
/// as a new `DownloadedBlocks` message.
#[test]
fn test_tree_persist_block_batch() {
    let tree_config = TreeConfig::default();
    let chain_spec = MAINNET.clone();
    let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone());
    // we need more than tree_config.persistence_threshold() +1 blocks to
    // trigger the persistence task.
    let blocks: Vec<_> = test_block_builder
        .get_executed_blocks(1..tree_config.persistence_threshold() + 2)
        .collect();
    let mut test_harness = TestHarness::new(chain_spec).with_blocks(blocks);
    // send twice the batch size so exactly one full batch is left over
    let mut blocks = vec![];
    for idx in 0..tree_config.max_execute_block_batch_size() * 2 {
        blocks.push(test_block_builder.generate_random_block(idx as u64, B256::random()));
    }
    test_harness.to_tree_tx.send(FromEngine::DownloadedBlocks(blocks)).unwrap();
    // process the message
    let msg = test_harness.tree.try_recv_engine_message().unwrap().unwrap();
    test_harness.tree.on_engine_message(msg).unwrap();
    // we now should receive the other batch
    let msg = test_harness.tree.try_recv_engine_message().unwrap().unwrap();
    match msg {
        FromEngine::DownloadedBlocks(blocks) => {
            assert_eq!(blocks.len(), tree_config.max_execute_block_batch_size());
        }
        _ => panic!("unexpected message: {msg:#?}"),
    }
}
/// Verifies that once the canonical chain exceeds the persistence threshold, the tree
/// issues a `SaveBlocks` action covering all blocks except the in-memory buffer target.
#[tokio::test]
async fn test_tree_persist_blocks() {
    let tree_config = TreeConfig::default();
    let chain_spec = MAINNET.clone();
    let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone());
    // we need more than tree_config.persistence_threshold() +1 blocks to
    // trigger the persistence task.
    let blocks: Vec<_> = test_block_builder
        .get_executed_blocks(1..tree_config.persistence_threshold() + 2)
        .collect();
    let test_harness = TestHarness::new(chain_spec).with_blocks(blocks.clone());
    std::thread::Builder::new()
        .name("Engine Task".to_string())
        .spawn(|| test_harness.tree.run())
        .unwrap();
    // send a message to the tree to enter the main loop.
    test_harness.to_tree_tx.send(FromEngine::DownloadedBlocks(vec![])).unwrap();
    let received_action =
        test_harness.action_rx.recv().expect("Failed to receive save blocks action");
    if let PersistenceAction::SaveBlocks(saved_blocks, _) = received_action {
        // only blocks.len() - tree_config.memory_block_buffer_target() will be
        // persisted
        let expected_persist_len = blocks.len() - tree_config.memory_block_buffer_target() as usize;
        assert_eq!(saved_blocks.len(), expected_persist_len);
        assert_eq!(saved_blocks, blocks[..expected_persist_len]);
    } else {
        panic!("unexpected action received {received_action:?}");
    }
}
/// Checks that every installed block is retrievable from the canonical in-memory state
/// both by hash and by number, and that the returned state matches the executed block.
#[tokio::test]
async fn test_in_memory_state_trait_impl() {
    let blocks: Vec<_> = TestBlockBuilder::eth().get_executed_blocks(0..10).collect();
    let test_harness = TestHarness::new(MAINNET.clone()).with_blocks(blocks.clone());
    for executed_block in blocks {
        let sealed_block = executed_block.recovered_block();
        let expected_state = BlockState::new(executed_block.clone());
        // lookup by hash
        let actual_state_by_hash =
            test_harness.tree.canonical_in_memory_state.state_by_hash(sealed_block.hash()).unwrap();
        assert_eq!(expected_state, *actual_state_by_hash);
        // lookup by number
        let actual_state_by_number = test_harness
            .tree
            .canonical_in_memory_state
            .state_by_number(sealed_block.number)
            .unwrap();
        assert_eq!(expected_state, *actual_state_by_number);
    }
}
/// Verifies that a forkchoice update received while backfill sync is active is answered
/// with a `SYNCING` status instead of being processed.
#[tokio::test]
#[ignore = "We have persistence threshold set to 0 for snapshot purposes so this test no longer works or serves a purpose"]
async fn test_engine_request_during_backfill() {
    let tree_config = TreeConfig::default();
    let blocks: Vec<_> = TestBlockBuilder::eth()
        .get_executed_blocks(0..tree_config.persistence_threshold())
        .collect();
    let mut test_harness = TestHarness::new(MAINNET.clone())
        .with_blocks(blocks)
        .with_backfill_state(BackfillSyncState::Active);
    let (tx, rx) = oneshot::channel();
    test_harness
        .tree
        .on_engine_message(FromEngine::Request(
            BeaconEngineMessage::ForkchoiceUpdated {
                state: ForkchoiceState {
                    head_block_hash: B256::random(),
                    safe_block_hash: B256::random(),
                    finalized_block_hash: B256::random(),
                },
                payload_attrs: None,
                tx,
                version: EngineApiMessageVersion::default(),
            }
            .into(),
        ))
        .unwrap();
    let resp = rx.await.unwrap().unwrap().await.unwrap();
    assert!(resp.payload_status.is_syncing());
}
/// A new payload whose parent is unknown should yield a `SYNCING` outcome and be placed
/// in the block buffer for later connection.
#[test]
fn test_disconnected_payload() {
    let s = include_str!("../../test-data/holesky/2.rlp");
    let data = Bytes::from_str(s).unwrap();
    let block = Block::decode(&mut data.as_ref()).unwrap();
    let sealed = block.seal_slow();
    let hash = sealed.hash();
    let payload = ExecutionPayloadV1::from_block_unchecked(hash, &sealed.clone().into_block());
    let mut test_harness = TestHarness::new(HOLESKY.clone());
    let outcome = test_harness
        .tree
        .on_new_payload(ExecutionData {
            payload: payload.into(),
            sidecar: ExecutionPayloadSidecar::none(),
        })
        .unwrap();
    assert!(outcome.outcome.is_syncing());
    // ensure block is buffered
    let buffered = test_harness.tree.state.buffer.block(&hash).unwrap();
    assert_eq!(buffered.clone_sealed_block(), sealed);
}
/// Inserting a block whose parent is unknown should report `Disconnected` with the
/// current canonical head and the missing ancestor.
#[test]
fn test_disconnected_block() {
    let s = include_str!("../../test-data/holesky/2.rlp");
    let data = Bytes::from_str(s).unwrap();
    let block = Block::decode(&mut data.as_ref()).unwrap();
    let sealed = block.seal_slow().try_recover().unwrap();
    let mut test_harness = TestHarness::new(HOLESKY.clone());
    let outcome = test_harness.tree.insert_block(sealed.clone()).unwrap();
    assert_eq!(
        outcome,
        InsertPayloadOk::Inserted(BlockStatus::Disconnected {
            head: test_harness.tree.state.tree_state.current_canonical_head,
            missing_ancestor: sealed.parent_num_hash()
        })
    );
}
/// A new payload received while backfill sync is active should be answered with
/// `SYNCING` rather than executed.
#[tokio::test]
async fn test_holesky_payload() {
    let s = include_str!("../../test-data/holesky/1.rlp");
    let data = Bytes::from_str(s).unwrap();
    let block: Block = Block::decode(&mut data.as_ref()).unwrap();
    let sealed = block.seal_slow();
    let payload =
        ExecutionPayloadV1::from_block_unchecked(sealed.hash(), &sealed.clone().into_block());
    let mut test_harness =
        TestHarness::new(HOLESKY.clone()).with_backfill_state(BackfillSyncState::Active);
    let (tx, rx) = oneshot::channel();
    test_harness
        .tree
        .on_engine_message(FromEngine::Request(
            BeaconEngineMessage::NewPayload {
                payload: ExecutionData {
                    payload: payload.clone().into(),
                    sidecar: ExecutionPayloadSidecar::none(),
                },
                tx,
            }
            .into(),
        ))
        .unwrap();
    let resp = rx.await.unwrap().unwrap();
    assert!(resp.is_syncing());
}
/// Exercises `on_new_head` for both a simple chain extension (`Commit`) and a reorg to a
/// fork (`Reorg`), and checks persistence actions advance correctly around each.
#[tokio::test]
async fn test_tree_state_on_new_head_reorg() {
    reth_tracing::init_test_tracing();
    let chain_spec = MAINNET.clone();
    // Set persistence_threshold to 1
    let mut test_harness = TestHarness::new(chain_spec);
    test_harness.tree.config =
        test_harness.tree.config.with_persistence_threshold(1).with_memory_block_buffer_target(1);
    let mut test_block_builder = TestBlockBuilder::eth();
    let blocks: Vec<_> = test_block_builder.get_executed_blocks(1..6).collect();
    for block in &blocks {
        test_harness.tree.state.tree_state.insert_executed(block.clone());
    }
    // set block 3 as the current canonical head
    test_harness.tree.state.tree_state.set_canonical_head(blocks[2].recovered_block().num_hash());
    // create a fork from block 2
    let fork_block_3 =
        test_block_builder.get_executed_block_with_number(3, blocks[1].recovered_block().hash());
    let fork_block_4 =
        test_block_builder.get_executed_block_with_number(4, fork_block_3.recovered_block().hash());
    let fork_block_5 =
        test_block_builder.get_executed_block_with_number(5, fork_block_4.recovered_block().hash());
    test_harness.tree.state.tree_state.insert_executed(fork_block_3.clone());
    test_harness.tree.state.tree_state.insert_executed(fork_block_4.clone());
    test_harness.tree.state.tree_state.insert_executed(fork_block_5.clone());
    // normal (non-reorg) case
    let result = test_harness.tree.on_new_head(blocks[4].recovered_block().hash()).unwrap();
    assert!(matches!(result, Some(NewCanonicalChain::Commit { .. })));
    if let Some(NewCanonicalChain::Commit { new }) = result {
        assert_eq!(new.len(), 2);
        assert_eq!(new[0].recovered_block().hash(), blocks[3].recovered_block().hash());
        assert_eq!(new[1].recovered_block().hash(), blocks[4].recovered_block().hash());
    }
    // should be a None persistence action before we advance persistence
    let current_action = test_harness.tree.persistence_state.current_action();
    assert_eq!(current_action, None);
    // let's attempt to persist and check that it attempts to save blocks
    //
    // since in-memory block buffer target and persistence_threshold are both 1, this should
    // save all but the current tip of the canonical chain (up to blocks[1])
    test_harness.tree.advance_persistence().unwrap();
    let current_action = test_harness.tree.persistence_state.current_action().cloned();
    assert_eq!(
        current_action,
        Some(CurrentPersistenceAction::SavingBlocks {
            highest: blocks[1].recovered_block().num_hash()
        })
    );
    // get rid of the prev action
    let received_action = test_harness.action_rx.recv().unwrap();
    let PersistenceAction::SaveBlocks(saved_blocks, sender) = received_action else {
        panic!("received wrong action");
    };
    assert_eq!(saved_blocks, vec![blocks[0].clone(), blocks[1].clone()]);
    // send the response so we can advance again
    sender.send(Some(blocks[1].recovered_block().num_hash())).unwrap();
    // we should be persisting blocks[1] because we threw out the prev action
    let current_action = test_harness.tree.persistence_state.current_action().cloned();
    assert_eq!(
        current_action,
        Some(CurrentPersistenceAction::SavingBlocks {
            highest: blocks[1].recovered_block().num_hash()
        })
    );
    // after advancing persistence, we should be at `None` for the next action
    test_harness.tree.advance_persistence().unwrap();
    let current_action = test_harness.tree.persistence_state.current_action().cloned();
    assert_eq!(current_action, None);
    // reorg case
    let result = test_harness.tree.on_new_head(fork_block_5.recovered_block().hash()).unwrap();
    assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. })));
    if let Some(NewCanonicalChain::Reorg { new, old }) = result {
        assert_eq!(new.len(), 3);
        assert_eq!(new[0].recovered_block().hash(), fork_block_3.recovered_block().hash());
        assert_eq!(new[1].recovered_block().hash(), fork_block_4.recovered_block().hash());
        assert_eq!(new[2].recovered_block().hash(), fork_block_5.recovered_block().hash());
        assert_eq!(old.len(), 1);
        assert_eq!(old[0].recovered_block().hash(), blocks[2].recovered_block().hash());
    }
    // The canonical block has not changed, so we will not get any active persistence action
    test_harness.tree.advance_persistence().unwrap();
    let current_action = test_harness.tree.persistence_state.current_action().cloned();
    assert_eq!(current_action, None);
    // Let's change the canonical head and advance persistence
    test_harness
        .tree
        .state
        .tree_state
        .set_canonical_head(fork_block_5.recovered_block().num_hash());
    // The canonical block has changed now, we should get fork_block_4 due to the persistence
    // threshold and in memory block buffer target
    test_harness.tree.advance_persistence().unwrap();
    let current_action = test_harness.tree.persistence_state.current_action().cloned();
    assert_eq!(
        current_action,
        Some(CurrentPersistenceAction::SavingBlocks {
            highest: fork_block_4.recovered_block().num_hash()
        })
    );
}
/// Repeatedly reorgs between two 10-block forks of the same base chain, asserting each
/// `Reorg` result lists the expected new and old chain segments.
#[test]
fn test_tree_state_on_new_head_deep_fork() {
    reth_tracing::init_test_tracing();
    let chain_spec = MAINNET.clone();
    let mut test_harness = TestHarness::new(chain_spec);
    let mut test_block_builder = TestBlockBuilder::eth();
    let blocks: Vec<_> = test_block_builder.get_executed_blocks(0..5).collect();
    for block in &blocks {
        test_harness.tree.state.tree_state.insert_executed(block.clone());
    }
    // set last block as the current canonical head
    let last_block = blocks.last().unwrap().recovered_block().clone();
    test_harness.tree.state.tree_state.set_canonical_head(last_block.num_hash());
    // create a fork chain from last_block
    let chain_a = test_block_builder.create_fork(&last_block, 10);
    let chain_b = test_block_builder.create_fork(&last_block, 10);
    for block in &chain_a {
        test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates {
            block: ExecutedBlock {
                recovered_block: Arc::new(block.clone()),
                execution_output: Arc::new(ExecutionOutcome::default()),
                hashed_state: Arc::new(HashedPostState::default()),
            },
            trie: ExecutedTrieUpdates::empty(),
        });
    }
    test_harness.tree.state.tree_state.set_canonical_head(chain_a.last().unwrap().num_hash());
    for block in &chain_b {
        test_harness.tree.state.tree_state.insert_executed(ExecutedBlockWithTrieUpdates {
            block: ExecutedBlock {
                recovered_block: Arc::new(block.clone()),
                execution_output: Arc::new(ExecutionOutcome::default()),
                hashed_state: Arc::new(HashedPostState::default()),
            },
            trie: ExecutedTrieUpdates::empty(),
        });
    }
    // for each block in chain_b, reorg to it and then back to canonical
    let mut expected_new = Vec::new();
    for block in &chain_b {
        // reorg to chain from block b
        let result = test_harness.tree.on_new_head(block.hash()).unwrap();
        assert_matches!(result, Some(NewCanonicalChain::Reorg { .. }));
        expected_new.push(block);
        if let Some(NewCanonicalChain::Reorg { new, old }) = result {
            assert_eq!(new.len(), expected_new.len());
            for (index, block) in expected_new.iter().enumerate() {
                assert_eq!(new[index].recovered_block().hash(), block.hash());
            }
            assert_eq!(old.len(), chain_a.len());
            for (index, block) in chain_a.iter().enumerate() {
                assert_eq!(old[index].recovered_block().hash(), block.hash());
            }
        }
        // set last block of chain a as canonical head
        test_harness.tree.on_new_head(chain_a.last().unwrap().hash()).unwrap();
    }
}
/// Checks `get_canonical_blocks_to_persist` returns exactly the canonical range between
/// the last persisted block and the in-memory buffer target, excluding fork blocks.
#[tokio::test]
async fn test_get_canonical_blocks_to_persist() {
    let chain_spec = MAINNET.clone();
    let mut test_harness = TestHarness::new(chain_spec);
    let mut test_block_builder = TestBlockBuilder::eth();
    let canonical_head_number = 9;
    let blocks: Vec<_> =
        test_block_builder.get_executed_blocks(0..canonical_head_number + 1).collect();
    test_harness = test_harness.with_blocks(blocks.clone());
    let last_persisted_block_number = 3;
    test_harness.tree.persistence_state.last_persisted_block =
        blocks[last_persisted_block_number as usize].recovered_block.num_hash();
    let persistence_threshold = 4;
    let memory_block_buffer_target = 3;
    test_harness.tree.config = TreeConfig::default()
        .with_persistence_threshold(persistence_threshold)
        .with_memory_block_buffer_target(memory_block_buffer_target);
    let blocks_to_persist = test_harness.tree.get_canonical_blocks_to_persist().unwrap();
    let expected_blocks_to_persist_length: usize =
        (canonical_head_number - memory_block_buffer_target - last_persisted_block_number)
            .try_into()
            .unwrap();
    assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length);
    // blocks are returned in ascending order starting right after the last persisted one
    for (i, item) in blocks_to_persist.iter().enumerate().take(expected_blocks_to_persist_length) {
        assert_eq!(item.recovered_block().number, last_persisted_block_number + i as u64 + 1);
    }
    // make sure only canonical blocks are included
    let fork_block = test_block_builder.get_executed_block_with_number(4, B256::random());
    let fork_block_hash = fork_block.recovered_block().hash();
    test_harness.tree.state.tree_state.insert_executed(fork_block);
    assert!(test_harness.tree.state.tree_state.sealed_header_by_hash(&fork_block_hash).is_some());
    let blocks_to_persist = test_harness.tree.get_canonical_blocks_to_persist().unwrap();
    assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length);
    // check that the fork block is not included in the blocks to persist
    assert!(!blocks_to_persist.iter().any(|b| b.recovered_block().hash() == fork_block_hash));
    // check that the original block 4 is still included
    assert!(blocks_to_persist.iter().any(|b| b.recovered_block().number == 4 &&
        b.recovered_block().hash() == blocks[4].recovered_block().hash()));
    // check that if we advance persistence, the persistence action is the correct value
    test_harness.tree.advance_persistence().expect("advancing persistence should succeed");
    assert_eq!(
        test_harness.tree.persistence_state.current_action().cloned(),
        Some(CurrentPersistenceAction::SavingBlocks {
            highest: blocks_to_persist.last().unwrap().recovered_block().num_hash()
        })
    );
}
/// A forkchoice update to an unknown head should answer `SYNCING` and emit a download
/// request for the missing block.
#[tokio::test]
async fn test_engine_tree_fcu_missing_head() {
    let chain_spec = MAINNET.clone();
    let mut test_harness = TestHarness::new(chain_spec.clone());
    let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone());
    let blocks: Vec<_> = test_block_builder.get_executed_blocks(0..5).collect();
    test_harness = test_harness.with_blocks(blocks);
    let missing_block = test_block_builder
        .generate_random_block(6, test_harness.blocks.last().unwrap().recovered_block().hash());
    test_harness.fcu_to(missing_block.hash(), PayloadStatusEnum::Syncing).await;
    // after FCU we receive an EngineApiEvent::Download event to get the missing block.
    let event = test_harness.from_tree_rx.recv().await.unwrap();
    match event {
        EngineApiEvent::Download(DownloadRequest::BlockSet(actual_block_set)) => {
            let expected_block_set = HashSet::from_iter([missing_block.hash()]);
            assert_eq!(actual_block_set, expected_block_set);
        }
        _ => panic!("Unexpected event: {event:#?}"),
    }
}
#[tokio::test]
async fn test_engine_tree_live_sync_transition_required_blocks_requested() {
reth_tracing::init_test_tracing();
let chain_spec = MAINNET.clone();
let mut test_harness = TestHarness::new(chain_spec.clone());
let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect();
test_harness = test_harness.with_blocks(base_chain.clone());
test_harness
.fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid)
.await;
// extend main chain with enough blocks to trigger pipeline run but don't insert them
let main_chain = test_harness
.block_builder
.create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10);
let main_chain_last_hash = main_chain.last().unwrap().hash();
test_harness.send_fcu(main_chain_last_hash, ForkchoiceStatus::Syncing).await;
test_harness.check_fcu(main_chain_last_hash, ForkchoiceStatus::Syncing).await;
// create event for backfill finished
let backfill_finished_block_number = MIN_BLOCKS_FOR_PIPELINE_RUN + 1;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/tree/block_buffer.rs | crates/engine/tree/src/tree/block_buffer.rs | use crate::tree::metrics::BlockBufferMetrics;
use alloy_consensus::BlockHeader;
use alloy_primitives::{BlockHash, BlockNumber};
use reth_primitives_traits::{Block, RecoveredBlock};
use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
/// Contains the tree of pending blocks that cannot be executed due to missing parent.
/// It allows to store unconnected blocks for potential future inclusion.
///
/// The buffer has three main functionalities:
/// * [`BlockBuffer::insert_block`] for inserting blocks inside the buffer.
/// * [`BlockBuffer::remove_block_with_children`] for connecting blocks if the parent gets received
///   and inserted.
/// * [`BlockBuffer::remove_old_blocks`] to remove old blocks that precede the finalized number.
///
/// Note: the buffer is limited in the number of blocks it can contain; once full, blocks
/// are evicted in FIFO (insertion) order — the oldest inserted block is removed first.
#[derive(Debug)]
pub struct BlockBuffer<B: Block> {
    /// All blocks in the buffer stored by their block hash.
    pub(crate) blocks: HashMap<BlockHash, RecoveredBlock<B>>,
    /// Map of any parent block hash (even the ones not currently in the buffer)
    /// to the buffered children.
    /// Allows connecting buffered blocks by parent.
    pub(crate) parent_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
    /// `BTreeMap` tracking the earliest blocks by block number.
    /// Used for removal of old blocks that precede finalization.
    pub(crate) earliest_blocks: BTreeMap<BlockNumber, HashSet<BlockHash>>,
    /// FIFO queue tracking block insertion order for eviction.
    /// When the buffer reaches its capacity limit, the oldest block is evicted first.
    pub(crate) block_queue: VecDeque<BlockHash>,
    /// Maximum number of blocks that can be stored in the buffer
    pub(crate) max_blocks: usize,
    /// Various metrics for the block buffer.
    pub(crate) metrics: BlockBufferMetrics,
}
impl<B: Block> BlockBuffer<B> {
/// Create new buffer with max limit of blocks
pub fn new(limit: u32) -> Self {
Self {
blocks: Default::default(),
parent_to_child: Default::default(),
earliest_blocks: Default::default(),
block_queue: VecDeque::default(),
max_blocks: limit as usize,
metrics: Default::default(),
}
}
/// Return reference to the requested block.
pub fn block(&self, hash: &BlockHash) -> Option<&RecoveredBlock<B>> {
self.blocks.get(hash)
}
/// Return a reference to the lowest ancestor of the given block in the buffer.
pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&RecoveredBlock<B>> {
let mut current_block = self.blocks.get(hash)?;
while let Some(parent) = self.blocks.get(¤t_block.parent_hash()) {
current_block = parent;
}
Some(current_block)
}
/// Insert a correct block inside the buffer.
pub fn insert_block(&mut self, block: RecoveredBlock<B>) {
let hash = block.hash();
self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash);
self.earliest_blocks.entry(block.number()).or_default().insert(hash);
self.blocks.insert(hash, block);
// Add block to FIFO queue and handle eviction if needed
if self.block_queue.len() >= self.max_blocks {
// Evict oldest block if limit is hit
if let Some(evicted_hash) = self.block_queue.pop_front() {
if let Some(evicted_block) = self.remove_block(&evicted_hash) {
self.remove_from_parent(evicted_block.parent_hash(), &evicted_hash);
}
}
}
self.block_queue.push_back(hash);
self.metrics.blocks.set(self.blocks.len() as f64);
}
/// Removes the given block from the buffer and also all the children of the block.
///
/// This is used to get all the blocks that are dependent on the block that is included.
///
/// Note: that order of returned blocks is important and the blocks with lower block number
/// in the chain will come first so that they can be executed in the correct order.
pub fn remove_block_with_children(
&mut self,
parent_hash: &BlockHash,
) -> Vec<RecoveredBlock<B>> {
let removed = self
.remove_block(parent_hash)
.into_iter()
.chain(self.remove_children(vec![*parent_hash]))
.collect();
self.metrics.blocks.set(self.blocks.len() as f64);
removed
}
/// Discard all blocks that precede block number from the buffer.
pub fn remove_old_blocks(&mut self, block_number: BlockNumber) {
let mut block_hashes_to_remove = Vec::new();
// discard all blocks that are before the finalized number.
while let Some(entry) = self.earliest_blocks.first_entry() {
if *entry.key() > block_number {
break
}
let block_hashes = entry.remove();
block_hashes_to_remove.extend(block_hashes);
}
// remove from other collections.
for block_hash in &block_hashes_to_remove {
// It's fine to call
self.remove_block(block_hash);
}
self.remove_children(block_hashes_to_remove);
self.metrics.blocks.set(self.blocks.len() as f64);
}
/// Drops `hash` from the per-height index, removing the height entry entirely
/// once no hashes remain for that number.
fn remove_from_earliest_blocks(&mut self, number: BlockNumber, hash: &BlockHash) {
    let Some(hashes) = self.earliest_blocks.get_mut(&number) else { return };
    hashes.remove(hash);
    if hashes.is_empty() {
        self.earliest_blocks.remove(&number);
    }
}
/// Unlinks `hash` from its parent's child set. This method does not remove children.
///
/// The parent entry itself is dropped once its child set becomes empty.
fn remove_from_parent(&mut self, parent_hash: BlockHash, hash: &BlockHash) {
    let Some(children) = self.parent_to_child.get_mut(&parent_hash) else { return };
    children.remove(hash);
    if children.is_empty() {
        self.parent_to_child.remove(&parent_hash);
    }
}
/// Removes a single block from every inner collection.
///
/// Only succeeds when the hash is currently present inside `self.blocks`; the
/// other collections are merely cleaned of any references to it.
fn remove_block(&mut self, hash: &BlockHash) -> Option<RecoveredBlock<B>> {
    let removed = self.blocks.remove(hash)?;
    self.remove_from_earliest_blocks(removed.number(), hash);
    self.remove_from_parent(removed.parent_hash(), hash);
    // Drop any queued occurrence so the FIFO stays in sync with `blocks`.
    self.block_queue.retain(|queued| queued != hash);
    Some(removed)
}
/// Remove all children and their descendants for the given blocks and return them.
fn remove_children(&mut self, parent_hashes: Vec<BlockHash>) -> Vec<RecoveredBlock<B>> {
    // Worklist of parents whose buffered children still need to be dropped.
    let mut pending = parent_hashes;
    let mut removed = Vec::new();

    while let Some(parent) = pending.pop() {
        // Detach this parent's child set; nothing to do if it has none.
        let Some(children) = self.parent_to_child.remove(&parent) else { continue };

        for child in &children {
            if let Some(block) = self.remove_block(child) {
                removed.push(block);
            }
        }

        // The removed children may have children of their own.
        pending.extend(children);
    }

    removed
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_eips::BlockNumHash;
    use alloy_primitives::BlockHash;
    use reth_primitives_traits::RecoveredBlock;
    use reth_testing_utils::generators::{self, random_block, BlockParams, Rng};
    use std::collections::HashMap;

    /// Create random block with specified number and parent hash.
    fn create_block<R: Rng>(
        rng: &mut R,
        number: u64,
        parent: BlockHash,
    ) -> RecoveredBlock<reth_ethereum_primitives::Block> {
        let block =
            random_block(rng, number, BlockParams { parent: Some(parent), ..Default::default() });
        block.try_recover().unwrap()
    }

    /// Assert that all buffer collections have the same data length.
    fn assert_buffer_lengths<B: Block>(buffer: &BlockBuffer<B>, expected: usize) {
        assert_eq!(buffer.blocks.len(), expected);
        assert_eq!(buffer.block_queue.len(), expected);
        assert_eq!(
            buffer.parent_to_child.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()),
            expected
        );
        assert_eq!(
            buffer.earliest_blocks.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()),
            expected
        );
    }

    /// Assert that the block was removed from all buffer collections.
    fn assert_block_removal<B: Block>(
        buffer: &BlockBuffer<B>,
        block: &RecoveredBlock<reth_ethereum_primitives::Block>,
    ) {
        assert!(!buffer.blocks.contains_key(&block.hash()));
        assert!(buffer
            .parent_to_child
            .get(&block.parent_hash)
            .and_then(|p| p.get(&block.hash()))
            .is_none());
        assert!(buffer
            .earliest_blocks
            .get(&block.number)
            .and_then(|hashes| hashes.get(&block.hash()))
            .is_none());
    }

    // A single inserted block is retrievable by hash and counted once everywhere.
    #[test]
    fn simple_insertion() {
        let mut rng = generators::rng();
        let parent = rng.random();
        let block1 = create_block(&mut rng, 10, parent);
        let mut buffer = BlockBuffer::new(3);

        buffer.insert_block(block1.clone());
        assert_buffer_lengths(&buffer, 1);
        assert_eq!(buffer.block(&block1.hash()), Some(&block1));
    }

    // Removing by an unknown parent hash takes the whole connected chain
    // (block1..block3) but leaves the unrelated block4 buffered.
    #[test]
    fn take_entire_chain_of_children() {
        let mut rng = generators::rng();

        let main_parent_hash = rng.random();
        let block1 = create_block(&mut rng, 10, main_parent_hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 12, block2.hash());
        let parent4 = rng.random();
        let block4 = create_block(&mut rng, 14, parent4);

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block3.clone());
        buffer.insert_block(block4.clone());

        assert_buffer_lengths(&buffer, 4);
        assert_eq!(buffer.block(&block4.hash()), Some(&block4));
        assert_eq!(buffer.block(&block2.hash()), Some(&block2));
        assert_eq!(buffer.block(&main_parent_hash), None);

        assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4));
        assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1));
        assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));
        assert_eq!(
            buffer.remove_block_with_children(&main_parent_hash),
            vec![block1, block2, block3]
        );
        assert_buffer_lengths(&buffer, 1);
    }

    // A fork (block2/block3 share block1 as parent) is fully drained, at every
    // level, when removing by the common ancestor's parent.
    #[test]
    fn take_all_multi_level_children() {
        let mut rng = generators::rng();

        let main_parent_hash = rng.random();
        let block1 = create_block(&mut rng, 10, main_parent_hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 11, block1.hash());
        let block4 = create_block(&mut rng, 12, block2.hash());

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block3.clone());
        buffer.insert_block(block4.clone());

        assert_buffer_lengths(&buffer, 4);
        assert_eq!(
            buffer
                .remove_block_with_children(&main_parent_hash)
                .into_iter()
                .map(|b| (b.hash(), b))
                .collect::<HashMap<_, _>>(),
            HashMap::from([
                (block1.hash(), block1),
                (block2.hash(), block2),
                (block3.hash(), block3),
                (block4.hash(), block4)
            ])
        );
        assert_buffer_lengths(&buffer, 0);
    }

    // Removing by block1's own hash also takes block1 plus all descendants.
    #[test]
    fn take_block_with_children() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.random());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 11, block1.hash());
        let block4 = create_block(&mut rng, 12, block2.hash());

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block3.clone());
        buffer.insert_block(block4.clone());

        assert_buffer_lengths(&buffer, 4);
        assert_eq!(
            buffer
                .remove_block_with_children(&block1.hash())
                .into_iter()
                .map(|b| (b.hash(), b))
                .collect::<HashMap<_, _>>(),
            HashMap::from([
                (block1.hash(), block1),
                (block2.hash(), block2),
                (block3.hash(), block3),
                (block4.hash(), block4)
            ])
        );
        assert_buffer_lengths(&buffer, 0);
    }

    // `remove_old_blocks` drops the chain rooted at block1 but keeps the
    // unrelated higher block4.
    #[test]
    fn remove_chain_of_children() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.random());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 12, block2.hash());
        let parent4 = rng.random();
        let block4 = create_block(&mut rng, 14, parent4);

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2);
        buffer.insert_block(block3);
        buffer.insert_block(block4);

        assert_buffer_lengths(&buffer, 4);
        buffer.remove_old_blocks(block1.number);
        assert_buffer_lengths(&buffer, 1);
    }

    // `remove_old_blocks` also drops forked descendants of the removed height.
    #[test]
    fn remove_all_multi_level_children() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.random());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 11, block1.hash());
        let block4 = create_block(&mut rng, 12, block2.hash());

        let mut buffer = BlockBuffer::new(5);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2);
        buffer.insert_block(block3);
        buffer.insert_block(block4);

        assert_buffer_lengths(&buffer, 4);
        buffer.remove_old_blocks(block1.number);
        assert_buffer_lengths(&buffer, 0);
    }

    // Multiple independent chains and forks: ancestors resolve per-chain, and
    // removing height <= 10 keeps only the two height-11+ stragglers with
    // buffered roots removed.
    #[test]
    fn remove_multi_chains() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.random());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block1a = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block2a = create_block(&mut rng, 11, block1.hash());
        let random_parent1 = rng.random();
        let random_block1 = create_block(&mut rng, 10, random_parent1);
        let random_parent2 = rng.random();
        let random_block2 = create_block(&mut rng, 11, random_parent2);
        let random_parent3 = rng.random();
        let random_block3 = create_block(&mut rng, 12, random_parent3);

        let mut buffer = BlockBuffer::new(10);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block1a.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block2a.clone());
        buffer.insert_block(random_block1.clone());
        buffer.insert_block(random_block2.clone());
        buffer.insert_block(random_block3.clone());

        // check that random blocks are their own ancestor, and that chains have proper ancestors
        assert_eq!(buffer.lowest_ancestor(&random_block1.hash()), Some(&random_block1));
        assert_eq!(buffer.lowest_ancestor(&random_block2.hash()), Some(&random_block2));
        assert_eq!(buffer.lowest_ancestor(&random_block3.hash()), Some(&random_block3));

        // descendants have ancestors
        assert_eq!(buffer.lowest_ancestor(&block2a.hash()), Some(&block1));
        assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1));

        // roots are themselves
        assert_eq!(buffer.lowest_ancestor(&block1a.hash()), Some(&block1a));
        assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));

        assert_buffer_lengths(&buffer, 7);
        buffer.remove_old_blocks(10);
        assert_buffer_lengths(&buffer, 2);
    }

    // FIFO eviction: inserting a 4th block into a 3-block buffer evicts the
    // oldest (block1), after which the chain's lowest ancestor shifts to block2.
    #[test]
    fn evict_with_gap() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.random());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 12, block2.hash());
        let parent4 = rng.random();
        let block4 = create_block(&mut rng, 13, parent4);

        let mut buffer = BlockBuffer::new(3);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2.clone());
        buffer.insert_block(block3.clone());

        // pre-eviction block1 is the root
        assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1));
        assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1));
        assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));

        buffer.insert_block(block4.clone());
        assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4));

        // block1 gets evicted
        assert_block_removal(&buffer, &block1);

        // check lowest ancestor results post eviction
        assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block2));
        assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block2));
        assert_eq!(buffer.lowest_ancestor(&block1.hash()), None);

        assert_buffer_lengths(&buffer, 3);
    }

    // Capacity-3 buffer with 4 inserts evicts the oldest inserted block.
    #[test]
    fn simple_eviction() {
        let mut rng = generators::rng();

        let main_parent = BlockNumHash::new(9, rng.random());
        let block1 = create_block(&mut rng, 10, main_parent.hash);
        let block2 = create_block(&mut rng, 11, block1.hash());
        let block3 = create_block(&mut rng, 12, block2.hash());
        let parent4 = rng.random();
        let block4 = create_block(&mut rng, 13, parent4);

        let mut buffer = BlockBuffer::new(3);

        buffer.insert_block(block1.clone());
        buffer.insert_block(block2);
        buffer.insert_block(block3);
        buffer.insert_block(block4);

        // block1 (the oldest inserted block) gets evicted
        assert_block_removal(&buffer, &block1);
        assert_buffer_lengths(&buffer, 3);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/tree/state.rs | crates/engine/tree/src/tree/state.rs | //! Functionality related to tree state.
use crate::engine::EngineApiKind;
use alloy_eips::{eip1898::BlockWithParent, merge::EPOCH_SLOTS, BlockNumHash};
use alloy_primitives::{
map::{HashMap, HashSet},
BlockNumber, B256,
};
use reth_chain_state::{EthPrimitives, ExecutedBlockWithTrieUpdates};
use reth_primitives_traits::{AlloyBlockHeader, NodePrimitives, SealedHeader};
use reth_trie::updates::TrieUpdates;
use std::{
collections::{btree_map, hash_map, BTreeMap, VecDeque},
ops::Bound,
sync::Arc,
};
use tracing::debug;
/// Default number of blocks (two epochs worth of slots) to retain persisted trie updates for.
const DEFAULT_PERSISTED_TRIE_UPDATES_RETENTION: u64 = EPOCH_SLOTS * 2;

/// Number of blocks to retain persisted trie updates for OP Stack chains.
///
/// OP Stack chains only need `EPOCH_SLOTS` blocks, as reorgs are relevant only when
/// op-node reorgs to the same chain twice.
const OPSTACK_PERSISTED_TRIE_UPDATES_RETENTION: u64 = EPOCH_SLOTS;
/// Keeps track of the state of the tree.
///
/// ## Invariants
///
/// - This only stores blocks that are connected to the canonical chain.
/// - All executed blocks are valid and have been executed.
#[derive(Debug, Default)]
pub struct TreeState<N: NodePrimitives = EthPrimitives> {
    /// __All__ unique executed blocks by block hash that are connected to the canonical chain.
    ///
    /// This includes blocks of all forks.
    pub(crate) blocks_by_hash: HashMap<B256, ExecutedBlockWithTrieUpdates<N>>,
    /// Executed blocks grouped by their respective block number.
    ///
    /// This maps unique block number to all known blocks for that height.
    ///
    /// Note: there can be multiple blocks at the same height due to forks.
    pub(crate) blocks_by_number: BTreeMap<BlockNumber, Vec<ExecutedBlockWithTrieUpdates<N>>>,
    /// Map of any parent block hash to its children.
    pub(crate) parent_to_child: HashMap<B256, HashSet<B256>>,
    /// Map of hash to trie updates for canonical blocks that are persisted but not finalized.
    ///
    /// Contains the block number for easy removal (see
    /// `prune_persisted_trie_updates`).
    pub(crate) persisted_trie_updates: HashMap<B256, (BlockNumber, Arc<TrieUpdates>)>,
    /// Currently tracked canonical head of the chain.
    pub(crate) current_canonical_head: BlockNumHash,
    /// The engine API variant of this handler.
    pub(crate) engine_kind: EngineApiKind,
}
impl<N: NodePrimitives> TreeState<N> {
/// Returns a new, empty tree state that points to the given canonical head.
pub(crate) fn new(current_canonical_head: BlockNumHash, engine_kind: EngineApiKind) -> Self {
    Self {
        current_canonical_head,
        engine_kind,
        // All tracking collections start out empty.
        blocks_by_hash: Default::default(),
        blocks_by_number: Default::default(),
        parent_to_child: Default::default(),
        persisted_trie_updates: Default::default(),
    }
}
/// Resets the state and points to the given canonical head.
pub(crate) fn reset(&mut self, current_canonical_head: BlockNumHash) {
    // Preserve the engine kind; everything else is wiped.
    let engine_kind = self.engine_kind;
    *self = Self::new(current_canonical_head, engine_kind);
}
/// Returns the number of executed blocks stored.
///
/// Counts distinct block hashes, so fork blocks are included.
pub(crate) fn block_count(&self) -> usize {
    self.blocks_by_hash.len()
}
/// Returns the [`ExecutedBlockWithTrieUpdates`] by hash.
///
/// Returns `None` if the block is not tracked in this tree state.
pub(crate) fn executed_block_by_hash(
    &self,
    hash: B256,
) -> Option<&ExecutedBlockWithTrieUpdates<N>> {
    self.blocks_by_hash.get(&hash)
}
/// Returns the sealed block header by hash.
///
/// Clones the header out of the tracked executed block, if any.
pub(crate) fn sealed_header_by_hash(
    &self,
    hash: &B256,
) -> Option<SealedHeader<N::BlockHeader>> {
    let executed = self.blocks_by_hash.get(hash)?;
    Some(executed.sealed_block().sealed_header().clone())
}
/// Returns all available blocks for the given hash that lead back to the canonical chain, from
/// newest to oldest. And the parent hash of the oldest block that is missing from the buffer.
///
/// Returns `None` if the block for the given hash is not found.
pub(crate) fn blocks_by_hash(
    &self,
    hash: B256,
) -> Option<(B256, Vec<ExecutedBlockWithTrieUpdates<N>>)> {
    let first = self.blocks_by_hash.get(&hash).cloned()?;
    let mut missing_parent = first.recovered_block().parent_hash();
    let mut chain = vec![first];

    // Follow parent links until we hit a block that is no longer in memory —
    // that parent hash marks where the canonical/persisted chain takes over.
    loop {
        match self.blocks_by_hash.get(&missing_parent) {
            Some(executed) => {
                missing_parent = executed.recovered_block().parent_hash();
                chain.push(executed.clone());
            }
            None => break,
        }
    }

    Some((missing_parent, chain))
}
/// Insert executed block into the state.
///
/// Duplicate inserts (same block hash) are no-ops; the existing entry is kept.
/// Also prunes any stale `parent_to_child` references that point at blocks no
/// longer present in `blocks_by_hash`.
pub(crate) fn insert_executed(&mut self, executed: ExecutedBlockWithTrieUpdates<N>) {
    let hash = executed.recovered_block().hash();
    let parent_hash = executed.recovered_block().parent_hash();
    let block_number = executed.recovered_block().number();

    // Already tracked: leave the existing entry untouched.
    if self.blocks_by_hash.contains_key(&hash) {
        return;
    }

    self.blocks_by_hash.insert(hash, executed.clone());

    self.blocks_by_number.entry(block_number).or_default().push(executed);

    self.parent_to_child.entry(parent_hash).or_default().insert(hash);

    // Drop child references whose blocks have been removed from `blocks_by_hash`.
    // NOTE(review): this walks every parent entry on each insert — presumably fine
    // for the bounded in-memory tree, but worth confirming if inserts become hot.
    for children in self.parent_to_child.values_mut() {
        children.retain(|child| self.blocks_by_hash.contains_key(child));
    }
}
/// Remove single executed block by its hash.
///
/// Cleans the block out of `blocks_by_hash`, its parent's child set, and the
/// per-height `blocks_by_number` vec. The block's own child set is detached and
/// returned so callers can cascade removals.
///
/// ## Returns
///
/// The removed block and the block hashes of its children, or `None` if the
/// hash was not tracked.
fn remove_by_hash(
    &mut self,
    hash: B256,
) -> Option<(ExecutedBlockWithTrieUpdates<N>, HashSet<B256>)> {
    let executed = self.blocks_by_hash.remove(&hash)?;

    // Remove this block from collection of children of its parent block.
    let parent_entry = self.parent_to_child.entry(executed.recovered_block().parent_hash());
    if let hash_map::Entry::Occupied(mut entry) = parent_entry {
        entry.get_mut().remove(&hash);

        // Drop the parent entry entirely once its child set is empty.
        if entry.get().is_empty() {
            entry.remove();
        }
    }

    // Remove point to children of this block.
    let children = self.parent_to_child.remove(&hash).unwrap_or_default();

    // Remove this block from `blocks_by_number`.
    let block_number_entry = self.blocks_by_number.entry(executed.recovered_block().number());
    if let btree_map::Entry::Occupied(mut entry) = block_number_entry {
        // We have to find the index of the block since it exists in a vec
        if let Some(index) = entry.get().iter().position(|b| b.recovered_block().hash() == hash)
        {
            // Order within one height is not meaningful, so `swap_remove` is fine.
            entry.get_mut().swap_remove(index);

            // If there are no blocks left then remove the entry for this block
            if entry.get().is_empty() {
                entry.remove();
            }
        }
    }

    Some((executed, children))
}
/// Returns whether or not the hash is part of the canonical chain.
pub(crate) fn is_canonical(&self, hash: B256) -> bool {
let mut current_block = self.current_canonical_head.hash;
if current_block == hash {
return true
}
while let Some(executed) = self.blocks_by_hash.get(¤t_block) {
current_block = executed.recovered_block().parent_hash();
if current_block == hash {
return true
}
}
false
}
/// Removes canonical blocks below the upper bound, only if the last persisted hash is
/// part of the canonical chain.
pub(crate) fn remove_canonical_until(
&mut self,
upper_bound: BlockNumber,
last_persisted_hash: B256,
) {
debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removing canonical blocks from the tree");
// If the last persisted hash is not canonical, then we don't want to remove any canonical
// blocks yet.
if !self.is_canonical(last_persisted_hash) {
return
}
// First, let's walk back the canonical chain and remove canonical blocks lower than the
// upper bound
let mut current_block = self.current_canonical_head.hash;
while let Some(executed) = self.blocks_by_hash.get(¤t_block) {
current_block = executed.recovered_block().parent_hash();
if executed.recovered_block().number() <= upper_bound {
let num_hash = executed.recovered_block().num_hash();
debug!(target: "engine::tree", ?num_hash, "Attempting to remove block walking back from the head");
if let Some((mut removed, _)) =
self.remove_by_hash(executed.recovered_block().hash())
{
debug!(target: "engine::tree", ?num_hash, "Removed block walking back from the head");
// finally, move the trie updates
let Some(trie_updates) = removed.trie.take_present() else {
debug!(target: "engine::tree", ?num_hash, "No trie updates found for persisted block");
continue;
};
self.persisted_trie_updates.insert(
removed.recovered_block().hash(),
(removed.recovered_block().number(), trie_updates),
);
}
}
}
debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removed canonical blocks from the tree");
}
/// Prunes old persisted trie updates based on the current block number
/// and chain type (OP Stack or regular).
pub(crate) fn prune_persisted_trie_updates(&mut self) {
    // OP Stack chains use a shorter retention window (see the constants above).
    let retention = if self.engine_kind.is_opstack() {
        OPSTACK_PERSISTED_TRIE_UPDATES_RETENTION
    } else {
        DEFAULT_PERSISTED_TRIE_UPDATES_RETENTION
    };

    // Keep only updates strictly newer than the cutoff height.
    let cutoff = self.current_canonical_head.number.saturating_sub(retention);
    self.persisted_trie_updates.retain(|_, (block_number, _)| *block_number > cutoff);
}
/// Removes all blocks that are below the finalized block, as well as removing non-canonical
/// sidechains that fork from below the finalized block.
pub(crate) fn prune_finalized_sidechains(&mut self, finalized_num_hash: BlockNumHash) {
    let BlockNumHash { number: finalized_num, hash: finalized_hash } = finalized_num_hash;

    // We remove disconnected sidechains in three steps:
    // * first, remove everything with a block number __below__ the finalized block.
    // * next, we populate a vec with parents __at__ the finalized block.
    // * finally, we iterate through the vec, removing children until the vec is empty
    //   (BFS).

    // We _exclude_ the finalized block because we will be dealing with the blocks __at__
    // the finalized block later.
    let blocks_to_remove = self
        .blocks_by_number
        .range((Bound::Unbounded, Bound::Excluded(finalized_num)))
        .flat_map(|(_, blocks)| blocks.iter().map(|b| b.recovered_block().hash()))
        .collect::<Vec<_>>();
    for hash in blocks_to_remove {
        if let Some((removed, _)) = self.remove_by_hash(hash) {
            debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed finalized sidechain block");
        }
    }

    // Drop trie updates that are now older than the retention window.
    self.prune_persisted_trie_updates();

    // The only block that should remain at the `finalized` number now, is the finalized
    // block, if it exists.
    //
    // For all other blocks, we first put their children into this vec.
    // Then, we will iterate over them, removing them, adding their children, etc,
    // until the vec is empty.
    let mut blocks_to_remove = self.blocks_by_number.remove(&finalized_num).unwrap_or_default();

    // re-insert the finalized hash if we removed it
    if let Some(position) =
        blocks_to_remove.iter().position(|b| b.recovered_block().hash() == finalized_hash)
    {
        let finalized_block = blocks_to_remove.swap_remove(position);
        self.blocks_by_number.insert(finalized_num, vec![finalized_block]);
    }

    // BFS over the remaining (non-finalized) blocks at this height and all of
    // their descendants.
    let mut blocks_to_remove = blocks_to_remove
        .into_iter()
        .map(|e| e.recovered_block().hash())
        .collect::<VecDeque<_>>();
    while let Some(block) = blocks_to_remove.pop_front() {
        if let Some((removed, children)) = self.remove_by_hash(block) {
            debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed finalized sidechain child block");
            blocks_to_remove.extend(children);
        }
    }
}
/// Remove all blocks up to __and including__ the given block number.
///
/// If a finalized hash is provided, the only non-canonical blocks which will be removed are
/// those which have a fork point at or below the finalized hash.
///
/// Canonical blocks below the upper bound will still be removed.
///
/// NOTE: if the finalized block is greater than the upper bound, the only blocks that will be
/// removed are canonical blocks and sidechains that fork below the `upper_bound`. This is the
/// same behavior as if the `finalized_num` were `Some(upper_bound)`.
pub(crate) fn remove_until(
    &mut self,
    upper_bound: BlockNumHash,
    last_persisted_hash: B256,
    finalized_num_hash: Option<BlockNumHash>,
) {
    debug!(target: "engine::tree", ?upper_bound, ?finalized_num_hash, "Removing blocks from the tree");

    // If the finalized num is ahead of the upper bound, and exists, we need to instead ensure
    // that the only blocks removed, are canonical blocks less than the upper bound.
    let finalized_num_hash = finalized_num_hash.map(|mut finalized| {
        if upper_bound.number < finalized.number {
            finalized = upper_bound;
            debug!(target: "engine::tree", ?finalized, "Adjusted upper bound");
        }
        finalized
    });

    // We want to do two things:
    // * remove canonical blocks that are persisted
    // * remove forks whose root are below the finalized block
    // We can do this in 2 steps:
    // * remove all canonical blocks below the upper bound
    // * fetch the number of the finalized hash, removing any sidechains that are __below__ the
    //   finalized block
    self.remove_canonical_until(upper_bound.number, last_persisted_hash);

    // Now, we have removed canonical blocks (assuming the upper bound is above the finalized
    // block) and only have sidechains below the finalized block.
    if let Some(finalized_num_hash) = finalized_num_hash {
        self.prune_finalized_sidechains(finalized_num_hash);
    }
}
/// Determines if the second block is a direct descendant of the first block.
///
/// If the two blocks are the same, this returns `false`.
pub(crate) fn is_descendant(&self, first: BlockNumHash, second: BlockWithParent) -> bool {
// If the second block's parent is the first block's hash, then it is a direct descendant
// and we can return early.
if second.parent == first.hash {
return true
}
// If the second block is lower than, or has the same block number, they are not
// descendants.
if second.block.number <= first.number {
return false
}
// iterate through parents of the second until we reach the number
let Some(mut current_block) = self.blocks_by_hash.get(&second.parent) else {
// If we can't find its parent in the tree, we can't continue, so return false
return false
};
while current_block.recovered_block().number() > first.number + 1 {
let Some(block) =
self.blocks_by_hash.get(¤t_block.recovered_block().parent_hash())
else {
// If we can't find its parent in the tree, we can't continue, so return false
return false
};
current_block = block;
}
// Now the block numbers should be equal, so we compare hashes.
current_block.recovered_block().parent_hash() == first.hash
}
/// Updates the canonical head to the given block.
pub(crate) const fn set_canonical_head(&mut self, new_head: BlockNumHash) {
    self.current_canonical_head = new_head;
}

/// Returns the tracked canonical head.
pub(crate) const fn canonical_head(&self) -> &BlockNumHash {
    &self.current_canonical_head
}

/// Returns the block hash of the canonical head.
pub(crate) const fn canonical_block_hash(&self) -> B256 {
    self.canonical_head().hash
}

/// Returns the block number of the canonical head.
pub(crate) const fn canonical_block_number(&self) -> BlockNumber {
    self.canonical_head().number
}
}
#[cfg(test)]
mod tests {
use super::*;
use reth_chain_state::test_utils::TestBlockBuilder;
// `is_descendant` resolves both direct children and grandchildren once the
// intermediate blocks are inserted.
#[test]
fn test_tree_state_normal_descendant() {
    let mut tree_state = TreeState::new(BlockNumHash::default(), EngineApiKind::Ethereum);
    let blocks: Vec<_> = TestBlockBuilder::eth().get_executed_blocks(1..4).collect();

    tree_state.insert_executed(blocks[0].clone());
    assert!(tree_state.is_descendant(
        blocks[0].recovered_block().num_hash(),
        blocks[1].recovered_block().block_with_parent()
    ));

    tree_state.insert_executed(blocks[1].clone());

    assert!(tree_state.is_descendant(
        blocks[0].recovered_block().num_hash(),
        blocks[2].recovered_block().block_with_parent()
    ));
    assert!(tree_state.is_descendant(
        blocks[1].recovered_block().num_hash(),
        blocks[2].recovered_block().block_with_parent()
    ));
}
// Inserting a linear chain maintains the parent->child links and only creates
// entries for parents that actually have tracked children.
#[tokio::test]
async fn test_tree_state_insert_executed() {
    let mut tree_state = TreeState::new(BlockNumHash::default(), EngineApiKind::Ethereum);
    let blocks: Vec<_> = TestBlockBuilder::eth().get_executed_blocks(1..4).collect();

    tree_state.insert_executed(blocks[0].clone());
    tree_state.insert_executed(blocks[1].clone());

    assert_eq!(
        tree_state.parent_to_child.get(&blocks[0].recovered_block().hash()),
        Some(&HashSet::from_iter([blocks[1].recovered_block().hash()]))
    );

    assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));

    tree_state.insert_executed(blocks[2].clone());

    assert_eq!(
        tree_state.parent_to_child.get(&blocks[1].recovered_block().hash()),
        Some(&HashSet::from_iter([blocks[2].recovered_block().hash()]))
    );
    assert!(tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));

    assert!(!tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash()));
}
// Fork blocks at the same heights coexist with the original chain, and
// re-inserting an already-known block is a no-op.
#[tokio::test]
async fn test_tree_state_insert_executed_with_reorg() {
    let mut tree_state = TreeState::new(BlockNumHash::default(), EngineApiKind::Ethereum);
    let mut test_block_builder = TestBlockBuilder::eth();
    let blocks: Vec<_> = test_block_builder.get_executed_blocks(1..6).collect();

    for block in &blocks {
        tree_state.insert_executed(block.clone());
    }
    assert_eq!(tree_state.blocks_by_hash.len(), 5);

    let fork_block_3 = test_block_builder
        .get_executed_block_with_number(3, blocks[1].recovered_block().hash());
    let fork_block_4 = test_block_builder
        .get_executed_block_with_number(4, fork_block_3.recovered_block().hash());
    let fork_block_5 = test_block_builder
        .get_executed_block_with_number(5, fork_block_4.recovered_block().hash());

    tree_state.insert_executed(fork_block_3.clone());
    tree_state.insert_executed(fork_block_4.clone());
    tree_state.insert_executed(fork_block_5.clone());

    assert_eq!(tree_state.blocks_by_hash.len(), 8);
    assert_eq!(tree_state.blocks_by_number[&3].len(), 2); // two blocks at height 3 (original and fork)
    assert_eq!(tree_state.parent_to_child[&blocks[1].recovered_block().hash()].len(), 2); // block 2 should have two children

    // verify that we can insert the same block again without issues
    tree_state.insert_executed(fork_block_4.clone());
    assert_eq!(tree_state.blocks_by_hash.len(), 8);

    assert!(tree_state.parent_to_child[&fork_block_3.recovered_block().hash()]
        .contains(&fork_block_4.recovered_block().hash()));
    assert!(tree_state.parent_to_child[&fork_block_4.recovered_block().hash()]
        .contains(&fork_block_5.recovered_block().hash()));

    assert_eq!(tree_state.blocks_by_number[&4].len(), 2);
    assert_eq!(tree_state.blocks_by_number[&5].len(), 2);
}
// `remove_until` with a finalized hash removes blocks 1-2 (inclusive bound)
// from every collection while keeping blocks 3-5 and their links intact.
#[tokio::test]
async fn test_tree_state_remove_before() {
    let start_num_hash = BlockNumHash::default();
    let mut tree_state = TreeState::new(start_num_hash, EngineApiKind::Ethereum);
    let blocks: Vec<_> = TestBlockBuilder::eth().get_executed_blocks(1..6).collect();

    for block in &blocks {
        tree_state.insert_executed(block.clone());
    }

    let last = blocks.last().unwrap();

    // set the canonical head
    tree_state.set_canonical_head(last.recovered_block().num_hash());

    // inclusive bound, so we should remove anything up to and including 2
    tree_state.remove_until(
        BlockNumHash::new(2, blocks[1].recovered_block().hash()),
        start_num_hash.hash,
        Some(blocks[1].recovered_block().num_hash()),
    );

    assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].recovered_block().hash()));
    assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].recovered_block().hash()));
    assert!(!tree_state.blocks_by_number.contains_key(&1));
    assert!(!tree_state.blocks_by_number.contains_key(&2));

    assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].recovered_block().hash()));
    assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].recovered_block().hash()));
    assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].recovered_block().hash()));
    assert!(tree_state.blocks_by_number.contains_key(&3));
    assert!(tree_state.blocks_by_number.contains_key(&4));
    assert!(tree_state.blocks_by_number.contains_key(&5));

    assert!(!tree_state.parent_to_child.contains_key(&blocks[0].recovered_block().hash()));
    assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
    assert!(tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash()));
    assert!(tree_state.parent_to_child.contains_key(&blocks[3].recovered_block().hash()));
    assert!(!tree_state.parent_to_child.contains_key(&blocks[4].recovered_block().hash()));

    assert_eq!(
        tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()),
        Some(&HashSet::from_iter([blocks[3].recovered_block().hash()]))
    );
    assert_eq!(
        tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()),
        Some(&HashSet::from_iter([blocks[4].recovered_block().hash()]))
    );
}
// Same as `test_tree_state_remove_before`, but with no finalized hash: the
// canonical blocks below the bound are still removed.
#[tokio::test]
async fn test_tree_state_remove_before_finalized() {
    let start_num_hash = BlockNumHash::default();
    let mut tree_state = TreeState::new(start_num_hash, EngineApiKind::Ethereum);
    let blocks: Vec<_> = TestBlockBuilder::eth().get_executed_blocks(1..6).collect();

    for block in &blocks {
        tree_state.insert_executed(block.clone());
    }

    let last = blocks.last().unwrap();

    // set the canonical head
    tree_state.set_canonical_head(last.recovered_block().num_hash());

    // we should still remove everything up to and including 2
    tree_state.remove_until(
        BlockNumHash::new(2, blocks[1].recovered_block().hash()),
        start_num_hash.hash,
        None,
    );

    assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].recovered_block().hash()));
    assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].recovered_block().hash()));
    assert!(!tree_state.blocks_by_number.contains_key(&1));
    assert!(!tree_state.blocks_by_number.contains_key(&2));

    assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].recovered_block().hash()));
    assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].recovered_block().hash()));
    assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].recovered_block().hash()));
    assert!(tree_state.blocks_by_number.contains_key(&3));
    assert!(tree_state.blocks_by_number.contains_key(&4));
    assert!(tree_state.blocks_by_number.contains_key(&5));

    assert!(!tree_state.parent_to_child.contains_key(&blocks[0].recovered_block().hash()));
    assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash()));
    assert!(tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash()));
    assert!(tree_state.parent_to_child.contains_key(&blocks[3].recovered_block().hash()));
    assert!(!tree_state.parent_to_child.contains_key(&blocks[4].recovered_block().hash()));

    assert_eq!(
        tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()),
        Some(&HashSet::from_iter([blocks[3].recovered_block().hash()]))
    );
    assert_eq!(
        tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()),
        Some(&HashSet::from_iter([blocks[4].recovered_block().hash()]))
    );
}
#[tokio::test]
async fn test_tree_state_remove_before_lower_finalized() {
    // Tree state anchored at the default num/hash.
    let anchor = BlockNumHash::default();
    let mut state = TreeState::new(anchor, EngineApiKind::Ethereum);

    // Insert five consecutive executed blocks (numbers 1..=5).
    let chain: Vec<_> = TestBlockBuilder::eth().get_executed_blocks(1..6).collect();
    for executed in &chain {
        state.insert_executed(executed.clone());
    }

    // The canonical head is the tip of the inserted chain.
    let tip = chain.last().unwrap();
    state.set_canonical_head(tip.recovered_block().num_hash());

    // we have no forks so we should still remove anything up to and including 2
    state.remove_until(
        BlockNumHash::new(2, chain[1].recovered_block().hash()),
        anchor.hash,
        Some(chain[0].recovered_block().num_hash()),
    );

    // Blocks 1 and 2 are gone from both indexes.
    for removed in &chain[..2] {
        assert!(!state.blocks_by_hash.contains_key(&removed.recovered_block().hash()));
    }
    for number in 1..=2u64 {
        assert!(!state.blocks_by_number.contains_key(&number));
    }

    // Blocks 3..=5 remain in both indexes.
    for kept in &chain[2..] {
        assert!(state.blocks_by_hash.contains_key(&kept.recovered_block().hash()));
    }
    for number in 3..=5u64 {
        assert!(state.blocks_by_number.contains_key(&number));
    }

    // Parent links: only 3 -> 4 and 4 -> 5 survive the removal.
    for absent in [0usize, 1, 4] {
        assert!(!state.parent_to_child.contains_key(&chain[absent].recovered_block().hash()));
    }
    for present in [2usize, 3] {
        assert!(state.parent_to_child.contains_key(&chain[present].recovered_block().hash()));
    }
    assert_eq!(
        state.parent_to_child.get(&chain[2].recovered_block().hash()),
        Some(&HashSet::from_iter([chain[3].recovered_block().hash()]))
    );
    assert_eq!(
        state.parent_to_child.get(&chain[3].recovered_block().hash()),
        Some(&HashSet::from_iter([chain[4].recovered_block().hash()]))
    );
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/tree/precompile_cache.rs | crates/engine/tree/src/tree/precompile_cache.rs | //! Contains a precompile cache that is backed by a moka cache.
use alloy_primitives::Bytes;
use parking_lot::Mutex;
use reth_evm::precompiles::{DynPrecompile, Precompile, PrecompileInput};
use revm::precompile::{PrecompileId, PrecompileOutput, PrecompileResult};
use revm_primitives::Address;
use schnellru::LruMap;
use std::{
collections::HashMap,
hash::{Hash, Hasher},
sync::Arc,
};
/// Default max cache size for [`PrecompileCache`]
const MAX_CACHE_SIZE: u32 = 10_000;

/// Stores caches for each precompile.
///
/// One independent [`PrecompileCache`] is kept per precompile address so that
/// precompiles with identical calldata never share entries.
#[derive(Debug, Clone, Default)]
pub struct PrecompileCacheMap<S>(HashMap<Address, PrecompileCache<S>>)
where
    S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone;

impl<S> PrecompileCacheMap<S>
where
    S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
{
    /// Returns the cache for the given precompile address, creating an empty one on
    /// first access.
    ///
    /// The returned handle shares storage with the map entry ([`PrecompileCache`] is
    /// an `Arc`-backed handle, so this clone is cheap).
    pub(crate) fn cache_for_address(&mut self, address: Address) -> PrecompileCache<S> {
        self.0.entry(address).or_default().clone()
    }
}
/// Cache for precompiles, for each input stores the result.
///
/// [`LruMap`] requires a mutable reference on `get` since it updates the LRU order,
/// so we use a [`Mutex`] instead of an `RwLock`.
#[derive(Debug, Clone)]
pub struct PrecompileCache<S>(Arc<Mutex<LruMap<CacheKey<S>, CacheEntry>>>)
where
    S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone;

impl<S> Default for PrecompileCache<S>
where
    S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
{
    fn default() -> Self {
        // Bounded LRU: once MAX_CACHE_SIZE entries are stored, the
        // least-recently-used entry is evicted on insert.
        Self(Arc::new(Mutex::new(LruMap::new(schnellru::ByLength::new(MAX_CACHE_SIZE)))))
    }
}

impl<S> PrecompileCache<S>
where
    S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
{
    /// Looks up an entry by a borrowed key, avoiding a calldata copy on lookup.
    ///
    /// Returns a clone of the entry so the lock is released before the caller
    /// uses the result.
    fn get(&self, key: &CacheKeyRef<'_, S>) -> Option<CacheEntry> {
        self.0.lock().get(key).cloned()
    }

    /// Inserts the given key and value into the cache, returning the new cache size.
    fn insert(&self, key: CacheKey<S>, value: CacheEntry) -> usize {
        let mut cache = self.0.lock();
        cache.insert(key, value);
        cache.len()
    }
}
/// Cache key, spec id and precompile call input. spec id is included in the key to account for
/// precompile repricing across fork activations.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CacheKey<S>((S, Bytes));

impl<S> CacheKey<S> {
    /// Creates an owned key from the spec id and the precompile calldata.
    const fn new(spec_id: S, input: Bytes) -> Self {
        Self((spec_id, input))
    }
}

/// Cache key reference, used to avoid cloning the input bytes when looking up using a [`CacheKey`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CacheKeyRef<'a, S>((S, &'a [u8]));

impl<'a, S> CacheKeyRef<'a, S> {
    /// Creates a borrowed key from the spec id and the precompile calldata.
    const fn new(spec_id: S, input: &'a [u8]) -> Self {
        Self((spec_id, input))
    }
}

impl<S: PartialEq> PartialEq<CacheKey<S>> for CacheKeyRef<'_, S> {
    // Field-by-field comparison against the owned key; enables allocation-free
    // lookups in the LRU map.
    fn eq(&self, other: &CacheKey<S>) -> bool {
        self.0 .0 == other.0 .0 && self.0 .1 == other.0 .1.as_ref()
    }
}

impl<'a, S: Hash> Hash for CacheKeyRef<'a, S> {
    // INVARIANT: must hash exactly like the derived `Hash` of `CacheKey`
    // (spec id first, then the calldata bytes), otherwise borrowed lookups
    // would never hit. Verified by `test_cache_key_ref_hash`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0 .0.hash(state);
        self.0 .1.hash(state);
    }
}
/// Cache entry, precompile successful output.
///
/// Only successful executions are cached; errors are never stored.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CacheEntry(PrecompileOutput);

impl CacheEntry {
    /// Gas consumed by the execution that produced this cached output.
    const fn gas_used(&self) -> u64 {
        self.0.gas_used
    }

    /// Converts the cached output back into a (successful) `PrecompileResult`.
    fn to_precompile_result(&self) -> PrecompileResult {
        Ok(self.0.clone())
    }
}
/// A cache for precompile inputs / outputs.
#[derive(Debug)]
pub(crate) struct CachedPrecompile<S>
where
    S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
{
    /// Cache for precompile results and gas bounds.
    cache: PrecompileCache<S>,
    /// The precompile.
    precompile: DynPrecompile,
    /// Cache metrics.
    metrics: Option<CachedPrecompileMetrics>,
    /// Spec id associated to the EVM from which this cached precompile was created.
    spec_id: S,
}

impl<S> CachedPrecompile<S>
where
    S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
{
    /// `CachedPrecompile` constructor.
    pub(crate) const fn new(
        precompile: DynPrecompile,
        cache: PrecompileCache<S>,
        spec_id: S,
        metrics: Option<CachedPrecompileMetrics>,
    ) -> Self {
        Self { precompile, cache, spec_id, metrics }
    }

    /// Wraps the given precompile in a caching layer and erases it back into a
    /// [`DynPrecompile`], preserving the wrapped precompile's id.
    pub(crate) fn wrap(
        precompile: DynPrecompile,
        cache: PrecompileCache<S>,
        spec_id: S,
        metrics: Option<CachedPrecompileMetrics>,
    ) -> DynPrecompile {
        let precompile_id = precompile.precompile_id().clone();
        let wrapped = Self::new(precompile, cache, spec_id, metrics);
        (precompile_id, move |input: PrecompileInput<'_>| -> PrecompileResult {
            wrapped.call(input)
        })
        .into()
    }

    /// Increments the cache-hit counter, if metrics are enabled.
    fn increment_by_one_precompile_cache_hits(&self) {
        if let Some(metrics) = &self.metrics {
            metrics.precompile_cache_hits.increment(1);
        }
    }

    /// Increments the cache-miss counter, if metrics are enabled.
    fn increment_by_one_precompile_cache_misses(&self) {
        if let Some(metrics) = &self.metrics {
            metrics.precompile_cache_misses.increment(1);
        }
    }

    /// Records the current cache length as the size gauge, if metrics are enabled.
    fn set_precompile_cache_size_metric(&self, to: f64) {
        if let Some(metrics) = &self.metrics {
            metrics.precompile_cache_size.set(to);
        }
    }

    /// Increments the execution-error counter, if metrics are enabled.
    fn increment_by_one_precompile_errors(&self) {
        if let Some(metrics) = &self.metrics {
            metrics.precompile_errors.increment(1);
        }
    }
}
impl<S> Precompile for CachedPrecompile<S>
where
    S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static,
{
    fn precompile_id(&self) -> &PrecompileId {
        // Delegate to the wrapped precompile.
        self.precompile.precompile_id()
    }

    /// Executes the precompile, consulting the cache first.
    ///
    /// A cached output is only served when the caller supplies at least as much
    /// gas as the recorded execution consumed; otherwise the underlying
    /// precompile runs (and will report its own gas outcome). Successful runs
    /// are cached; errors are only counted in metrics.
    fn call(&self, input: PrecompileInput<'_>) -> PrecompileResult {
        // Borrowed key: no calldata copy on the lookup path.
        let lookup = CacheKeyRef::new(self.spec_id.clone(), input.data);
        if let Some(hit) = self.cache.get(&lookup) {
            self.increment_by_one_precompile_cache_hits();
            if input.gas >= hit.gas_used() {
                return hit.to_precompile_result()
            }
        }

        // Keep the calldata slice alive past `call`, which consumes `input`.
        let calldata = input.data;
        let result = self.precompile.call(input);
        if let Ok(output) = &result {
            // Store an owned copy of the calldata together with the output.
            let owned_key =
                CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(calldata));
            let cache_len = self.cache.insert(owned_key, CacheEntry(output.clone()));
            self.set_precompile_cache_size_metric(cache_len as f64);
            self.increment_by_one_precompile_cache_misses();
        } else {
            self.increment_by_one_precompile_errors();
        }
        result
    }
}
/// Metrics for the cached precompile.
#[derive(reth_metrics::Metrics, Clone)]
#[metrics(scope = "sync.caching")]
pub(crate) struct CachedPrecompileMetrics {
    /// Precompile cache hits
    precompile_cache_hits: metrics::Counter,
    /// Precompile cache misses
    precompile_cache_misses: metrics::Counter,
    /// Precompile cache size. Uses the LRU cache length as the size metric.
    precompile_cache_size: metrics::Gauge,
    /// Precompile execution errors.
    precompile_errors: metrics::Counter,
}

impl CachedPrecompileMetrics {
    /// Creates a new instance of [`CachedPrecompileMetrics`] with the given address.
    ///
    /// Adds address as an `address` label padded with zeros to at least two hex symbols, prefixed
    /// by `0x`.
    pub(crate) fn new_with_address(address: Address) -> Self {
        Self::new_with_labels(&[("address", format!("0x{address:02x}"))])
    }
}
#[cfg(test)]
mod tests {
    use std::hash::DefaultHasher;

    use super::*;
    use reth_evm::{EthEvmFactory, Evm, EvmEnv, EvmFactory};
    use reth_revm::db::EmptyDB;
    use revm::{context::TxEnv, precompile::PrecompileOutput};
    use revm_primitives::hardfork::SpecId;

    /// `CacheKeyRef` must compare and hash identically to the owned `CacheKey`,
    /// otherwise borrowed lookups into the LRU map would always miss.
    #[test]
    fn test_cache_key_ref_hash() {
        let key1 = CacheKey::new(SpecId::PRAGUE, b"test_input".into());
        let key2 = CacheKeyRef::new(SpecId::PRAGUE, b"test_input");
        assert!(PartialEq::eq(&key2, &key1));

        // Equal keys must produce equal hashes.
        let mut hasher = DefaultHasher::new();
        key1.hash(&mut hasher);
        let hash1 = hasher.finish();
        let mut hasher = DefaultHasher::new();
        key2.hash(&mut hasher);
        let hash2 = hasher.finish();
        assert_eq!(hash1, hash2);
    }

    /// Round-trips an entry through the cache: insert with an owned key, read
    /// back via a borrowed key.
    #[test]
    fn test_precompile_cache_basic() {
        let dyn_precompile: DynPrecompile = |_input: PrecompileInput<'_>| -> PrecompileResult {
            Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default(), reverted: false })
        }
        .into();
        let cache =
            CachedPrecompile::new(dyn_precompile, PrecompileCache::default(), SpecId::PRAGUE, None);

        let output = PrecompileOutput {
            gas_used: 50,
            bytes: alloy_primitives::Bytes::copy_from_slice(b"cached_result"),
            reverted: false,
        };

        let key = CacheKey::new(SpecId::PRAGUE, b"test_input".into());
        let expected = CacheEntry(output);
        cache.cache.insert(key, expected.clone());

        let key = CacheKeyRef::new(SpecId::PRAGUE, b"test_input");
        let actual = cache.cache.get(&key).unwrap();
        assert_eq!(actual, expected);
    }

    /// Precompiles at different addresses must not share cache entries, even
    /// when called with identical calldata.
    #[test]
    fn test_precompile_cache_map_separate_addresses() {
        let mut evm = EthEvmFactory::default().create_evm(EmptyDB::default(), EvmEnv::default());
        let input_data = b"same_input";
        let gas_limit = 100_000;
        let address1 = Address::repeat_byte(1);
        let address2 = Address::repeat_byte(2);
        let mut cache_map = PrecompileCacheMap::default();
        // create the first precompile with a specific output
        let precompile1: DynPrecompile = (PrecompileId::custom("custom"), {
            move |input: PrecompileInput<'_>| -> PrecompileResult {
                assert_eq!(input.data, input_data);
                Ok(PrecompileOutput {
                    gas_used: 5000,
                    bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_1"),
                    reverted: false,
                })
            }
        })
        .into();
        // create the second precompile with a different output
        let precompile2: DynPrecompile = (PrecompileId::custom("custom"), {
            move |input: PrecompileInput<'_>| -> PrecompileResult {
                assert_eq!(input.data, input_data);
                Ok(PrecompileOutput {
                    gas_used: 7000,
                    bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_2"),
                    reverted: false,
                })
            }
        })
        .into();
        // Each wrapper gets its own per-address cache from the map.
        let wrapped_precompile1 = CachedPrecompile::wrap(
            precompile1,
            cache_map.cache_for_address(address1),
            SpecId::PRAGUE,
            None,
        );
        let wrapped_precompile2 = CachedPrecompile::wrap(
            precompile2,
            cache_map.cache_for_address(address2),
            SpecId::PRAGUE,
            None,
        );
        let precompile1_address = Address::with_last_byte(1);
        let precompile2_address = Address::with_last_byte(2);
        evm.precompiles_mut().apply_precompile(&precompile1_address, |_| Some(wrapped_precompile1));
        evm.precompiles_mut().apply_precompile(&precompile2_address, |_| Some(wrapped_precompile2));
        // first invocation of precompile1 (cache miss)
        let result1 = evm
            .transact_raw(TxEnv {
                caller: Address::ZERO,
                gas_limit,
                data: input_data.into(),
                kind: precompile1_address.into(),
                ..Default::default()
            })
            .unwrap()
            .result
            .into_output()
            .unwrap();
        assert_eq!(result1.as_ref(), b"output_from_precompile_1");
        // first invocation of precompile2 with the same input (should be a cache miss)
        // if cache was incorrectly shared, we'd get precompile1's result
        let result2 = evm
            .transact_raw(TxEnv {
                caller: Address::ZERO,
                gas_limit,
                data: input_data.into(),
                kind: precompile2_address.into(),
                ..Default::default()
            })
            .unwrap()
            .result
            .into_output()
            .unwrap();
        assert_eq!(result2.as_ref(), b"output_from_precompile_2");
        // second invocation of precompile1 (should be a cache hit)
        let result3 = evm
            .transact_raw(TxEnv {
                caller: Address::ZERO,
                gas_limit,
                data: input_data.into(),
                kind: precompile1_address.into(),
                ..Default::default()
            })
            .unwrap()
            .result
            .into_output()
            .unwrap();
        assert_eq!(result3.as_ref(), b"output_from_precompile_1");
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/engine/tree/src/tree/error.rs | crates/engine/tree/src/tree/error.rs | //! Internal errors for the tree module.
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use reth_consensus::ConsensusError;
use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError};
use reth_evm::execute::InternalBlockExecutionError;
use reth_payload_primitives::NewPayloadError;
use reth_primitives_traits::{Block, BlockBody, SealedBlock};
use tokio::sync::oneshot::error::TryRecvError;
/// This is an error that can come from advancing persistence. Either this can be a
/// [`TryRecvError`], or this can be a [`ProviderError`]
#[derive(Debug, thiserror::Error)]
pub enum AdvancePersistenceError {
    /// An error that can be from failing to receive a value from persistence
    #[error(transparent)]
    RecvError(#[from] TryRecvError),
    /// A provider error
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// Missing ancestor.
    ///
    /// This error occurs when we need to compute the state root for a block with missing trie
    /// updates, but the ancestor block is not available. State root computation requires the state
    /// from the parent block as a starting point.
    ///
    /// A block may be missing the trie updates when it's a fork chain block building on top of the
    /// historical database state. Since we don't store the historical trie state, we cannot
    /// generate the trie updates for it until the moment when database is unwound to the canonical
    /// chain.
    ///
    /// Also see [`reth_chain_state::ExecutedTrieUpdates::Missing`].
    #[error("Missing ancestor with hash {0}")]
    MissingAncestor(B256),
}
/// Internal payload of [`InsertBlockError`]: the failing block together with the
/// reason insertion failed. Kept behind a `Box` by the public wrapper to keep
/// the error type small.
#[derive(thiserror::Error)]
#[error("Failed to insert block (hash={}, number={}, parent_hash={}): {}",
    .block.hash(),
    .block.number(),
    .block.parent_hash(),
    .kind)]
struct InsertBlockErrorData<B: Block> {
    /// The block that could not be inserted.
    block: SealedBlock<B>,
    /// The underlying failure, used as the error source.
    #[source]
    kind: InsertBlockErrorKind,
}

impl<B: Block> std::fmt::Debug for InsertBlockErrorData<B> {
    // Custom Debug: prints identifying block fields and the tx count instead of
    // dumping the entire block body.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("InsertBlockError")
            .field("error", &self.kind)
            .field("hash", &self.block.hash())
            .field("number", &self.block.number())
            .field("parent_hash", &self.block.parent_hash())
            .field("num_txs", &self.block.body().transactions().len())
            .finish_non_exhaustive()
    }
}

impl<B: Block> InsertBlockErrorData<B> {
    /// Creates the payload from the block and the failure reason.
    const fn new(block: SealedBlock<B>, kind: InsertBlockErrorKind) -> Self {
        Self { block, kind }
    }

    /// Creates the payload already boxed, as stored by [`InsertBlockError`].
    fn boxed(block: SealedBlock<B>, kind: InsertBlockErrorKind) -> Box<Self> {
        Box::new(Self::new(block, kind))
    }
}
/// Error thrown when inserting a block failed because the block is considered invalid.
#[derive(thiserror::Error)]
#[error(transparent)]
pub struct InsertBlockError<B: Block> {
    // Boxed so `Result`s carrying this error stay small.
    inner: Box<InsertBlockErrorData<B>>,
}

// === impl InsertBlockError ===

impl<B: Block> InsertBlockError<B> {
    /// Create a new `InsertBlockError` from the failing block and the reason
    /// insertion failed.
    pub fn new(block: SealedBlock<B>, kind: InsertBlockErrorKind) -> Self {
        Self { inner: InsertBlockErrorData::boxed(block, kind) }
    }

    /// Create a new `InsertBlockError` from a consensus error
    pub fn consensus_error(error: ConsensusError, block: SealedBlock<B>) -> Self {
        Self::new(block, InsertBlockErrorKind::Consensus(error))
    }

    /// Consumes the error and returns the block that resulted in the error
    #[inline]
    pub fn into_block(self) -> SealedBlock<B> {
        self.inner.block
    }

    /// Returns the error kind
    #[inline]
    pub const fn kind(&self) -> &InsertBlockErrorKind {
        &self.inner.kind
    }

    /// Returns the block that resulted in the error
    #[inline]
    pub const fn block(&self) -> &SealedBlock<B> {
        &self.inner.block
    }

    /// Consumes the type and returns the block and error kind.
    #[inline]
    pub fn split(self) -> (SealedBlock<B>, InsertBlockErrorKind) {
        let inner = *self.inner;
        (inner.block, inner.kind)
    }
}

impl<B: Block> std::fmt::Debug for InsertBlockError<B> {
    // Delegates to the inner payload's custom Debug (block identifiers + kind).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Debug::fmt(&self.inner, f)
    }
}
/// All error variants possible when inserting a block
#[derive(Debug, thiserror::Error)]
pub enum InsertBlockErrorKind {
    /// Block violated consensus rules.
    #[error(transparent)]
    Consensus(#[from] ConsensusError),
    /// Block execution failed.
    #[error(transparent)]
    Execution(#[from] BlockExecutionError),
    /// Provider error.
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// Other errors.
    #[error(transparent)]
    Other(#[from] Box<dyn core::error::Error + Send + Sync + 'static>),
}
impl InsertBlockErrorKind {
    /// Returns an [`InsertBlockValidationError`] if the error is caused by an invalid block.
    ///
    /// Returns an [`InsertBlockFatalError`] if the error is caused by an error that is not
    /// validation related or is otherwise fatal.
    ///
    /// This is intended to be used to determine if we should respond `INVALID` as a response when
    /// processing a new block.
    pub fn ensure_validation_error(
        self,
    ) -> Result<InsertBlockValidationError, InsertBlockFatalError> {
        // Flat, exhaustive classification: consensus and execution-validation
        // failures indict the block itself; everything else is internal/fatal.
        match self {
            Self::Consensus(err) => Ok(InsertBlockValidationError::Consensus(err)),
            Self::Execution(BlockExecutionError::Validation(err)) => {
                Ok(InsertBlockValidationError::Validation(err))
            }
            // internal execution errors are not caused by an invalid block
            Self::Execution(BlockExecutionError::Internal(err)) => {
                Err(InsertBlockFatalError::BlockExecutionError(err))
            }
            Self::Provider(err) => Err(InsertBlockFatalError::Provider(err)),
            Self::Other(err) => Err(InternalBlockExecutionError::Other(err).into()),
        }
    }
}
/// Error variants that are not caused by invalid blocks
#[derive(Debug, thiserror::Error)]
pub enum InsertBlockFatalError {
    /// A provider error
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// An internal / fatal block execution error
    #[error(transparent)]
    BlockExecutionError(#[from] InternalBlockExecutionError),
}

/// Error variants that are caused by invalid blocks
#[derive(Debug, thiserror::Error)]
pub enum InsertBlockValidationError {
    /// Block violated consensus rules.
    #[error(transparent)]
    Consensus(#[from] ConsensusError),
    /// Validation error, transparently wrapping [`BlockValidationError`]
    #[error(transparent)]
    Validation(#[from] BlockValidationError),
}

/// Errors that may occur when inserting a payload.
#[derive(Debug, thiserror::Error)]
pub enum InsertPayloadError<B: Block> {
    /// Block validation error
    #[error(transparent)]
    Block(#[from] InsertBlockError<B>),
    /// Payload validation error
    #[error(transparent)]
    Payload(#[from] NewPayloadError),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.